Posted to commits@phoenix.apache.org by ma...@apache.org on 2015/07/02 23:01:04 UTC
[01/47] phoenix git commit: PHOENIX-1660 Implement missing math built-in functions ABS, POWER, LN, LOG, SQRT, CBRT, EXP (Shuxiong Ye)
Repository: phoenix
Updated Branches:
refs/heads/calcite b58600738 -> 1327c726a
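
For context, once applied, these built-ins are callable from SQL like any other Phoenix function. A minimal JDBC sketch (the connection URL, table name "metrics", and column name "val" are illustrative assumptions, not part of this commit; the two-argument LOG(value, base) form follows the LogFunction tests below):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class MathBuiltinsExample {
    public static void main(String[] args) throws Exception {
        // Assumes a reachable Phoenix/HBase quorum; the URL is an assumption.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            ResultSet rs = conn.createStatement().executeQuery(
                "SELECT ABS(val), EXP(val), LN(val), LOG(val), LOG(val, 3),"
                + " POWER(val, 2), SQRT(val), CBRT(val) FROM metrics");
            while (rs.next()) {
                // EXP/LN/LOG/POWER/SQRT/CBRT evaluate to DOUBLE here, matching
                // the Double results unwrapped in the unit tests below.
                System.out.printf("abs=%f exp=%f ln=%f%n",
                    rs.getDouble(1), rs.getDouble(2), rs.getDouble(3));
            }
        }
    }
}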
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/test/java/org/apache/phoenix/expression/ExpFunctionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/ExpFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/ExpFunctionTest.java
new file mode 100644
index 0000000..b7b95c2
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/ExpFunctionTest.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.math.BigDecimal;
+import java.sql.SQLException;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.function.ExpFunction;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PDecimal;
+import org.apache.phoenix.schema.types.PDouble;
+import org.apache.phoenix.schema.types.PFloat;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.schema.types.PNumericType;
+import org.apache.phoenix.schema.types.PSmallint;
+import org.apache.phoenix.schema.types.PTinyint;
+import org.apache.phoenix.schema.types.PUnsignedDouble;
+import org.apache.phoenix.schema.types.PUnsignedFloat;
+import org.apache.phoenix.schema.types.PUnsignedInt;
+import org.apache.phoenix.schema.types.PUnsignedLong;
+import org.apache.phoenix.schema.types.PUnsignedSmallint;
+import org.apache.phoenix.schema.types.PUnsignedTinyint;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Unit tests for {@link ExpFunction}
+ */
+public class ExpFunctionTest {
+ private static final double ZERO = 1e-9;
+
+ private static boolean twoDoubleEquals(double a, double b) {
+ // NaN is treated as equal to NaN; a NaN on one side only is a mismatch.
+ if (Double.isNaN(a) ^ Double.isNaN(b)) return false;
+ if (Double.isNaN(a)) return true;
+ if (Double.isInfinite(a) ^ Double.isInfinite(b)) return false;
+ // Two infinities are equal only when their signs agree.
+ if (Double.isInfinite(a)) return (a > 0) == (b > 0);
+ // Finite values must agree within the fixed ZERO tolerance.
+ return Math.abs(a - b) <= ZERO;
+ }
+
+ private static boolean testExpression(LiteralExpression literal, double expected)
+ throws SQLException {
+ List<Expression> expressions = Lists.newArrayList((Expression) literal);
+ Expression expFunction = new ExpFunction(expressions);
+ ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+ boolean ret = expFunction.evaluate(null, ptr);
+ if (ret) {
+ Double result =
+ (Double) expFunction.getDataType().toObject(ptr, expFunction.getSortOrder());
+ assertTrue(twoDoubleEquals(result.doubleValue(), expected));
+ }
+ return ret;
+ }
+
+ private static void test(Number value, PNumericType dataType, double expected)
+ throws SQLException {
+ LiteralExpression literal;
+ literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC);
+ boolean ret1 = testExpression(literal, expected);
+ literal = LiteralExpression.newConstant(value, dataType, SortOrder.DESC);
+ boolean ret2 = testExpression(literal, expected);
+ assertEquals(ret1, ret2);
+ }
+
+ private static void testBatch(Number[] value, PNumericType dataType) throws SQLException {
+ double[] expected = new double[value.length];
+ for (int i = 0; i < expected.length; ++i) {
+ expected[i] = Math.exp(value[i].doubleValue());
+ }
+ assertEquals(value.length, expected.length);
+ for (int i = 0; i < value.length; ++i) {
+ test(value[i], dataType, expected[i]);
+ }
+ }
+
+ @Test
+ public void testExpFunction() throws Exception {
+ Random random = new Random();
+
+ testBatch(
+ new BigDecimal[] { BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0),
+ BigDecimal.valueOf(-1.0), BigDecimal.valueOf(123.1234),
+ BigDecimal.valueOf(-123.1234), BigDecimal.valueOf(random.nextDouble()),
+ BigDecimal.valueOf(random.nextDouble()) }, PDecimal.INSTANCE);
+
+ testBatch(new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, random.nextFloat(),
+ random.nextFloat() }, PFloat.INSTANCE);
+
+ testBatch(new Float[] { 1.0f, 0.0f, 123.1234f, }, PUnsignedFloat.INSTANCE);
+
+ testBatch(
+ new Double[] { 1.0, 0.0, -1.0, 123.1234, -123.1234, random.nextDouble(),
+ random.nextDouble() }, PDouble.INSTANCE);
+
+ testBatch(new Double[] { 1.0, 0.0, 123.1234, }, PUnsignedDouble.INSTANCE);
+
+ testBatch(
+ new Long[] { 1L, 0L, -1L, Long.MAX_VALUE, Long.MIN_VALUE, 123L, -123L,
+ random.nextLong(), random.nextLong() }, PLong.INSTANCE);
+
+ testBatch(new Long[] { 1L, 0L, Long.MAX_VALUE, 123L }, PUnsignedLong.INSTANCE);
+
+ testBatch(
+ new Integer[] { 1, 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, 123, -123,
+ random.nextInt(), random.nextInt() }, PInteger.INSTANCE);
+
+ testBatch(new Integer[] { 1, 0, Integer.MAX_VALUE, 123 }, PUnsignedInt.INSTANCE);
+
+ testBatch(new Short[] { (short) 1, (short) 0, (short) -1, Short.MAX_VALUE, Short.MIN_VALUE,
+ (short) 123, (short) -123 }, PSmallint.INSTANCE);
+
+ testBatch(new Short[] { (short) 1, (short) 0, Short.MAX_VALUE, (short) 123 },
+ PUnsignedSmallint.INSTANCE);
+
+ testBatch(new Byte[] { (byte) 1, (byte) 0, (byte) -1, Byte.MAX_VALUE, Byte.MIN_VALUE,
+ (byte) 123, (byte) -123 }, PTinyint.INSTANCE);
+
+ testBatch(new Byte[] { (byte) 1, (byte) 0, Byte.MAX_VALUE, (byte) 123 }, PUnsignedTinyint.INSTANCE);
+ }
+}
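
An aside on the comparison helper: twoDoubleEquals is duplicated verbatim in each of the three new test classes. JUnit 4's tolerance overload of assertEquals appears to cover the same cases, since it short-circuits on Double.compare (which treats NaN as equal to NaN and only matches infinities of the same sign) before applying the delta. A later cleanup could likely collapse each helper to a single assertion, sketched here under that assumption:

// Hedged sketch: relies on JUnit 4's assertEquals(double, double, double)
// short-circuiting via Double.compare before the |expected - actual| <= delta check.
assertEquals(expected, result.doubleValue(), ZERO);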
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/test/java/org/apache/phoenix/expression/LnLogFunctionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/LnLogFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/LnLogFunctionTest.java
new file mode 100644
index 0000000..a5e6fc7
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/LnLogFunctionTest.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.math.BigDecimal;
+import java.sql.SQLException;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.function.LnFunction;
+import org.apache.phoenix.expression.function.LogFunction;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PDecimal;
+import org.apache.phoenix.schema.types.PDouble;
+import org.apache.phoenix.schema.types.PFloat;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.schema.types.PNumericType;
+import org.apache.phoenix.schema.types.PSmallint;
+import org.apache.phoenix.schema.types.PTinyint;
+import org.apache.phoenix.schema.types.PUnsignedDouble;
+import org.apache.phoenix.schema.types.PUnsignedFloat;
+import org.apache.phoenix.schema.types.PUnsignedInt;
+import org.apache.phoenix.schema.types.PUnsignedLong;
+import org.apache.phoenix.schema.types.PUnsignedSmallint;
+import org.apache.phoenix.schema.types.PUnsignedTinyint;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Unit tests for {@link LnFunction} and {@link LogFunction}
+ */
+public class LnLogFunctionTest {
+ private static final double ZERO = 1e-9;
+ private static final Expression THREE = LiteralExpression.newConstant(3);
+ private static final Expression DEFAULT_VALUE = LiteralExpression.newConstant(10.0);
+
+ private static boolean twoDoubleEquals(double a, double b) {
+ // NaN is treated as equal to NaN; a NaN on one side only is a mismatch.
+ if (Double.isNaN(a) ^ Double.isNaN(b)) return false;
+ if (Double.isNaN(a)) return true;
+ if (Double.isInfinite(a) ^ Double.isInfinite(b)) return false;
+ // Two infinities are equal only when their signs agree.
+ if (Double.isInfinite(a)) return (a > 0) == (b > 0);
+ // Finite values must agree within the fixed ZERO tolerance.
+ return Math.abs(a - b) <= ZERO;
+ }
+
+ private static boolean testExpression(LiteralExpression literal, LiteralExpression literal2,
+ LiteralExpression literal3, double exptForLn, double exptForLog10, double exptForLog3)
+ throws SQLException {
+ List<Expression> expressionsLn = Lists.newArrayList((Expression) literal);
+ List<Expression> expressionsLog10 = Lists.newArrayList(literal2, DEFAULT_VALUE);
+ List<Expression> expressionsLog3 = Lists.newArrayList(literal3, THREE);
+
+ ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+
+ Expression lnFunction = new LnFunction(expressionsLn);
+ boolean retLn = lnFunction.evaluate(null, ptr);
+ if (retLn) {
+ Double result =
+ (Double) lnFunction.getDataType().toObject(ptr, lnFunction.getSortOrder());
+ assertTrue(twoDoubleEquals(result.doubleValue(), exptForLn));
+ }
+
+ Expression log10Function = new LogFunction(expressionsLog10);
+ boolean retLog10 = log10Function.evaluate(null, ptr);
+ if (retLog10) {
+ Double result =
+ (Double) log10Function.getDataType()
+ .toObject(ptr, log10Function.getSortOrder());
+ assertTrue(twoDoubleEquals(result.doubleValue(), exptForLog10));
+ }
+ assertEquals(retLn, retLog10);
+
+ Expression log3Function = new LogFunction(expressionsLog3);
+ boolean retLog3 = log3Function.evaluate(null, ptr);
+ if (retLog3) {
+ Double result =
+ (Double) log3Function.getDataType().toObject(ptr, log3Function.getSortOrder());
+ assertTrue(twoDoubleEquals(result.doubleValue(), exptForLog3));
+ }
+ assertEquals(retLn, retLog3);
+ return retLn;
+ }
+
+ private static void test(Number value, PNumericType dataType, double exptForLn,
+ double exptForLog10, double exptForLog3) throws SQLException {
+ LiteralExpression literal, literal2, literal3;
+ literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC);
+ literal2 = LiteralExpression.newConstant(value, dataType, SortOrder.ASC);
+ literal3 = LiteralExpression.newConstant(value, dataType, SortOrder.ASC);
+ boolean ret1 =
+ testExpression(literal, literal2, literal3, exptForLn, exptForLog10, exptForLog3);
+ literal = LiteralExpression.newConstant(value, dataType, SortOrder.DESC);
+ literal2 = LiteralExpression.newConstant(value, dataType, SortOrder.DESC);
+ literal3 = LiteralExpression.newConstant(value, dataType, SortOrder.DESC);
+ boolean ret2 =
+ testExpression(literal, literal2, literal3, exptForLn, exptForLog10, exptForLog3);
+ assertEquals(ret1, ret2);
+ }
+
+ private static void testBatch(Number[] value, PNumericType dataType) throws SQLException {
+ double[][] expected = new double[value.length][3];
+ for (int i = 0; i < expected.length; ++i) {
+ expected[i][0] = Math.log(value[i].doubleValue());
+ expected[i][1] = Math.log10(value[i].doubleValue());
+ expected[i][2] = Math.log10(value[i].doubleValue()) / Math.log10(3);
+ }
+ assertEquals(value.length, expected.length);
+ for (int i = 0; i < value.length; ++i) {
+ test(value[i], dataType, expected[i][0], expected[i][1], expected[i][2]);
+ }
+ }
+
+ @Test
+ public void testLnLogFunction() throws Exception {
+ Random random = new Random();
+
+ testBatch(
+ new BigDecimal[] { BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0),
+ BigDecimal.valueOf(-1.0), BigDecimal.valueOf(123.1234),
+ BigDecimal.valueOf(-123.1234), BigDecimal.valueOf(random.nextDouble()),
+ BigDecimal.valueOf(random.nextDouble()) }, PDecimal.INSTANCE);
+
+ testBatch(new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, random.nextFloat(),
+ random.nextFloat() }, PFloat.INSTANCE);
+
+ testBatch(new Float[] { 1.0f, 0.0f, 123.1234f, }, PUnsignedFloat.INSTANCE);
+
+ testBatch(
+ new Double[] { 1.0, 0.0, -1.0, 123.1234, -123.1234, random.nextDouble(),
+ random.nextDouble() }, PDouble.INSTANCE);
+
+ testBatch(new Double[] { 1.0, 0.0, 123.1234, }, PUnsignedDouble.INSTANCE);
+
+ testBatch(
+ new Long[] { 1L, 0L, -1L, Long.MAX_VALUE, Long.MIN_VALUE, 123L, -123L,
+ random.nextLong(), random.nextLong() }, PLong.INSTANCE);
+
+ testBatch(new Long[] { 1L, 0L, Long.MAX_VALUE, 123L }, PUnsignedLong.INSTANCE);
+
+ testBatch(
+ new Integer[] { 1, 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, 123, -123,
+ random.nextInt(), random.nextInt() }, PInteger.INSTANCE);
+
+ testBatch(new Integer[] { 1, 0, Integer.MAX_VALUE, 123 }, PUnsignedInt.INSTANCE);
+
+ testBatch(new Short[] { (short) 1, (short) 0, (short) -1, Short.MAX_VALUE, Short.MIN_VALUE,
+ (short) 123, (short) -123 }, PSmallint.INSTANCE);
+
+ testBatch(new Short[] { (short) 1, (short) 0, Short.MAX_VALUE, (short) 123 },
+ PUnsignedSmallint.INSTANCE);
+
+ testBatch(new Byte[] { (byte) 1, (byte) 0, (byte) -1, Byte.MAX_VALUE, Byte.MIN_VALUE,
+ (byte) 123, (byte) -123 }, PTinyint.INSTANCE);
+
+ testBatch(new Byte[] { (byte) 1, (byte) 0, Byte.MAX_VALUE, (byte) 123 }, PUnsignedTinyint.INSTANCE);
+ }
+}
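
A note on the base-3 expectations computed in testBatch above: they rely on the change-of-base identity log_3(x) = log10(x) / log10(3), which is equivalent to ln(x) / ln(3). A quick standalone check (variable names are illustrative):

double x = 123.1234;                               // any positive value
double viaLog10 = Math.log10(x) / Math.log10(3);   // as computed in testBatch
double viaLn = Math.log(x) / Math.log(3);          // same identity via natural log
// The two agree to within rounding, i.e. |viaLog10 - viaLn| <= 1e-9.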
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/test/java/org/apache/phoenix/expression/PowerFunctionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/PowerFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/PowerFunctionTest.java
new file mode 100644
index 0000000..9710e52
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/PowerFunctionTest.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.math.BigDecimal;
+import java.sql.SQLException;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.function.PowerFunction;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PDecimal;
+import org.apache.phoenix.schema.types.PDouble;
+import org.apache.phoenix.schema.types.PFloat;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.schema.types.PNumericType;
+import org.apache.phoenix.schema.types.PSmallint;
+import org.apache.phoenix.schema.types.PTinyint;
+import org.apache.phoenix.schema.types.PUnsignedDouble;
+import org.apache.phoenix.schema.types.PUnsignedFloat;
+import org.apache.phoenix.schema.types.PUnsignedInt;
+import org.apache.phoenix.schema.types.PUnsignedLong;
+import org.apache.phoenix.schema.types.PUnsignedSmallint;
+import org.apache.phoenix.schema.types.PUnsignedTinyint;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Unit tests for {@link PowerFunction}
+ */
+public class PowerFunctionTest {
+ private static final double ZERO = 1e-9;
+ private static final Expression ONE_POINT_FIVE = LiteralExpression.newConstant(1.5);
+ private static final Expression TWO = LiteralExpression.newConstant(2);
+ private static final Expression THREE = LiteralExpression.newConstant(3);
+
+ private static boolean twoDoubleEquals(double a, double b) {
+ // NaN is treated as equal to NaN; a NaN on one side only is a mismatch.
+ if (Double.isNaN(a) ^ Double.isNaN(b)) return false;
+ if (Double.isNaN(a)) return true;
+ if (Double.isInfinite(a) ^ Double.isInfinite(b)) return false;
+ // Two infinities are equal only when their signs agree.
+ if (Double.isInfinite(a)) return (a > 0) == (b > 0);
+ // Finite values must agree within the fixed ZERO tolerance.
+ return Math.abs(a - b) <= ZERO;
+ }
+
+ private static boolean testExpression(LiteralExpression literal, LiteralExpression literal2,
+ LiteralExpression literal3, double exptFor15, double exptFor2, double exptFor3)
+ throws SQLException {
+ List<Expression> expressions15 = Lists.newArrayList(literal, ONE_POINT_FIVE);
+ List<Expression> expressions2 = Lists.newArrayList(literal2, TWO);
+ List<Expression> expressions3 = Lists.newArrayList(literal3, THREE);
+
+ ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+
+ Expression powerFunction15 = new PowerFunction(expressions15);
+ boolean ret15 = powerFunction15.evaluate(null, ptr);
+ if (ret15) {
+ Double result =
+ (Double) powerFunction15.getDataType().toObject(ptr,
+ powerFunction15.getSortOrder());
+ assertTrue(twoDoubleEquals(result.doubleValue(), exptFor15));
+ }
+
+ Expression powerFunction2 = new PowerFunction(expressions2);
+ boolean ret2 = powerFunction2.evaluate(null, ptr);
+ if (ret2) {
+ Double result =
+ (Double) powerFunction2.getDataType().toObject(ptr,
+ powerFunction2.getSortOrder());
+ assertTrue(twoDoubleEquals(result.doubleValue(), exptFor2));
+ }
+ assertEquals(ret15, ret2);
+
+ Expression powerFunction3 = new PowerFunction(expressions3);
+ boolean ret3 = powerFunction3.evaluate(null, ptr);
+ if (ret3) {
+ Double result =
+ (Double) powerFunction3.getDataType().toObject(ptr,
+ powerFunction3.getSortOrder());
+ assertTrue(twoDoubleEquals(result.doubleValue(), exptFor3));
+ }
+ assertEquals(ret15, ret3);
+ return ret15;
+ }
+
+ private static void test(Number value, PNumericType dataType, double exptFor15,
+ double exptFor2, double exptFor3) throws SQLException {
+ LiteralExpression literal, literal2, literal3;
+ literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC);
+ literal2 = LiteralExpression.newConstant(value, dataType, SortOrder.ASC);
+ literal3 = LiteralExpression.newConstant(value, dataType, SortOrder.ASC);
+ boolean ret1 = testExpression(literal, literal2, literal3, exptFor15, exptFor2, exptFor3);
+ literal = LiteralExpression.newConstant(value, dataType, SortOrder.DESC);
+ literal2 = LiteralExpression.newConstant(value, dataType, SortOrder.DESC);
+ literal3 = LiteralExpression.newConstant(value, dataType, SortOrder.DESC);
+ boolean ret2 = testExpression(literal, literal2, literal3, exptFor15, exptFor2, exptFor3);
+ assertEquals(ret1, ret2);
+ }
+
+ private static void testBatch(Number[] value, PNumericType dataType) throws SQLException {
+ double[][] expected = new double[value.length][3];
+ for (int i = 0; i < expected.length; ++i) {
+ expected[i][0] = Math.pow(value[i].doubleValue(), 1.5);
+ expected[i][1] = Math.pow(value[i].doubleValue(), 2);
+ expected[i][2] = Math.pow(value[i].doubleValue(), 3);
+ }
+ assertEquals(value.length, expected.length);
+ for (int i = 0; i < value.length; ++i) {
+ test(value[i], dataType, expected[i][0], expected[i][1], expected[i][2]);
+ }
+ }
+
+ @Test
+ public void testPowerFunction() throws Exception {
+ Random random = new Random();
+
+ testBatch(
+ new BigDecimal[] { BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0),
+ BigDecimal.valueOf(-1.0), BigDecimal.valueOf(123.1234),
+ BigDecimal.valueOf(-123.1234), BigDecimal.valueOf(random.nextDouble()),
+ BigDecimal.valueOf(random.nextDouble()) }, PDecimal.INSTANCE);
+
+ testBatch(new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, random.nextFloat(),
+ random.nextFloat() }, PFloat.INSTANCE);
+
+ testBatch(new Float[] { 1.0f, 0.0f, 123.1234f, }, PUnsignedFloat.INSTANCE);
+
+ testBatch(
+ new Double[] { 1.0, 0.0, -1.0, 123.1234, -123.1234, random.nextDouble(),
+ random.nextDouble() }, PDouble.INSTANCE);
+
+ testBatch(new Double[] { 1.0, 0.0, 123.1234, }, PUnsignedDouble.INSTANCE);
+
+ testBatch(
+ new Long[] { 1L, 0L, -1L, Long.MAX_VALUE, Long.MIN_VALUE, 123L, -123L,
+ random.nextLong(), random.nextLong() }, PLong.INSTANCE);
+
+ testBatch(new Long[] { 1L, 0L, Long.MAX_VALUE, 123L }, PUnsignedLong.INSTANCE);
+
+ testBatch(
+ new Integer[] { 1, 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, 123, -123,
+ random.nextInt(), random.nextInt() }, PInteger.INSTANCE);
+
+ testBatch(new Integer[] { 1, 0, Integer.MAX_VALUE, 123 }, PUnsignedInt.INSTANCE);
+
+ testBatch(new Short[] { (short) 1, (short) 0, (short) -1, Short.MAX_VALUE, Short.MIN_VALUE,
+ (short) 123, (short) -123 }, PSmallint.INSTANCE);
+
+ testBatch(new Short[] { (short) 1, (short) 0, Short.MAX_VALUE, (short) 123 },
+ PUnsignedSmallint.INSTANCE);
+
+ testBatch(new Byte[] { (byte) 1, (byte) 0, (byte) -1, Byte.MAX_VALUE, Byte.MIN_VALUE,
+ (byte) 123, (byte) -123 }, PTinyint.INSTANCE);
+
+ testBatch(new Byte[] { (byte) 1, (byte) 0, Byte.MAX_VALUE, (byte) 123 }, PUnsignedTinyint.INSTANCE);
+ }
+}
[07/47] phoenix git commit: PHOENIX-1981 : PhoenixHBase Load and Store Funcs should handle all Pig data types
Posted by ma...@apache.org.
PHOENIX-1981 : PhoenixHBase Load and Store Funcs should handle all Pig data types
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8a0dee77
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8a0dee77
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8a0dee77
Branch: refs/heads/calcite
Commit: 8a0dee77c67761c57feae31350c84304ccc44c07
Parents: 8076126
Author: Prashant Kommireddi <pk...@pkommireddi-ltm.internal.salesforce.com>
Authored: Mon May 18 19:47:01 2015 -0700
Committer: Eli Levine <el...@apache.org>
Committed: Mon Jun 15 18:17:45 2015 -0700
----------------------------------------------------------------------
.../org/apache/phoenix/pig/util/TypeUtil.java | 24 ++++++++++++++------
.../apache/phoenix/pig/util/TypeUtilTest.java | 20 ++++++++++++++++
2 files changed, 37 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
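
Note that the TypeUtil hunk reproduced below only covers the license header; the functional part of PHOENIX-1981 is the widened Pig-to-Phoenix type mapping. For orientation, a hedged sketch of that kind of mapping follows (constant names come from Pig's org.apache.pig.data.DataType and Phoenix's PDataType singletons, but the method name and exact coverage here are assumptions, not the committed TypeUtil code):

import org.apache.phoenix.schema.types.*;
import org.apache.pig.data.DataType;

// Illustrative only: the committed TypeUtil may differ in method name,
// type coverage, and error handling.
final class PigToPhoenixTypeSketch {
    static PDataType<?> toPhoenixType(byte pigType) {
        switch (pigType) {
        case DataType.BOOLEAN:    return PBoolean.INSTANCE;
        case DataType.INTEGER:    return PInteger.INSTANCE;
        case DataType.LONG:       return PLong.INSTANCE;
        case DataType.FLOAT:      return PFloat.INSTANCE;
        case DataType.DOUBLE:     return PDouble.INSTANCE;
        case DataType.CHARARRAY:  return PVarchar.INSTANCE;
        case DataType.BYTEARRAY:  return PVarbinary.INSTANCE;
        case DataType.BIGDECIMAL: return PDecimal.INSTANCE;
        default:
            throw new IllegalArgumentException("Unsupported Pig type: " + pigType);
        }
    }
}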
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8a0dee77/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java b/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
index 6549445..c8bc9d8 100644
--- a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
+++ b/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
@@ -1,11 +1,21 @@
/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
- * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
- * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
- * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language
- * governing permissions and limitations under the License.
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you maynot use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*/
package org.apache.phoenix.pig.util;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8a0dee77/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
----------------------------------------------------------------------
diff --git a/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java b/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
index 25d9f48..56167f6 100644
--- a/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
+++ b/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
@@ -1,3 +1,23 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you maynot use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package org.apache.phoenix.pig.util;
import static org.junit.Assert.assertEquals;
[03/47] phoenix git commit: PHOENIX-2040 Mark spark/scala dependencies as 'provided' (Josh Mahonin)
Posted by ma...@apache.org.
PHOENIX-2040 Mark spark/scala dependencies as 'provided' (Josh Mahonin)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b61ef77e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b61ef77e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b61ef77e
Branch: refs/heads/calcite
Commit: b61ef77e509a024ccaf6c3ce74c385c31c5f534a
Parents: c2927dd
Author: Nick Dimiduk <nd...@apache.org>
Authored: Mon Jun 15 16:16:03 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Mon Jun 15 16:16:03 2015 -0700
----------------------------------------------------------------------
phoenix-assembly/pom.xml | 4 ++++
phoenix-spark/pom.xml | 51 ++++++++++++++++++++++++-------------------
2 files changed, 32 insertions(+), 23 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b61ef77e/phoenix-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index baf6738..51ff74d 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -152,6 +152,10 @@
</dependency>
<dependency>
<groupId>org.apache.phoenix</groupId>
+ <artifactId>phoenix-spark</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.phoenix</groupId>
<artifactId>phoenix-server</artifactId>
</dependency>
<dependency>
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b61ef77e/phoenix-spark/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index 7086bb6..289801a 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -45,12 +45,7 @@
<groupId>org.apache.phoenix</groupId>
<artifactId>phoenix-core</artifactId>
</dependency>
- <dependency>
- <groupId>org.apache.phoenix</groupId>
- <artifactId>phoenix-core</artifactId>
- <classifier>tests</classifier>
- <scope>test</scope>
- </dependency>
+
<!-- Force import of Spark's servlet API for unit tests -->
<dependency>
<groupId>javax.servlet</groupId>
@@ -59,16 +54,38 @@
<scope>test</scope>
</dependency>
+ <!-- Mark Spark / Scala as provided -->
<dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
+ <groupId>org.scala-lang</groupId>
+ <artifactId>scala-library</artifactId>
+ <version>${scala.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.spark</groupId>
+ <artifactId>spark-core_${scala.binary.version}</artifactId>
+ <version>${spark.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.spark</groupId>
+ <artifactId>spark-sql_${scala.binary.version}</artifactId>
+ <version>${spark.version}</version>
+ <scope>provided</scope>
+ </dependency>
+
+ <!-- Test dependencies -->
+ <dependency>
+ <groupId>org.apache.phoenix</groupId>
+ <artifactId>phoenix-core</artifactId>
+ <classifier>tests</classifier>
<scope>test</scope>
</dependency>
<dependency>
- <groupId>org.scala-lang</groupId>
- <artifactId>scala-library</artifactId>
- <version>${scala.version}</version>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <scope>test</scope>
</dependency>
<dependency>
@@ -86,18 +103,6 @@
</dependency>
<dependency>
- <groupId>org.apache.spark</groupId>
- <artifactId>spark-core_${scala.binary.version}</artifactId>
- <version>${spark.version}</version>
- </dependency>
-
- <dependency>
- <groupId>org.apache.spark</groupId>
- <artifactId>spark-sql_${scala.binary.version}</artifactId>
- <version>${spark.version}</version>
- </dependency>
-
- <dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop-two.version}</version>
[21/47] phoenix git commit: PHOENIX-2021 - Implement ARRAY_CAT built in function (Dumindu Buddhika)
Posted by ma...@apache.org.
PHOENIX-2021 - Implement ARRAY_CAT built in function (Dumindu Buddhika)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7385899d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7385899d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7385899d
Branch: refs/heads/calcite
Commit: 7385899d966e38cfc798fd509445db24653ad7de
Parents: 7175dcb
Author: ramkrishna <ra...@gmail.com>
Authored: Sun Jun 21 22:05:13 2015 +0530
Committer: ramkrishna <ra...@gmail.com>
Committed: Sun Jun 21 22:14:16 2015 +0530
----------------------------------------------------------------------
.../phoenix/end2end/ArrayAppendFunctionIT.java | 17 -
.../phoenix/end2end/ArrayConcatFunctionIT.java | 578 ++++++++++++++++++
.../phoenix/expression/ExpressionType.java | 4 +-
.../function/ArrayAppendFunction.java | 53 +-
.../function/ArrayConcatFunction.java | 83 +++
.../function/ArrayModifierFunction.java | 155 ++++-
.../function/ArrayPrependFunction.java | 54 +-
.../phoenix/schema/types/PArrayDataType.java | 163 +++++-
.../expression/ArrayConcatFunctionTest.java | 584 +++++++++++++++++++
9 files changed, 1543 insertions(+), 148 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7385899d/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayAppendFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayAppendFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayAppendFunctionIT.java
index 1957b3a..cf45724 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayAppendFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayAppendFunctionIT.java
@@ -497,23 +497,6 @@ public class ArrayAppendFunctionIT extends BaseHBaseManagedTimeIT {
}
@Test
- public void testArrayAppendFunctionIntegerWithNull() throws Exception {
- Connection conn = DriverManager.getConnection(getUrl());
- initTables(conn);
-
- ResultSet rs;
- rs = conn.createStatement().executeQuery("SELECT ARRAY_APPEND(NULL,NULL) FROM regions WHERE region_name = 'SF Bay Area'");
- assertTrue(rs.next());
-
- Integer[] integers = new Integer[]{2345, 46345, 23234, 456};
-
- Array array = conn.createArrayOf("INTEGER", integers);
-
- assertEquals(null, rs.getArray(1));
- assertFalse(rs.next());
- }
-
- @Test
public void testArrayAppendFunctionVarcharWithNull() throws Exception {
Connection conn = DriverManager.getConnection(getUrl());
initTables(conn);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7385899d/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayConcatFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayConcatFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayConcatFunctionIT.java
new file mode 100644
index 0000000..247bfb7
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayConcatFunctionIT.java
@@ -0,0 +1,578 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.phoenix.schema.TypeMismatchException;
+import org.junit.Test;
+
+import java.sql.*;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class ArrayConcatFunctionIT extends BaseHBaseManagedTimeIT {
+
+ private void initTables(Connection conn) throws Exception {
+ String ddl = "CREATE TABLE regions (region_name VARCHAR PRIMARY KEY,varchars VARCHAR[],integers INTEGER[],doubles DOUBLE[],bigints BIGINT[],chars CHAR(15)[],double1 DOUBLE,char1 CHAR(17),nullcheck INTEGER,chars2 CHAR(15)[])";
+ conn.createStatement().execute(ddl);
+ String dml = "UPSERT INTO regions(region_name,varchars,integers,doubles,bigints,chars,double1,char1,nullcheck,chars2) VALUES('SF Bay Area'," +
+ "ARRAY['2345','46345','23234']," +
+ "ARRAY[2345,46345,23234,456]," +
+ "ARRAY[23.45,46.345,23.234,45.6,5.78]," +
+ "ARRAY[12,34,56,78,910]," +
+ "ARRAY['a','bbbb','c','ddd','e']," +
+ "23.45," +
+ "'wert'," +
+ "NULL," +
+ "ARRAY['a','bbbb','c','ddd','e','foo']" +
+ ")";
+ PreparedStatement stmt = conn.prepareStatement(dml);
+ stmt.execute();
+ conn.commit();
+ }
+
+ @Test
+ public void testArrayConcatFunctionVarchar() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_CAT(varchars,varchars) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ String[] strings = new String[]{"2345", "46345", "23234", "2345", "46345", "23234"};
+
+ Array array = conn.createArrayOf("VARCHAR", strings);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionInteger() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_CAT(integers,integers) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Integer[] integers = new Integer[]{2345, 46345, 23234, 456, 2345, 46345, 23234, 456};
+
+ Array array = conn.createArrayOf("INTEGER", integers);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionDouble() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_CAT(doubles,doubles) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Double[] doubles = new Double[]{23.45, 46.345, 23.234, 45.6, 5.78, 23.45, 46.345, 23.234, 45.6, 5.78};
+
+ Array array = conn.createArrayOf("DOUBLE", doubles);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionDouble2() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_CAT(doubles,ARRAY[23]) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Double[] doubles = new Double[]{23.45, 46.345, 23.234, 45.6, 5.78, new Double(23)};
+
+ Array array = conn.createArrayOf("DOUBLE", doubles);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionBigint() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_CAT(bigints,bigints) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Long[] longs = new Long[]{12l, 34l, 56l, 78l, 910l, 12l, 34l, 56l, 78l, 910l};
+
+ Array array = conn.createArrayOf("BIGINT", longs);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionChar() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_CAT(chars,chars) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ String[] strings = new String[]{"a", "bbbb", "c", "ddd", "e", "a", "bbbb", "c", "ddd", "e"};
+
+ Array array = conn.createArrayOf("CHAR", strings);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionChar3() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_CAT(chars,chars2) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ String[] strings = new String[]{"a", "bbbb", "c", "ddd", "e", "a", "bbbb", "c", "ddd", "e", "foo"};
+
+ Array array = conn.createArrayOf("CHAR", strings);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test(expected = TypeMismatchException.class)
+ public void testArrayConcatFunctionIntToCharArray() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_CAT(varchars,ARRAY[23,45]) FROM regions WHERE region_name = 'SF Bay Area'");
+ }
+
+ @Test(expected = TypeMismatchException.class)
+ public void testArrayConcatFunctionVarcharToIntegerArray() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_CAT(integers,ARRAY['a', 'b']) FROM regions WHERE region_name = 'SF Bay Area'");
+
+ }
+
+ @Test(expected = SQLException.class)
+ public void testArrayConcatFunctionChar2() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_CAT(chars,ARRAY['facfacfacfacfacfacfac','facfacfacfacfacfacfac']) FROM regions WHERE region_name = 'SF Bay Area'");
+ rs.next();
+ rs.getArray(1);
+ }
+
+ @Test
+ public void testArrayConcatFunctionIntegerArrayToDoubleArray() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_CAT(doubles,ARRAY[45, 55]) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Double[] doubles = new Double[]{23.45, 46.345, 23.234, 45.6, 5.78, 45.0, 55.0};
+
+ Array array = conn.createArrayOf("DOUBLE", doubles);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionWithNestedFunctions1() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_CAT(ARRAY[23,45],ARRAY[integers[1],integers[1]]) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Integer[] integers = new Integer[]{23, 45, 2345, 2345};
+
+ Array array = conn.createArrayOf("INTEGER", integers);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionWithNestedFunctions2() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_CAT(integers,ARRAY[ARRAY_ELEM(ARRAY[2,4],1),ARRAY_ELEM(ARRAY[2,4],2)]) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Integer[] integers = new Integer[]{2345, 46345, 23234, 456, 2, 4};
+
+ Array array = conn.createArrayOf("INTEGER", integers);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionWithNestedFunctions3() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_CAT(doubles,ARRAY[ARRAY_ELEM(doubles, 1), ARRAY_ELEM(doubles, 1)]) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Double[] doubles = new Double[]{23.45, 46.345, 23.234, 45.6, 5.78, 23.45, 23.45};
+
+ Array array = conn.createArrayOf("DOUBLE", doubles);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionWithUpsert1() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+
+ String ddl = "CREATE TABLE regions (region_name VARCHAR PRIMARY KEY,varchars VARCHAR[])";
+ conn.createStatement().execute(ddl);
+
+ String dml = "UPSERT INTO regions(region_name,varchars) VALUES('SF Bay Area',ARRAY_CAT(ARRAY['hello','world'],ARRAY[':-)']))";
+ conn.createStatement().execute(dml);
+ conn.commit();
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT varchars FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ String[] strings = new String[]{"hello", "world", ":-)"};
+
+ Array array = conn.createArrayOf("VARCHAR", strings);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionWithUpsert2() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+
+ String ddl = "CREATE TABLE regions (region_name VARCHAR PRIMARY KEY,integers INTEGER[])";
+ conn.createStatement().execute(ddl);
+
+ String dml = "UPSERT INTO regions(region_name,integers) VALUES('SF Bay Area',ARRAY_CAT(ARRAY[4,5],ARRAY[6, 7]))";
+ conn.createStatement().execute(dml);
+ conn.commit();
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT integers FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Integer[] integers = new Integer[]{4, 5, 6, 7};
+
+ Array array = conn.createArrayOf("INTEGER", integers);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionWithUpsert3() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+
+ String ddl = "CREATE TABLE regions (region_name VARCHAR PRIMARY KEY,doubles DOUBLE[])";
+ conn.createStatement().execute(ddl);
+
+ String dml = "UPSERT INTO regions(region_name,doubles) VALUES('SF Bay Area',ARRAY_CAT(ARRAY[5.67,7.87],ARRAY[9.0, 8.0]))";
+ conn.createStatement().execute(dml);
+ conn.commit();
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT doubles FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Double[] doubles = new Double[]{5.67, 7.87, new Double(9), new Double(8)};
+
+ Array array = conn.createArrayOf("DOUBLE", doubles);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionWithUpsertSelect1() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+
+ String ddl = "CREATE TABLE source (region_name VARCHAR PRIMARY KEY,doubles DOUBLE[])";
+ conn.createStatement().execute(ddl);
+
+ ddl = "CREATE TABLE target (region_name VARCHAR PRIMARY KEY,doubles DOUBLE[])";
+ conn.createStatement().execute(ddl);
+
+ String dml = "UPSERT INTO source(region_name,doubles) VALUES('SF Bay Area',ARRAY_CAT(ARRAY[5.67,7.87],ARRAY[9.0, 4.0]))";
+ conn.createStatement().execute(dml);
+
+ dml = "UPSERT INTO source(region_name,doubles) VALUES('SF Bay Area2',ARRAY_CAT(ARRAY[56.7,7.87],ARRAY[9.2, 3.4]))";
+ conn.createStatement().execute(dml);
+ conn.commit();
+
+ dml = "UPSERT INTO target(region_name, doubles) SELECT region_name, ARRAY_CAT(doubles,doubles) FROM source";
+ conn.createStatement().execute(dml);
+ conn.commit();
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT doubles FROM target");
+ assertTrue(rs.next());
+
+ Double[] doubles = new Double[]{5.67, 7.87, new Double(9), new Double(4), 5.67, 7.87, new Double(9), new Double(4)};
+ Array array = conn.createArrayOf("DOUBLE", doubles);
+
+ assertEquals(array, rs.getArray(1));
+ assertTrue(rs.next());
+
+ doubles = new Double[]{56.7, 7.87, new Double(9.2), new Double(3.4), 56.7, 7.87, new Double(9.2), new Double(3.4)};
+ array = conn.createArrayOf("DOUBLE", doubles);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionWithUpsertSelect2() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+
+ String ddl = "CREATE TABLE source (region_name VARCHAR PRIMARY KEY,varchars VARCHAR[])";
+ conn.createStatement().execute(ddl);
+
+ ddl = "CREATE TABLE target (region_name VARCHAR PRIMARY KEY,varchars VARCHAR[])";
+ conn.createStatement().execute(ddl);
+
+ String dml = "UPSERT INTO source(region_name,varchars) VALUES('SF Bay Area',ARRAY_CAT(ARRAY['abcd','b'],ARRAY['c', 'd']))";
+ conn.createStatement().execute(dml);
+
+ dml = "UPSERT INTO source(region_name,varchars) VALUES('SF Bay Area2',ARRAY_CAT(ARRAY['d','fgh'],ARRAY['something','something']))";
+ conn.createStatement().execute(dml);
+ conn.commit();
+
+ dml = "UPSERT INTO target(region_name, varchars) SELECT region_name, ARRAY_CAT(varchars,varchars) FROM source";
+ conn.createStatement().execute(dml);
+ conn.commit();
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT varchars FROM target");
+ assertTrue(rs.next());
+
+ String[] strings = new String[]{"abcd", "b", "c", "d", "abcd", "b", "c", "d"};
+ Array array = conn.createArrayOf("VARCHAR", strings);
+
+ assertEquals(array, rs.getArray(1));
+ assertTrue(rs.next());
+
+ strings = new String[]{"d", "fgh", "something", "something", "d", "fgh", "something", "something"};
+ array = conn.createArrayOf("VARCHAR", strings);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionInWhere1() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE ARRAY[2345,46345,23234,456,123]=ARRAY_CAT(integers,ARRAY[123])");
+ assertTrue(rs.next());
+
+ assertEquals("SF Bay Area", rs.getString(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionInWhere2() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE varchars[1]=ANY(ARRAY_CAT(ARRAY['2345','46345','23234'],ARRAY['1234']))");
+ assertTrue(rs.next());
+
+ assertEquals("SF Bay Area", rs.getString(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionInWhere3() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE ARRAY['2345','46345','23234','1234','234']=ARRAY_CAT(ARRAY['2345','46345','23234'],ARRAY['1234', '234'])");
+ assertTrue(rs.next());
+
+ assertEquals("SF Bay Area", rs.getString(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionInWhere4() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE ARRAY[23.45,4634.5,2.3234,123.4,12.0]=ARRAY_CAT(ARRAY[23.45,4634.5,2.3234],ARRAY[123.4,12.0])");
+ assertTrue(rs.next());
+
+ assertEquals("SF Bay Area", rs.getString(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionInWhere5() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE ARRAY['2345','46345','23234','foo','foo']=ARRAY_CAT(varchars,ARRAY['foo','foo'])");
+ assertTrue(rs.next());
+
+ assertEquals("SF Bay Area", rs.getString(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionInWhere6() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE chars2=ARRAY_CAT(chars,ARRAY['foo'])");
+ assertTrue(rs.next());
+
+ assertEquals("SF Bay Area", rs.getString(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionInWhere7() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE ARRAY[2,3,4,5]=ARRAY_CAT(ARRAY[2,3],ARRAY[4,5])");
+ assertTrue(rs.next());
+
+ assertEquals("SF Bay Area", rs.getString(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionWithNulls1() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ PreparedStatement st = conn.prepareStatement("SELECT ARRAY_CAT(?,?) FROM regions WHERE region_name = 'SF Bay Area'");
+ Array array1 = conn.createArrayOf("VARCHAR", new Object[]{"a", "b", "c", null});
+ st.setArray(1, array1);
+ Array array2 = conn.createArrayOf("VARCHAR", new Object[]{"a", "b", "c"});
+ st.setArray(2, array2);
+ rs = st.executeQuery();
+ assertTrue(rs.next());
+
+ Array expected = conn.createArrayOf("VARCHAR", new Object[]{"a", "b", "c", null, "a", "b", "c"});
+
+ assertEquals(expected, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionWithNulls2() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ PreparedStatement st = conn.prepareStatement("SELECT ARRAY_CAT(?,?) FROM regions WHERE region_name = 'SF Bay Area'");
+ Array array1 = conn.createArrayOf("VARCHAR", new Object[]{"a", "b", "c"});
+ st.setArray(1, array1);
+ Array array2 = conn.createArrayOf("VARCHAR", new Object[]{null, "a", "b", "c"});
+ st.setArray(2, array2);
+ rs = st.executeQuery();
+ assertTrue(rs.next());
+
+ Array expected = conn.createArrayOf("VARCHAR", new Object[]{"a", "b", "c", null, "a", "b", "c"});
+
+ assertEquals(expected, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionWithNulls3() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ PreparedStatement st = conn.prepareStatement("SELECT ARRAY_CAT(?,?) FROM regions WHERE region_name = 'SF Bay Area'");
+ Array array1 = conn.createArrayOf("VARCHAR", new Object[]{"a", "b", "c", null});
+ st.setArray(1, array1);
+ Array array2 = conn.createArrayOf("VARCHAR", new Object[]{null, "a", "b", "c"});
+ st.setArray(2, array2);
+ rs = st.executeQuery();
+ assertTrue(rs.next());
+
+ Array expected = conn.createArrayOf("VARCHAR", new Object[]{"a", "b", "c", null, null, "a", "b", "c"});
+
+ assertEquals(expected, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayConcatFunctionWithNulls4() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ PreparedStatement st = conn.prepareStatement("SELECT ARRAY_CAT(?,?) FROM regions WHERE region_name = 'SF Bay Area'");
+ Array array1 = conn.createArrayOf("VARCHAR", new Object[]{null, "a", null, "b", "c", null, null});
+ st.setArray(1, array1);
+ Array array2 = conn.createArrayOf("VARCHAR", new Object[]{null, null, "a", null, "b", null, "c", null});
+ st.setArray(2, array2);
+ rs = st.executeQuery();
+ assertTrue(rs.next());
+
+ Array expected = conn.createArrayOf("VARCHAR", new Object[]{null, "a", null, "b", "c", null, null, null, null, "a", null, "b", null, "c", null});
+
+ assertEquals(expected, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+}
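
Taken together, the four WithNulls tests above pin down ARRAY_CAT's null semantics: the result is a straight element-wise append, with nulls in either input carried through as ordinary elements (nothing is trimmed or collapsed). A reference model of that observable behavior (a sketch for clarity, not the PArrayDataType implementation, which operates on the serialized array bytes):

// Mirrors the observable semantics only; Phoenix's real concatenation works
// on the serialized array representation in PArrayDataType.
static Object[] arrayCatModel(Object[] first, Object[] second) {
    Object[] out = new Object[first.length + second.length];
    System.arraycopy(first, 0, out, 0, first.length);
    System.arraycopy(second, 0, out, first.length, second.length);
    return out;
}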
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7385899d/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
index 4f98cb8..51f4089 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
@@ -23,6 +23,7 @@ import org.apache.phoenix.expression.function.AbsFunction;
import org.apache.phoenix.expression.function.ArrayAllComparisonExpression;
import org.apache.phoenix.expression.function.ArrayAnyComparisonExpression;
import org.apache.phoenix.expression.function.ArrayAppendFunction;
+import org.apache.phoenix.expression.function.ArrayConcatFunction;
import org.apache.phoenix.expression.function.ArrayElemRefExpression;
import org.apache.phoenix.expression.function.ArrayIndexFunction;
import org.apache.phoenix.expression.function.ArrayLengthFunction;
@@ -245,7 +246,8 @@ public enum ExpressionType {
LnFunction(LnFunction.class),
LogFunction(LogFunction.class),
ExpFunction(ExpFunction.class),
- PowerFunction(PowerFunction.class)
+ PowerFunction(PowerFunction.class),
+ ArrayConcatFunction(ArrayConcatFunction.class)
;
ExpressionType(Class<? extends Expression> clazz) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7385899d/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayAppendFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayAppendFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayAppendFunction.java
index bf6c29f..8c7fa9f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayAppendFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayAppendFunction.java
@@ -20,18 +20,13 @@ package org.apache.phoenix.expression.function;
import java.util.List;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.phoenix.exception.DataExceedsCapacityException;
import org.apache.phoenix.expression.Expression;
-import org.apache.phoenix.expression.LiteralExpression;
import org.apache.phoenix.parse.FunctionParseNode;
-import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.TypeMismatchException;
import org.apache.phoenix.schema.types.*;
-import org.apache.phoenix.schema.tuple.Tuple;
@FunctionParseNode.BuiltInFunction(name = ArrayAppendFunction.NAME, args = {
- @FunctionParseNode.Argument(allowedTypes = {PBinaryArray.class,
- PVarbinaryArray.class}),
+ @FunctionParseNode.Argument(allowedTypes = {PBinaryArray.class, PVarbinaryArray.class}),
@FunctionParseNode.Argument(allowedTypes = {PVarbinary.class}, defaultValue = "null")})
public class ArrayAppendFunction extends ArrayModifierFunction {
@@ -45,54 +40,12 @@ public class ArrayAppendFunction extends ArrayModifierFunction {
}
@Override
- public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
-
- if (!getArrayExpr().evaluate(tuple, ptr)) {
- return false;
- } else if (ptr.getLength() == 0) {
- return true;
- }
- int arrayLength = PArrayDataType.getArrayLength(ptr, getBaseType(), getArrayExpr().getMaxLength());
-
- int length = ptr.getLength();
- int offset = ptr.getOffset();
- byte[] arrayBytes = ptr.get();
-
- if (!getElementExpr().evaluate(tuple, ptr) || ptr.getLength() == 0) {
- ptr.set(arrayBytes, offset, length);
- return true;
- }
-
- checkSizeCompatibility(ptr);
- coerceBytes(ptr);
- return PArrayDataType.appendItemToArray(ptr, length, offset, arrayBytes, getBaseType(), arrayLength, getMaxLength(), getArrayExpr().getSortOrder());
- }
-
- @Override
- public PDataType getDataType() {
- return children.get(0).getDataType();
- }
-
- @Override
- public Integer getMaxLength() {
- return this.children.get(0).getMaxLength();
- }
-
- @Override
- public SortOrder getSortOrder() {
- return getChildren().get(0).getSortOrder();
+ protected boolean modifierFunction(ImmutableBytesWritable ptr, int len, int offset, byte[] arrayBytes, PDataType baseDataType, int arrayLength, Integer maxLength, Expression arrayExp) {
+ return PArrayDataType.appendItemToArray(ptr, len, offset, arrayBytes, baseDataType, arrayLength, getMaxLength(), arrayExp.getSortOrder());
}
@Override
public String getName() {
return NAME;
}
-
- public Expression getArrayExpr() {
- return getChildren().get(0);
- }
-
- public Expression getElementExpr() {
- return getChildren().get(1);
- }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7385899d/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayConcatFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayConcatFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayConcatFunction.java
new file mode 100644
index 0000000..d2b846a
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayConcatFunction.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.FunctionParseNode;
+import org.apache.phoenix.schema.TypeMismatchException;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PArrayDataType;
+import org.apache.phoenix.schema.types.PBinaryArray;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PVarbinaryArray;
+
+@FunctionParseNode.BuiltInFunction(name = ArrayConcatFunction.NAME, args = {
+ @FunctionParseNode.Argument(allowedTypes = {PBinaryArray.class, PVarbinaryArray.class}),
+ @FunctionParseNode.Argument(allowedTypes = {PBinaryArray.class, PVarbinaryArray.class})})
+public class ArrayConcatFunction extends ArrayModifierFunction {
+
+ public static final String NAME = "ARRAY_CAT";
+
+ public ArrayConcatFunction() {
+ }
+
+ public ArrayConcatFunction(List<Expression> children) throws TypeMismatchException {
+ super(children);
+ }
+
+
+ @Override
+ public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
+
+ if (!getLHSExpr().evaluate(tuple, ptr) || ptr.getLength() == 0) {
+ return false;
+ }
+
+ int actualLengthOfArray1 = Math.abs(PArrayDataType.getArrayLength(ptr, getLHSBaseType(), getLHSExpr().getMaxLength()));
+ int lengthArray1 = ptr.getLength();
+ int offsetArray1 = ptr.getOffset();
+ byte[] array1Bytes = ptr.get();
+ if (!getRHSExpr().evaluate(tuple, ptr) || ptr.getLength() == 0) {
+ ptr.set(array1Bytes, offsetArray1, lengthArray1);
+ return true;
+ }
+
+ checkSizeCompatibility(ptr, getLHSExpr(), getLHSExpr().getDataType(), getRHSExpr(), getRHSExpr().getDataType());
+
+ // Coerce array2 to array1 type
+ coerceBytes(ptr, getLHSExpr(), getLHSExpr().getDataType(), getRHSExpr(), getRHSExpr().getDataType());
+ return modifierFunction(ptr, lengthArray1, offsetArray1, array1Bytes, getLHSBaseType(), actualLengthOfArray1, getMaxLength(), getLHSExpr());
+ }
+
+ @Override
+ protected boolean modifierFunction(ImmutableBytesWritable ptr, int len, int offset,
+ byte[] array1Bytes, PDataType baseDataType, int actualLengthOfArray1, Integer maxLength,
+ Expression array1Exp) {
+ int actualLengthOfArray2 = Math.abs(PArrayDataType.getArrayLength(ptr, baseDataType, array1Exp.getMaxLength()));
+ return PArrayDataType.concatArrays(ptr, len, offset, array1Bytes, baseDataType, actualLengthOfArray1, actualLengthOfArray2);
+ }
+
+ @Override
+ public String getName() {
+ return NAME;
+ }
+
+}
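A behavioral note, reading directly from the evaluate() flow above rather than from any separate documentation: the two arguments are not symmetric with respect to nulls. If the right-hand array evaluates to null or empty, the pointer is reset to the left-hand array's bytes and the result is the first array unchanged; if the left-hand array evaluates to null or empty, evaluate() returns false and the whole expression is null. Illustratively, ARRAY_CAT(ARRAY['a','b'], NULL) yields ARRAY['a','b'], while ARRAY_CAT(NULL, ARRAY['a','b']) yields NULL.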
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7385899d/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayModifierFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayModifierFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayModifierFunction.java
index afd10e5..3177c29 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayModifierFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayModifierFunction.java
@@ -24,7 +24,9 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.exception.DataExceedsCapacityException;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.LiteralExpression;
+import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.TypeMismatchException;
+import org.apache.phoenix.schema.tuple.Tuple;
import org.apache.phoenix.schema.types.*;
public abstract class ArrayModifierFunction extends ScalarFunction {
@@ -34,42 +36,153 @@ public abstract class ArrayModifierFunction extends ScalarFunction {
public ArrayModifierFunction(List<Expression> children) throws TypeMismatchException {
super(children);
+ Expression arrayExpr = null;
+ PDataType baseDataType = null;
+ Expression otherExpr = null;
+ PDataType otherExpressionType = null;
+ if (getLHSExpr().getDataType().isArrayType()) {
+ arrayExpr = getLHSExpr();
+ baseDataType = getLHSBaseType();
+ otherExpr = getRHSExpr();
+ otherExpressionType = getRHSBaseType();
+ } else {
+ arrayExpr = getRHSExpr();
+ baseDataType = getRHSBaseType();
+ otherExpr = getLHSExpr();
+ otherExpressionType = getLHSBaseType();
+ }
+ if (getDataType() != null && !(otherExpr instanceof LiteralExpression && otherExpr.isNullable()) && !otherExpressionType.isCoercibleTo(baseDataType)) {
+ throw TypeMismatchException.newException(baseDataType, otherExpressionType);
+ }
- if (getDataType() != null && !(getElementExpr() instanceof LiteralExpression && getElementExpr().isNullable()) && !getElementDataType().isCoercibleTo(getBaseType())) {
- throw TypeMismatchException.newException(getBaseType(), getElementDataType());
+ // If the base type of an element is fixed width, make sure the element
+ // being appended will fit
+ if (getDataType() != null && otherExpressionType.getByteSize() == null
+ && otherExpressionType != null && baseDataType.isFixedWidth()
+ && otherExpressionType.isFixedWidth() && arrayExpr.getMaxLength() != null
+ && otherExpr.getMaxLength() != null
+ && otherExpr.getMaxLength() > arrayExpr.getMaxLength()) {
+ throw new DataExceedsCapacityException("Values are not size compatible");
}
+ // If the base type has a scale, make sure the element being appended has a
+ // scale less than or equal to it
+ if (getDataType() != null && arrayExpr.getScale() != null && otherExpr.getScale() != null
+ && otherExpr.getScale() > arrayExpr.getScale()) {
+ throw new DataExceedsCapacityException(baseDataType, arrayExpr.getMaxLength(),
+ arrayExpr.getScale());
+ }
+ }
- // If the base type of an element is fixed width, make sure the element being appended will fit
- if (getDataType() != null && getElementExpr().getDataType().getByteSize() == null && getElementDataType() != null && getBaseType().isFixedWidth() && getElementDataType().isFixedWidth() && getArrayExpr().getMaxLength() != null &&
- getElementExpr().getMaxLength() != null && getElementExpr().getMaxLength() > getArrayExpr().getMaxLength()) {
- throw new DataExceedsCapacityException("");
+ @Override
+ public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
+ Expression arrayExpr = null;
+ PDataType baseDataType = null;
+ Expression otherExpr = null;
+ PDataType otherExpressionType = null;
+ if (getLHSExpr().getDataType().isArrayType()) {
+ arrayExpr = getLHSExpr();
+ baseDataType = getLHSBaseType();
+ otherExpr = getRHSExpr();
+ otherExpressionType = getRHSBaseType();
+ } else {
+ arrayExpr = getRHSExpr();
+ baseDataType = getRHSBaseType();
+ otherExpr = getLHSExpr();
+ otherExpressionType = getLHSBaseType();
}
- // If the base type has a scale, make sure the element being appended has a scale less than or equal to it
- if (getDataType() != null && getArrayExpr().getScale() != null && getElementExpr().getScale() != null &&
- getElementExpr().getScale() > getArrayExpr().getScale()) {
- throw new DataExceedsCapacityException(getBaseType(), getArrayExpr().getMaxLength(), getArrayExpr().getScale());
+ if (!arrayExpr.evaluate(tuple, ptr)) {
+ return false;
+ } else if (ptr.getLength() == 0) {
+ return true;
}
+ int arrayLength = PArrayDataType.getArrayLength(ptr, baseDataType, arrayExpr.getMaxLength());
+
+ int length = ptr.getLength();
+ int offset = ptr.getOffset();
+ byte[] arrayBytes = ptr.get();
+
+ otherExpr.evaluate(tuple, ptr);
+
+ checkSizeCompatibility(ptr, arrayExpr, baseDataType, otherExpr, otherExpressionType);
+ coerceBytes(ptr, arrayExpr, baseDataType, otherExpr, otherExpressionType);
+ return modifierFunction(ptr, length, offset, arrayBytes, baseDataType, arrayLength, getMaxLength(),
+ arrayExpr);
}
- protected void checkSizeCompatibility(ImmutableBytesWritable ptr) {
- if (!getBaseType().isSizeCompatible(ptr, null, getElementDataType(), getElementExpr().getMaxLength(), getElementExpr().getScale(), getArrayExpr().getMaxLength(), getArrayExpr().getScale())) {
- throw new DataExceedsCapacityException("");
+ // Override this method for various function implementations
+ protected boolean modifierFunction(ImmutableBytesWritable ptr, int len, int offset,
+ byte[] arrayBytes, PDataType baseDataType, int arrayLength, Integer maxLength,
+ Expression arrayExp) {
+ return false;
+ }
+
+ protected void checkSizeCompatibility(ImmutableBytesWritable ptr, Expression arrayExpr,
+ PDataType baseDataType, Expression otherExpr, PDataType otherExpressionType) {
+ if (!baseDataType.isSizeCompatible(ptr, null, otherExpressionType,
+ otherExpr.getMaxLength(), otherExpr.getScale(), arrayExpr.getMaxLength(),
+ arrayExpr.getScale())) {
+ throw new DataExceedsCapacityException("Values are not size compatible");
}
}
- protected void coerceBytes(ImmutableBytesWritable ptr) {
- getBaseType().coerceBytes(ptr, null, getElementDataType(), getElementExpr().getMaxLength(), getElementExpr().getScale(), getElementExpr().getSortOrder(), getArrayExpr().getMaxLength(), getArrayExpr().getScale(), getArrayExpr().getSortOrder());
+
+ protected void coerceBytes(ImmutableBytesWritable ptr, Expression arrayExpr,
+ PDataType baseDataType, Expression otherExpr, PDataType otherExpressionType) {
+ baseDataType.coerceBytes(ptr, null, otherExpressionType, otherExpr.getMaxLength(),
+ otherExpr.getScale(), otherExpr.getSortOrder(), arrayExpr.getMaxLength(),
+ arrayExpr.getScale(), arrayExpr.getSortOrder());
}
- public abstract Expression getArrayExpr();
+ public Expression getRHSExpr() {
+ return this.children.get(1);
+ }
- public abstract Expression getElementExpr();
+ public Expression getLHSExpr() {
+ return this.children.get(0);
+ }
- public PDataType getBaseType() {
- return PDataType.arrayBaseType(getArrayExpr().getDataType());
+ public PDataType getLHSBaseType() {
+ if (getLHSExpr().getDataType().isArrayType()) {
+ return PDataType.arrayBaseType(getLHSExpr().getDataType());
+ } else {
+ return getLHSExpr().getDataType();
+ }
}
- public PDataType getElementDataType() {
- return getElementExpr().getDataType();
+ public PDataType getRHSBaseType() {
+ if (getRHSExpr().getDataType().isArrayType()) {
+ return PDataType.arrayBaseType(getRHSExpr().getDataType());
+ } else {
+ return getRHSExpr().getDataType();
+ }
+ }
+
+ @Override
+ public PDataType getDataType() {
+ if (getLHSExpr().getDataType().isArrayType()) {
+ return getLHSExpr().getDataType();
+ } else {
+ return getRHSExpr().getDataType();
+ }
+ }
+
+
+ @Override
+ public Integer getMaxLength() {
+ if (getLHSExpr().getDataType().isArrayType()) {
+ return getLHSExpr().getMaxLength();
+ } else {
+ return getRHSExpr().getMaxLength();
+ }
+ }
+
+ @Override
+ public SortOrder getSortOrder() {
+ if (getLHSExpr().getDataType().isArrayType()) {
+ return getLHSExpr().getSortOrder();
+ } else {
+ return getRHSExpr().getSortOrder();
+ }
}
+
}
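The net effect of this refactor is a template-method base class: evaluate() works out which child is the array, evaluates both children, runs the size and coercion checks, and hands the normalized state to modifierFunction(), which subclasses override (as ARRAY_APPEND, ARRAY_PREPEND, and ARRAY_CAT now do). A hypothetical skeleton of a new modifier, with the class and function names invented purely for illustration:

import java.util.List;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.schema.TypeMismatchException;
import org.apache.phoenix.schema.types.PDataType;

// Hypothetical example only: ARRAY_FOO does not exist in this patch.
public class ArrayFooFunction extends ArrayModifierFunction {

    public static final String NAME = "ARRAY_FOO";

    public ArrayFooFunction(List<Expression> children) throws TypeMismatchException {
        super(children); // base constructor runs coercibility/size/scale checks
    }

    @Override
    protected boolean modifierFunction(ImmutableBytesWritable ptr, int len, int offset,
            byte[] arrayBytes, PDataType baseDataType, int arrayLength, Integer maxLength,
            Expression arrayExp) {
        // By the time this runs, the base evaluate() has evaluated both children,
        // checked size compatibility, and coerced the bytes in ptr. A real
        // implementation would rewrite ptr here, as append/prepend/concat do.
        return false;
    }

    @Override
    public String getName() {
        return NAME;
    }
}

A real subclass would also carry a @BuiltInFunction annotation and an ExpressionType registration, as the ArrayConcatFunction diff above shows.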
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7385899d/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayPrependFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayPrependFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayPrependFunction.java
index 3cea4df..c2311fb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayPrependFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayPrependFunction.java
@@ -23,16 +23,13 @@ import java.util.List;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.parse.FunctionParseNode;
-import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.TypeMismatchException;
-import org.apache.phoenix.schema.tuple.Tuple;
import org.apache.phoenix.schema.types.*;
@FunctionParseNode.BuiltInFunction(name = ArrayPrependFunction.NAME, args = {
@FunctionParseNode.Argument(allowedTypes = {PVarbinary.class}),
- @FunctionParseNode.Argument(allowedTypes = {PBinaryArray.class,
- PVarbinaryArray.class})})
-public class ArrayPrependFunction extends ArrayModifierFunction {
+ @FunctionParseNode.Argument(allowedTypes = {PBinaryArray.class, PVarbinaryArray.class})})
+public class ArrayPrependFunction extends ArrayModifierFunction {
public static final String NAME = "ARRAY_PREPEND";
@@ -44,53 +41,14 @@ public class ArrayPrependFunction extends ArrayModifierFunction {
}
@Override
- public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
-
- if (!getArrayExpr().evaluate(tuple, ptr)) {
- return false;
- } else if (ptr.getLength() == 0) {
- return true;
- }
- int arrayLength = PArrayDataType.getArrayLength(ptr, getBaseType(), getArrayExpr().getMaxLength());
-
- int length = ptr.getLength();
- int offset = ptr.getOffset();
- byte[] arrayBytes = ptr.get();
-
- getElementExpr().evaluate(tuple, ptr);
-
- checkSizeCompatibility(ptr);
- coerceBytes(ptr);
- return PArrayDataType.prependItemToArray(ptr, length, offset, arrayBytes, getBaseType(), arrayLength, getMaxLength(), getArrayExpr().getSortOrder());
- }
-
- @Override
- public PDataType getDataType() {
- return children.get(1).getDataType();
- }
-
- @Override
- public Integer getMaxLength() {
- return this.children.get(1).getMaxLength();
- }
-
- @Override
- public SortOrder getSortOrder() {
- return getChildren().get(1).getSortOrder();
+ protected boolean modifierFunction(ImmutableBytesWritable ptr, int len, int offset,
+ byte[] arrayBytes, PDataType baseDataType, int arrayLength, Integer maxLength,
+ Expression arrayExp) {
+ return PArrayDataType.prependItemToArray(ptr, len, offset, arrayBytes, baseDataType, arrayLength, getMaxLength(), arrayExp.getSortOrder());
}
@Override
public String getName() {
return NAME;
}
-
- @Override
- public Expression getArrayExpr() {
- return getChildren().get(1);
- }
-
- @Override
- public Expression getElementExpr() {
- return getChildren().get(0);
- }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7385899d/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
index 86f22f7..4e32cc0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
@@ -21,7 +21,6 @@ import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.text.Format;
-import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
@@ -129,6 +128,19 @@ public abstract class PArrayDataType<T> extends PDataType<T> {
}
return 0;
}
+
+ public static int serializeNulls(byte[] bytes, int position, int nulls){
+ int nMultiplesOver255 = nulls / 255;
+ while (nMultiplesOver255-- > 0) {
+ bytes[position++] = 1;
+ }
+ int nRemainingNulls = nulls % 255;
+ if (nRemainingNulls > 0) {
+ byte nNullByte = SortOrder.invert((byte)(nRemainingNulls-1));
+ bytes[position++] = nNullByte;
+ }
+ return position;
+ }
public static void writeEndSeperatorForVarLengthArray(DataOutputStream oStream) throws IOException {
oStream.write(QueryConstants.SEPARATOR_BYTE);
@@ -246,6 +258,10 @@ public abstract class PArrayDataType<T> extends PDataType<T> {
pArr = new PhoenixArray(pArr, desiredMaxLength);
}
}
+ //Coerce to new max length when only max lengths differ
+ if (actualType == desiredType && !pArr.isPrimitiveType() && maxLength != null && maxLength != desiredMaxLength) {
+ pArr = new PhoenixArray(pArr, desiredMaxLength);
+ }
baseType = desiredBaseType;
ptr.set(toBytes(pArr, baseType, expectedModifier));
} else {
@@ -460,6 +476,11 @@ public abstract class PArrayDataType<T> extends PDataType<T> {
}
public static boolean appendItemToArray(ImmutableBytesWritable ptr, int length, int offset, byte[] arrayBytes, PDataType baseType, int arrayLength, Integer maxLength, SortOrder sortOrder) {
+ if (ptr.getLength() == 0) {
+ ptr.set(arrayBytes, offset, length);
+ return true;
+ }
+
int elementLength = maxLength == null ? ptr.getLength() : maxLength;
//padding
@@ -617,16 +638,8 @@ public abstract class PArrayDataType<T> extends PDataType<T> {
currentPosition++;
newOffsetArrayPosition = offsetArrayPosition + lengthIncrease;
- while (nMultiplesOver255-- > 0) {
- newArray[currentPosition] = (byte) 1;
- currentPosition++;
- }
- // Write a byte for the remaining null elements
- if (nRemainingNulls > 0) {
- byte nNullByte = SortOrder.invert((byte) (nRemainingNulls - 1));
- newArray[currentPosition] = nNullByte; // Single byte for repeating nulls
- currentPosition++;
- }
+ //serialize nulls at the beginning
+ currentPosition = serializeNulls(newArray, currentPosition, nulls);
} else {
if (!useInt) {
if (PArrayDataType.useShortForOffsetArray(endElementPosition)) {
@@ -702,6 +715,134 @@ public abstract class PArrayDataType<T> extends PDataType<T> {
Bytes.putByte(newArray, currentPosition, arrayBytes[offset + length - 1]);
}
+ public static boolean concatArrays(ImmutableBytesWritable ptr, int array1BytesLength, int array1BytesOffset, byte[] array1Bytes, PDataType baseType, int actualLengthOfArray1, int actualLengthOfArray2) {
+ int array2BytesLength = ptr.getLength();
+ int array2BytesOffset = ptr.getOffset();
+ byte[] array2Bytes = ptr.get();
+
+ byte[] newArray;
+
+ if (!baseType.isFixedWidth()) {
+ int offsetArrayPositionArray1 = Bytes.toInt(array1Bytes, array1BytesOffset + array1BytesLength - Bytes.SIZEOF_INT - Bytes.SIZEOF_INT - Bytes.SIZEOF_BYTE, Bytes.SIZEOF_INT);
+ int offsetArrayPositionArray2 = Bytes.toInt(array2Bytes, array2BytesOffset + array2BytesLength - Bytes.SIZEOF_INT - Bytes.SIZEOF_INT - Bytes.SIZEOF_BYTE, Bytes.SIZEOF_INT);
+ int offsetArrayLengthArray1 = array1BytesLength - offsetArrayPositionArray1 - Bytes.SIZEOF_INT - Bytes.SIZEOF_INT - Bytes.SIZEOF_BYTE;
+ int offsetArrayLengthArray2 = array2BytesLength - offsetArrayPositionArray2 - Bytes.SIZEOF_INT - Bytes.SIZEOF_INT - Bytes.SIZEOF_BYTE;
+ int newArrayLength = actualLengthOfArray1 + actualLengthOfArray2;
+ int nullsAtTheEndOfArray1 = 0;
+ int nullsAtTheBeginningOfArray2 = 0;
+ //checks whether offset array consists of shorts or integers
+ boolean useIntArray1 = offsetArrayLengthArray1 / actualLengthOfArray1 == Bytes.SIZEOF_INT;
+ boolean useIntArray2 = offsetArrayLengthArray2 / actualLengthOfArray2 == Bytes.SIZEOF_INT;
+ boolean useIntNewArray = false;
+ //count nulls at the end of array 1
+ for (int index = actualLengthOfArray1 - 1; index > -1; index--) {
+ int offset = getOffset(array1Bytes, index, !useIntArray1, array1BytesOffset + offsetArrayPositionArray1);
+ if (array1Bytes[array1BytesOffset + offset] == QueryConstants.SEPARATOR_BYTE) {
+ nullsAtTheEndOfArray1++;
+ } else {
+ break;
+ }
+ }
+ //count nulls at the beginning of array 2
+ int array2FirstNonNullElementOffset = 0;
+ int array2FirstNonNullIndex = 0;
+ for (int index = 0; index < actualLengthOfArray2; index++) {
+ int offset = getOffset(array2Bytes, index, !useIntArray2, array2BytesOffset + offsetArrayPositionArray2);
+ if (array2Bytes[array2BytesOffset + offset] == QueryConstants.SEPARATOR_BYTE) {
+ nullsAtTheBeginningOfArray2++;
+ } else {
+ array2FirstNonNullIndex = index;
+ array2FirstNonNullElementOffset = offset;
+ break;
+ }
+ }
+ int nullsInMiddleAfterConcat = nullsAtTheEndOfArray1 + nullsAtTheBeginningOfArray2;
+ int bytesForNullsBefore = nullsAtTheBeginningOfArray2 / 255 + (nullsAtTheBeginningOfArray2 % 255 == 0 ? 0 : 1);
+ int bytesForNullsAfter = nullsInMiddleAfterConcat / 255 + (nullsInMiddleAfterConcat % 255 == 0 ? 0 : 1);
+ //Increase of length required to store nulls
+ int lengthIncreaseForNulls = bytesForNullsAfter - bytesForNullsBefore;
+ //The length increase grows by one extra byte when array 2 has no leading nulls but
+ //array 1 has trailing nulls, since a separator byte must be allocated in that case.
+ lengthIncreaseForNulls += nullsAtTheBeginningOfArray2 == 0 && nullsAtTheEndOfArray1 != 0 ? Bytes.SIZEOF_BYTE : 0;
+ int newOffsetArrayPosition = offsetArrayPositionArray1 + offsetArrayPositionArray2 + lengthIncreaseForNulls - 2 * Bytes.SIZEOF_BYTE;
+ int endElementPositionOfArray2 = getOffset(array2Bytes, actualLengthOfArray2 - 1, !useIntArray2, array2BytesOffset + offsetArrayPositionArray2);
+ int newEndElementPosition = lengthIncreaseForNulls + endElementPositionOfArray2 + offsetArrayPositionArray1 - 2 * Bytes.SIZEOF_BYTE;
+ //Creates a byte array to store the concatenated array
+ if (PArrayDataType.useShortForOffsetArray(newEndElementPosition)) {
+ newArray = new byte[newOffsetArrayPosition + newArrayLength * Bytes.SIZEOF_SHORT + Bytes.SIZEOF_INT + Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE];
+ } else {
+ useIntNewArray = true;
+ newArray = new byte[newOffsetArrayPosition + newArrayLength * Bytes.SIZEOF_INT + Bytes.SIZEOF_INT + Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE];
+ }
+
+ int currentPosition = 0;
+ //Copies all the elements from array 1 to new array
+ System.arraycopy(array1Bytes, array1BytesOffset, newArray, currentPosition, offsetArrayPositionArray1 - 2 * Bytes.SIZEOF_BYTE);
+ currentPosition = offsetArrayPositionArray1 - 2 * Bytes.SIZEOF_BYTE;
+ int array2StartingPosition = currentPosition;
+ currentPosition += nullsInMiddleAfterConcat != 0 ? 1 : 0;
+ //Writes nulls in the middle of the array.
+ currentPosition = serializeNulls(newArray, currentPosition, nullsInMiddleAfterConcat);
+ //Copies the elements from array 2 beginning from the first non null element.
+ System.arraycopy(array2Bytes, array2BytesOffset + array2FirstNonNullElementOffset, newArray, currentPosition, offsetArrayPositionArray2 - array2FirstNonNullElementOffset);
+ currentPosition += offsetArrayPositionArray2 - array2FirstNonNullElementOffset;
+
+ //Writing offset arrays
+ if (useIntNewArray) {
+ //offsets for the elements from array 1. Simply copied.
+ for (int index = 0; index < actualLengthOfArray1; index++) {
+ int offset = getOffset(array1Bytes, index, !useIntArray1, array1BytesOffset + offsetArrayPositionArray1);
+ Bytes.putInt(newArray, currentPosition, offset);
+ currentPosition += Bytes.SIZEOF_INT;
+ }
+ //offsets for nulls in the middle
+ for (int index = 0; index < array2FirstNonNullIndex; index++) {
+ int offset = getOffset(array2Bytes, index, !useIntArray2, array2BytesOffset + offsetArrayPositionArray2);
+ Bytes.putInt(newArray, currentPosition, offset + array2StartingPosition);
+ currentPosition += Bytes.SIZEOF_INT;
+ }
+ //offsets for the elements from the first non null element from array 2
+ int part2NonNullStartingPosition = array2StartingPosition + bytesForNullsAfter + (bytesForNullsAfter == 0 ? 0 : Bytes.SIZEOF_BYTE);
+ for (int index = array2FirstNonNullIndex; index < actualLengthOfArray2; index++) {
+ int offset = getOffset(array2Bytes, index, !useIntArray2, array2BytesOffset + offsetArrayPositionArray2);
+ Bytes.putInt(newArray, currentPosition, offset - array2FirstNonNullElementOffset + part2NonNullStartingPosition);
+ currentPosition += Bytes.SIZEOF_INT;
+ }
+ } else {
+ //offsets for the elements from array 1. Simply copied.
+ for (int index = 0; index < actualLengthOfArray1; index++) {
+ int offset = getOffset(array1Bytes, index, !useIntArray1, array1BytesOffset + offsetArrayPositionArray1);
+ Bytes.putShort(newArray, currentPosition, (short) (offset - Short.MAX_VALUE));
+ currentPosition += Bytes.SIZEOF_SHORT;
+ }
+ //offsets for nulls in the middle
+ for (int index = 0; index < array2FirstNonNullIndex; index++) {
+ int offset = getOffset(array2Bytes, index, !useIntArray2, array2BytesOffset + offsetArrayPositionArray2);
+ Bytes.putShort(newArray, currentPosition, (short) (offset + array2StartingPosition - Short.MAX_VALUE));
+ currentPosition += Bytes.SIZEOF_SHORT;
+ }
+ //offsets for the elements from the first non null element from array 2
+ int part2NonNullStartingPosition = array2StartingPosition + bytesForNullsAfter + (bytesForNullsAfter == 0 ? 0 : Bytes.SIZEOF_BYTE);
+ for (int index = array2FirstNonNullIndex; index < actualLengthOfArray2; index++) {
+ int offset = getOffset(array2Bytes, index, !useIntArray2, array2BytesOffset + offsetArrayPositionArray2);
+ Bytes.putShort(newArray, currentPosition, (short) (offset - array2FirstNonNullElementOffset + part2NonNullStartingPosition - Short.MAX_VALUE));
+ currentPosition += Bytes.SIZEOF_SHORT;
+ }
+ }
+ Bytes.putInt(newArray, currentPosition, newOffsetArrayPosition);
+ currentPosition += Bytes.SIZEOF_INT;
+ Bytes.putInt(newArray, currentPosition, useIntNewArray ? -newArrayLength : newArrayLength);
+ currentPosition += Bytes.SIZEOF_INT;
+ Bytes.putByte(newArray, currentPosition, array1Bytes[array1BytesOffset + array1BytesLength - 1]);
+ } else {
+ newArray = new byte[array1BytesLength + array2BytesLength];
+ System.arraycopy(array1Bytes, array1BytesOffset, newArray, 0, array1BytesLength);
+ System.arraycopy(array2Bytes, array2BytesOffset, newArray, array1BytesLength, array2BytesLength);
+ }
+ ptr.set(newArray);
+ return true;
+ }
+
public static int serailizeOffsetArrayIntoStream(DataOutputStream oStream, TrustedByteArrayOutputStream byteStream,
int noOfElements, int maxOffset, int[] offsetPos) throws IOException {
int offsetPosition = (byteStream.size());
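Before moving on, a worked instance of the serializeNulls() helper added above may help: runs of nulls are encoded as repeat counts, one byte per full run of 255 plus one byte for the remainder. A standalone sketch with concrete numbers, assuming SortOrder.invert(b) is the bitwise complement (b ^ 0xff) as used elsewhere in Phoenix:

// Standalone rendering of serializeNulls(bytes, 0, 600) with concrete numbers.
byte[] bytes = new byte[8];
int position = 0;
int nulls = 600;
int nMultiplesOver255 = nulls / 255;      // 2 full runs of 255 nulls
while (nMultiplesOver255-- > 0) {
    bytes[position++] = 1;                // 0x01 marks a full run of 255
}
int nRemainingNulls = nulls % 255;        // 90 nulls remain
if (nRemainingNulls > 0) {
    // SortOrder.invert((byte)(90 - 1)) == (byte)(89 ^ 0xff) == (byte) 0xA6
    bytes[position++] = (byte) ((nRemainingNulls - 1) ^ 0xff);
}
// position is now 3: 600 consecutive nulls serialize into three bytes.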
[19/47] phoenix git commit: PHOENIX-1920 - Pherf - Add support for mixed r/w workloads
Posted by ma...@apache.org.
PHOENIX-1920 - Pherf - Add support for mixed r/w workloads
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7175dcbc
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7175dcbc
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7175dcbc
Branch: refs/heads/calcite
Commit: 7175dcbc011dff48f6d041697ec84da98f80f729
Parents: 466eeb3
Author: cmarcel <cm...@salesforce.com>
Authored: Fri Jun 19 16:34:41 2015 -0700
Committer: cmarcel <cm...@salesforce.com>
Committed: Fri Jun 19 16:34:41 2015 -0700
----------------------------------------------------------------------
.gitignore | 2 +
phoenix-pherf/pom.xml | 10 +-
.../org/apache/phoenix/pherf/DataIngestIT.java | 134 ++++--
.../org/apache/phoenix/pherf/PherfMainIT.java | 36 ++
.../apache/phoenix/pherf/ResultBaseTestIT.java | 31 +-
.../apache/phoenix/pherf/SchemaReaderIT.java | 17 +-
.../java/org/apache/phoenix/pherf/Pherf.java | 179 +++++---
.../apache/phoenix/pherf/PherfConstants.java | 8 +-
.../phoenix/pherf/configuration/DataModel.java | 10 -
.../phoenix/pherf/configuration/Scenario.java | 12 +-
.../pherf/configuration/WriteParams.java | 72 +++
.../pherf/configuration/XMLConfigParser.java | 25 +-
.../phoenix/pherf/jmx/MonitorManager.java | 153 ++++---
.../phoenix/pherf/loaddata/DataLoader.java | 332 --------------
.../pherf/result/DataLoadThreadTime.java | 87 ++--
.../pherf/result/DataLoadTimeSummary.java | 54 +--
.../phoenix/pherf/result/DataModelResult.java | 68 ++-
.../phoenix/pherf/result/QueryResult.java | 17 +-
.../phoenix/pherf/result/QuerySetResult.java | 40 +-
.../org/apache/phoenix/pherf/result/Result.java | 11 +-
.../phoenix/pherf/result/ResultHandler.java | 5 +
.../phoenix/pherf/result/ResultManager.java | 19 +-
.../apache/phoenix/pherf/result/ResultUtil.java | 119 +++--
.../phoenix/pherf/result/ResultValue.java | 4 +-
.../apache/phoenix/pherf/result/RunTime.java | 179 ++++----
.../phoenix/pherf/result/ScenarioResult.java | 44 +-
.../apache/phoenix/pherf/result/ThreadTime.java | 34 +-
.../phoenix/pherf/result/file/Extension.java | 3 +-
.../phoenix/pherf/result/file/Header.java | 11 +-
.../pherf/result/impl/CSVResultHandler.java | 47 +-
.../pherf/result/impl/ImageResultHandler.java | 58 +--
.../pherf/result/impl/XMLResultHandler.java | 36 +-
.../phoenix/pherf/schema/SchemaReader.java | 2 +-
.../apache/phoenix/pherf/util/PhoenixUtil.java | 64 ++-
.../pherf/workload/MultiThreadedRunner.java | 153 +++++++
.../pherf/workload/MultithreadedDiffer.java | 131 +++---
.../pherf/workload/MultithreadedRunner.java | 170 -------
.../phoenix/pherf/workload/QueryExecutor.java | 459 ++++++++++---------
.../phoenix/pherf/workload/QueryVerifier.java | 265 +++++------
.../apache/phoenix/pherf/workload/Workload.java | 10 +
.../pherf/workload/WorkloadExecutor.java | 109 ++---
.../phoenix/pherf/workload/WriteWorkload.java | 403 ++++++++++++++++
.../scenario/prod_test_unsalted_scenario.xml | 35 ++
.../phoenix/pherf/ConfigurationParserTest.java | 102 +++--
.../org/apache/phoenix/pherf/ResultTest.java | 5 +-
.../apache/phoenix/pherf/RuleGeneratorTest.java | 15 +-
.../test/resources/scenario/test_scenario.xml | 58 ++-
47 files changed, 2171 insertions(+), 1667 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index fc0e4af..b918d76 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,3 +22,5 @@
target/
release/
RESULTS/
+CSV_EXPORT/
+
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index 1667c66..0facbde 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -16,7 +16,8 @@
~ limitations under the License.
-->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
@@ -30,7 +31,7 @@
<name>Phoenix - Pherf</name>
<properties>
- <top.dir>${project.basedir}/..</top.dir>
+ <top.dir>${project.basedir}/..</top.dir>
</properties>
<profiles>
@@ -233,6 +234,11 @@
<!-- Test Dependencies -->
<dependency>
+ <groupId>com.jcabi</groupId>
+ <artifactId>jcabi-jdbc</artifactId>
+ <version>0.15</version>
+ </dependency>
+ <dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.11</version>
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/DataIngestIT.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/DataIngestIT.java b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/DataIngestIT.java
index 2b56f43..828ac38 100644
--- a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/DataIngestIT.java
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/DataIngestIT.java
@@ -18,70 +18,122 @@
package org.apache.phoenix.pherf;
+import com.jcabi.jdbc.JdbcSession;
+import com.jcabi.jdbc.Outcome;
import org.apache.phoenix.pherf.configuration.Column;
+import org.apache.phoenix.pherf.configuration.DataModel;
import org.apache.phoenix.pherf.configuration.DataTypeMapping;
import org.apache.phoenix.pherf.configuration.Scenario;
-import org.apache.phoenix.pherf.configuration.XMLConfigParser;
-import org.apache.phoenix.pherf.loaddata.DataLoader;
import org.apache.phoenix.pherf.rules.DataValue;
import org.apache.phoenix.pherf.rules.RulesApplier;
-import org.apache.phoenix.pherf.schema.SchemaReader;
-import org.apache.phoenix.pherf.util.PhoenixUtil;
+import org.apache.phoenix.pherf.workload.QueryExecutor;
+import org.apache.phoenix.pherf.workload.WorkloadExecutor;
+import org.apache.phoenix.pherf.workload.WriteWorkload;
+import org.junit.Before;
import org.junit.Test;
-import java.nio.file.Path;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
public class DataIngestIT extends ResultBaseTestIT {
- protected static PhoenixUtil util = new PhoenixUtil(true);
- static final String matcherScenario = ".*scenario/.*test.*xml";
- static final String matcherSchema = ".*datamodel/.*test.*sql";
- @Test
- public void generateData() throws Exception {
- util.setZookeeper("localhost");
- SchemaReader reader = new SchemaReader(util, matcherSchema);
- XMLConfigParser parser = new XMLConfigParser(matcherScenario);
+ @Before
+ public void applySchema() throws Exception {
+ reader.applySchema();
+ resources = new ArrayList<>(reader.getResourceList());
- // 1. Generate table schema from file
- List<Path> resources = new ArrayList<>(reader.getResourceList());
assertTrue("Could not pull list of schema files.", resources.size() > 0);
assertNotNull("Could not read schema file.", reader.resourceToString(resources.get(0)));
- reader.applySchema();
+ }
+
+ @Test
+ public void testColumnRulesApplied() {
+
+ Scenario scenario = null;
+ try {
+ scenario = parser.getScenarioByName("testScenario");
+ List<Column>
+ columnListFromPhoenix =
+ util.getColumnsFromPhoenix(scenario.getSchemaName(),
+ scenario.getTableNameWithoutSchemaName(), util.getConnection());
+ assertTrue("Could not get phoenix columns.", columnListFromPhoenix.size() > 0);
+
+ WriteWorkload loader = new WriteWorkload(util, parser, scenario);
+ WorkloadExecutor executor = new WorkloadExecutor();
+ executor.add(loader);
+
+ RulesApplier rulesApplier = loader.getRulesApplier();
+ List<Map> modelList = rulesApplier.getModelList();
+ assertTrue("Could not generate the modelList", modelList.size() > 0);
+
+ for (Column column : columnListFromPhoenix) {
+ DataValue data = rulesApplier.getDataForRule(scenario, column);
- // 2. Load the metadata of for the test tables
- Scenario scenario = parser.getScenarios().get(0);
- List<Column> columnListFromPhoenix = util.getColumnsFromPhoenix(scenario.getSchemaName(), scenario.getTableNameWithoutSchemaName(), util.getConnection());
- assertTrue("Could not get phoenix columns.", columnListFromPhoenix.size() > 0);
- DataLoader loader = new DataLoader(util,parser);
- RulesApplier rulesApplier = loader.getRulesApplier();
- List<Map> modelList = rulesApplier.getModelList();
- assertTrue("Could not generate the modelList", modelList.size() > 0);
-
- for (Column column : columnListFromPhoenix) {
- DataValue data = rulesApplier.getDataForRule(scenario, column);
-
- // We are generating data values so the value should have been specified by this point.
- assertTrue("Failed to retrieve data for column type: " + column.getType(), data != null);
-
- // Test that we still retrieve the GENERAL_CHAR rule even after an override is applied to another CHAR type.
- // NEWVAL_STRING Column does not specify an override so we should get the default rule.
- if ((column.getType() == DataTypeMapping.VARCHAR) && (column.getName().equals("NEWVAL_STRING"))) {
- assertTrue("Failed to retrieve data for column type: ", data.getDistribution() == Integer.MIN_VALUE);
+ // We are generating data values
+ // so the value should have been specified by this point.
+ assertTrue("Failed to retrieve data for column type: " + column.getType(),
+ data != null);
+
+ // Test that we still retrieve the GENERAL_CHAR rule even after an override is
+ // applied to another CHAR type. NEWVAL_STRING Column does not specify an override
+ // so we should get the default rule.
+ if ((column.getType() == DataTypeMapping.VARCHAR) && (column.getName()
+ .equals("NEWVAL_STRING"))) {
+ assertTrue("Failed to retrieve data for column type: ",
+ data.getDistribution() == Integer.MIN_VALUE);
+ }
}
+ } catch (Exception e) {
+ fail("We had an exception: " + e.getMessage());
}
+ }
+
+ @Test
+ public void testRWWorkload() throws Exception {
+
+ Connection connection = util.getConnection();
+
+ WorkloadExecutor executor = new WorkloadExecutor();
+ DataModel dataModel = parser.getDataModelByName("test_scenario");
+ List<DataModel> dataModels = new ArrayList<>();
+ dataModels.add(dataModel);
+ QueryExecutor
+ qe =
+ new QueryExecutor(parser, util, executor.getPool(), dataModels, null, false,
+ PherfConstants.RunMode.PERFORMANCE);
+ executor.add(qe);
+ Scenario scenario = parser.getScenarioByName("testScenarioRW");
+
+ String sql = "select count(*) from " + scenario.getTableName();
- // Load up the data.
try {
- loader.execute();
+ // Wait for data to load up.
+ executor.get();
+ executor.shutdown();
+
+ // Verify data has been loaded
+ Integer count = new JdbcSession(connection).sql(sql).select(new Outcome<Integer>() {
+ @Override public Integer handle(ResultSet resultSet, Statement statement)
+ throws SQLException {
+ while (resultSet.next()) {
+ return resultSet.getInt(1);
+ }
+ return null;
+ }
+ });
+ assertNotNull("Could not retrieve count. " + count);
+
+ // It would be better to sum up all the rowcounts for the scenarios, but this is fine
+ assertTrue("Could not query any rows for in " + scenario.getTableName(), count > 0);
} catch (Exception e) {
- fail("Failed to lead data. An exception was thrown: " + e.getMessage());
+ fail("Failed to load data. An exception was thrown: " + e.getMessage());
}
}
}
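The row-count check in testRWWorkload uses the newly added jcabi-jdbc test dependency (see the pom change above). For readers unfamiliar with that API, a minimal self-contained sketch of the same select/Outcome pattern; the JDBC URL and table name are placeholders:

import java.sql.*;

import com.jcabi.jdbc.JdbcSession;
import com.jcabi.jdbc.Outcome;

public class CountSketch {
    public static void main(String[] args) throws Exception {
        Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
        Integer count = new JdbcSession(conn)
                .sql("SELECT COUNT(*) FROM some_table")
                .select(new Outcome<Integer>() {
                    @Override
                    public Integer handle(ResultSet rs, Statement st) throws SQLException {
                        // Return the single COUNT(*) value, or null if no row came back.
                        return rs.next() ? rs.getInt(1) : null;
                    }
                });
        System.out.println("rows: " + count);
        conn.close();
    }
}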
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
new file mode 100644
index 0000000..2407ef4
--- /dev/null
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.pherf;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.contrib.java.lang.system.ExpectedSystemExit;
+
+public class PherfMainIT extends ResultBaseTestIT {
+ @Rule
+ public final ExpectedSystemExit exit = ExpectedSystemExit.none();
+
+ @Test
+ public void testPherfMain() {
+ String[] args = { "-q",
+ "--scenarioFile", ".*prod_test_unsalted_scenario.*",
+ "-m", "--monitorFrequency", "10" };
+ Pherf.main(args);
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/ResultBaseTestIT.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/ResultBaseTestIT.java b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/ResultBaseTestIT.java
index 6e103b8..d2c5173 100644
--- a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/ResultBaseTestIT.java
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/ResultBaseTestIT.java
@@ -19,27 +19,38 @@
package org.apache.phoenix.pherf;
import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT;
+import org.apache.phoenix.pherf.configuration.XMLConfigParser;
import org.apache.phoenix.pherf.result.ResultUtil;
+import org.apache.phoenix.pherf.schema.SchemaReader;
+import org.apache.phoenix.pherf.util.PhoenixUtil;
import org.junit.BeforeClass;
+import java.nio.file.Path;
+import java.util.List;
import java.util.Properties;
public class ResultBaseTestIT extends BaseHBaseManagedTimeIT {
- private static boolean isSetUpDone = false;
+ protected static final String matcherScenario = ".*scenario/.*test.*xml";
+ protected static final String matcherSchema = ".*datamodel/.*test.*sql";
- @BeforeClass
- public static void setUp() throws Exception {
- if (isSetUpDone) {
- return;
- }
+ protected static PhoenixUtil util = PhoenixUtil.create(true);
+ protected static Properties properties;
+ protected static SchemaReader reader;
+ protected static XMLConfigParser parser;
+ protected static List<Path> resources;
+ protected static ResultUtil resultUtil = new ResultUtil();
+
+ @BeforeClass public static void setUp() throws Exception {
- ResultUtil util = new ResultUtil();
PherfConstants constants = PherfConstants.create();
- Properties properties = constants.getProperties(PherfConstants.PHERF_PROPERTIES);
+ properties = constants.getProperties(PherfConstants.PHERF_PROPERTIES);
String dir = properties.getProperty("pherf.default.results.dir");
String targetDir = "target/" + dir;
properties.setProperty("pherf.default.results.dir", targetDir);
- util.ensureBaseDirExists(targetDir);
- isSetUpDone = true;
+ resultUtil.ensureBaseDirExists(targetDir);
+
+ util.setZookeeper("localhost");
+ reader = new SchemaReader(util, matcherSchema);
+ parser = new XMLConfigParser(matcherScenario);
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/SchemaReaderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/SchemaReaderIT.java b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/SchemaReaderIT.java
index 2cb7c13..bce1e91 100644
--- a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/SchemaReaderIT.java
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/SchemaReaderIT.java
@@ -34,15 +34,12 @@ import java.sql.Connection;
import java.util.ArrayList;
import java.util.List;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
public class SchemaReaderIT extends BaseHBaseManagedTimeIT {
- protected static PhoenixUtil util = new PhoenixUtil(true);
+ protected static PhoenixUtil util = PhoenixUtil.create(true);
- @Test
- public void testSchemaReader() {
+ @Test public void testSchemaReader() {
// Test for the unit test version of the schema files.
assertApplySchemaTest();
}
@@ -55,7 +52,8 @@ public class SchemaReaderIT extends BaseHBaseManagedTimeIT {
List<Path> resources = new ArrayList<>(reader.getResourceList());
assertTrue("Could not pull list of schema files.", resources.size() > 0);
assertNotNull("Could not read schema file.", this.getClass().getResourceAsStream(
- PherfConstants.RESOURCE_DATAMODEL + "/" + resources.get(0).getFileName().toString()));
+ PherfConstants.RESOURCE_DATAMODEL + "/" + resources.get(0).getFileName()
+ .toString()));
assertNotNull("Could not read schema file.", reader.resourceToString(resources.get(0)));
reader.applySchema();
@@ -67,7 +65,10 @@ public class SchemaReaderIT extends BaseHBaseManagedTimeIT {
DataModel data = XMLConfigParser.readDataModel(resourcePath);
List<Scenario> scenarioList = data.getScenarios();
Scenario scenario = scenarioList.get(0);
- List<Column> columnList = util.getColumnsFromPhoenix(scenario.getSchemaName(), scenario.getTableNameWithoutSchemaName(), connection);
+ List<Column>
+ columnList =
+ util.getColumnsFromPhoenix(scenario.getSchemaName(),
+ scenario.getTableNameWithoutSchemaName(), connection);
assertTrue("Could not retrieve Metadata from Phoenix", columnList.size() > 0);
} catch (Exception e) {
fail("Could not initialize SchemaReader");
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
index 073c661..5a9f45f 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
@@ -18,44 +18,61 @@
package org.apache.phoenix.pherf;
+import org.apache.commons.cli.*;
import org.apache.phoenix.pherf.configuration.XMLConfigParser;
+import org.apache.phoenix.pherf.jmx.MonitorManager;
import org.apache.phoenix.pherf.schema.SchemaReader;
import org.apache.phoenix.pherf.util.PhoenixUtil;
import org.apache.phoenix.pherf.util.ResourceList;
+import org.apache.phoenix.pherf.workload.QueryExecutor;
+import org.apache.phoenix.pherf.workload.Workload;
import org.apache.phoenix.pherf.workload.WorkloadExecutor;
-
-import org.apache.commons.cli.*;
+import org.apache.phoenix.pherf.workload.WriteWorkload;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.file.Path;
+import java.util.ArrayList;
import java.util.Collection;
+import java.util.List;
import java.util.Properties;
public class Pherf {
private static final Logger logger = LoggerFactory.getLogger(Pherf.class);
private static final Options options = new Options();
+ private final PhoenixUtil phoenixUtil = PhoenixUtil.create();
static {
+ options.addOption("disableSchemaApply", false, "Set to disable schema from being applied.");
+ options.addOption("z", "zookeeper", true,
+ "HBase Zookeeper address for connection. Default: localhost");
+ options.addOption("q", "query", false, "Executes multi-threaded query sets");
+ options.addOption("listFiles", false, "List available resource files");
+ options.addOption("l", "load", false,
+ "Pre-loads data according to specified configuration values.");
+ options.addOption("scenarioFile", true,
+ "Regex or file name for the Test Scenario configuration .xml file to use.");
+ options.addOption("drop", true, "Regex drop all tables with schema name as PHERF. "
+ + "\nExample drop Event tables: -drop .*(EVENT).* Drop all: -drop .* or -drop all");
+ options.addOption("schemaFile", true,
+ "Regex or file name for the Test phoenix table schema .sql to use.");
options.addOption("m", "monitor", false, "Launch the stats profilers");
- options.addOption("monitorFrequency", true, "Override for frequency in Ms for which monitor should log stats. " +
- "\n See pherf.default.monitorFrequency in pherf.properties");
- options.addOption("d", "debug", false, "Put tool in debug mode");
- options.addOption("z", "zookeeper", true, "HBase Zookeeper address for connection. Default: localhost");
- options.addOption("l", "load", false, "Loads data according to specified configuration values.");
- options.addOption("scenarioFile", true, "Regex or file name for the Test Scenario configuration .xml file to use.");
- options.addOption("drop", true, "Regex drop all tables with schema name as PHERF. " +
- "\nExample drop Event tables: -drop .*(EVENT).* Drop all: -drop .* or -drop all");
- options.addOption("schemaFile", true, "Regex or file name for the Test phoenix table schema .sql to use.");
- options.addOption("rowCountOverride", true, "Row count override to use instead of one specified in scenario.");
+ options.addOption("monitorFrequency", true,
+ "Override for frequency in Ms for which monitor should log stats. "
+ + "\n See pherf.default.monitorFrequency in pherf.properties");
+ options.addOption("rowCountOverride", true,
+ "Row count override to use instead of one specified in scenario.");
options.addOption("hint", true, "Executes all queries with specified hint. Example SMALL");
- options.addOption("diff", false, "Run pherf in verification mode and diff with exported results");
- options.addOption("export", false, "Exports query results to CSV files in " + PherfConstants.EXPORT_DIR + " directory");
- options.addOption("listFiles", false, "List available resource files");
- options.addOption("writerThreadSize", true, "Override the default number of writer threads. " +
- "See pherf.default.dataloader.threadpool in Pherf.properties.");
- options.addOption("q", "query", false, "Executes multi-threaded query sets");
+ options.addOption("diff", false,
+ "Run pherf in verification mode and diff with exported results");
+ options.addOption("export", false,
+ "Exports query results to CSV files in " + PherfConstants.EXPORT_DIR
+ + " directory");
+ options.addOption("writerThreadSize", true,
+ "Override the default number of writer threads. "
+ + "See pherf.default.dataloader.threadpool in Pherf.properties.");
options.addOption("h", "help", false, "Get help on using this utility.");
+ options.addOption("d", "debug", false, "Put tool in debug mode");
}
private final String zookeeper;
@@ -63,14 +80,15 @@ public class Pherf {
private final String schemaFile;
private final String queryHint;
private final Properties properties;
- private final boolean loadData;
+ private final boolean preLoadData;
private final String dropPherfTablesRegEx;
private final boolean executeQuerySets;
private final boolean exportCSV;
private final boolean diff;
private final boolean monitor;
private final int rowCountOverride;
- private final boolean listFiles;
+ private final boolean listFiles;
+ private final boolean applySchema;
public Pherf(String[] args) throws Exception {
CommandLineParser parser = new PosixParser();
@@ -87,30 +105,35 @@ public class Pherf {
properties = PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES);
dropPherfTablesRegEx = command.getOptionValue("drop", null);
monitor = command.hasOption("m");
- String monitorFrequency = (command.hasOption("m") && command.hasOption("monitorFrequency"))
- ? command.getOptionValue("monitorFrequency")
- : properties.getProperty("pherf.default.monitorFrequency");
+ String
+ monitorFrequency =
+ (command.hasOption("m") && command.hasOption("monitorFrequency")) ?
+ command.getOptionValue("monitorFrequency") :
+ properties.getProperty("pherf.default.monitorFrequency");
properties.setProperty("pherf.default.monitorFrequency", monitorFrequency);
logger.debug("Using Monitor: " + monitor);
logger.debug("Monitor Frequency Ms:" + monitorFrequency);
- loadData = command.hasOption("l");
+ preLoadData = command.hasOption("l");
executeQuerySets = command.hasOption("q");
zookeeper = command.getOptionValue("z", "localhost");
queryHint = command.getOptionValue("hint", null);
exportCSV = command.hasOption("export");
diff = command.hasOption("diff");
listFiles = command.hasOption("listFiles");
- scenarioFile = command.hasOption("scenarioFile") ? command.getOptionValue("scenarioFile") : null;
+ applySchema = !command.hasOption("disableSchemaApply");
+ scenarioFile =
+ command.hasOption("scenarioFile") ? command.getOptionValue("scenarioFile") : null;
schemaFile = command.hasOption("schemaFile") ? command.getOptionValue("schemaFile") : null;
rowCountOverride = Integer.parseInt(command.getOptionValue("rowCountOverride", "0"));
- String writerThreadPoolSize = command.getOptionValue("writerThreadSize",
- properties.getProperty("pherf.default.dataloader.threadpool"));
+ String
+ writerThreadPoolSize =
+ command.getOptionValue("writerThreadSize",
+ properties.getProperty("pherf.default.dataloader.threadpool"));
properties.setProperty("pherf. default.dataloader.threadpool", writerThreadPoolSize);
-
- if ((command.hasOption("h") || (args == null || args.length == 0))
- && !command.hasOption("listFiles")) {
+ if ((command.hasOption("h") || (args == null || args.length == 0)) && !command
+ .hasOption("listFiles")) {
hf.printHelp("Pherf", options);
System.exit(1);
}
@@ -128,17 +151,22 @@ public class Pherf {
}
public void run() throws Exception {
- WorkloadExecutor workloadExec = null;
+ MonitorManager monitorManager = null;
+ List<Workload> workloads = new ArrayList<>();
+ WorkloadExecutor workloadExecutor = new WorkloadExecutor(properties, workloads);
try {
if (listFiles) {
ResourceList list = new ResourceList(PherfConstants.RESOURCE_DATAMODEL);
- Collection<Path> schemaFiles = list.getResourceList(PherfConstants.SCHEMA_ROOT_PATTERN + ".sql");
+ Collection<Path>
+ schemaFiles =
+ list.getResourceList(PherfConstants.SCHEMA_ROOT_PATTERN + ".sql");
System.out.println("Schema Files:");
for (Path path : schemaFiles) {
System.out.println(path);
}
list = new ResourceList(PherfConstants.RESOURCE_SCENARIO);
- Collection<Path> scenarioFiles =
+ Collection<Path>
+ scenarioFiles =
list.getResourceList(PherfConstants.SCENARIO_ROOT_PATTERN + ".xml");
System.out.println("Scenario Files:");
for (Path path : scenarioFiles) {
@@ -146,49 +174,86 @@ public class Pherf {
}
return;
}
- workloadExec = (scenarioFile == null)
- ? new WorkloadExecutor(properties,
- new XMLConfigParser(PherfConstants.DEFAULT_FILE_PATTERN),
- monitor)
- : new WorkloadExecutor(properties,
- new XMLConfigParser(scenarioFile),
- monitor);
+ XMLConfigParser parser = new XMLConfigParser(scenarioFile);
// Drop tables with PHERF schema and regex comparison
if (null != dropPherfTablesRegEx) {
- logger.info("\nDropping existing table with PHERF namename and "
- + dropPherfTablesRegEx + " regex expression.");
- new PhoenixUtil().deleteTables(dropPherfTablesRegEx);
+ logger.info(
+ "\nDropping existing table with PHERF namename and " + dropPherfTablesRegEx
+ + " regex expression.");
+ phoenixUtil.deleteTables(dropPherfTablesRegEx);
}
- // Schema and Data Load
- if (loadData) {
+ if (monitor) {
+ monitorManager =
+ new MonitorManager(Integer.parseInt(
+ properties.getProperty("pherf.default.monitorFrequency")));
+ workloadExecutor.add(monitorManager);
+ }
+
+ if (applySchema) {
logger.info("\nStarting to apply schema...");
- SchemaReader reader = (schemaFile == null)
- ? new SchemaReader(".*.sql")
- : new SchemaReader(schemaFile);
+ SchemaReader
+ reader =
+ (schemaFile == null) ?
+ new SchemaReader(".*.sql") :
+ new SchemaReader(schemaFile);
reader.applySchema();
+ }
+ // Schema and Data Load
+ if (preLoadData) {
logger.info("\nStarting Data Load...");
- workloadExec.executeDataLoad();
+ WriteWorkload workload = new WriteWorkload(parser);
+ workloadExecutor.add(workload);
+
+ // Wait for dataLoad to complete
+ workloadExecutor.get(workload);
logger.info("\nGenerate query gold files after data load");
- workloadExec.executeMultithreadedQueryExecutor(queryHint, true, PherfConstants.RunMode.FUNCTIONAL);
+ QueryExecutor
+ goldFileGenerator =
+ new QueryExecutor(parser, phoenixUtil, workloadExecutor.getPool(),
+ parser.getDataModels(), queryHint, true,
+ PherfConstants.RunMode.FUNCTIONAL);
+ workloadExecutor
+ .add(goldFileGenerator);
+
+ // Wait for dataLoad to complete
+ workloadExecutor.get(goldFileGenerator);
} else {
- logger.info("\nSKIPPED: Data Load and schema creation as -l argument not specified");
+ logger.info(
+ "\nSKIPPED: Data Load and schema creation as -l argument not specified");
}
// Execute multi-threaded query sets
if (executeQuerySets) {
- logger.info("\nStarting to apply schema...");
- workloadExec.executeMultithreadedQueryExecutor(queryHint, exportCSV, diff ? PherfConstants.RunMode.FUNCTIONAL : PherfConstants.RunMode.PERFORMANCE);
+ logger.info("\nStarting to apply Execute Queries...");
+
+ workloadExecutor
+ .add(new QueryExecutor(parser, phoenixUtil, workloadExecutor.getPool(),
+ parser.getDataModels(), queryHint, exportCSV, diff ?
+ PherfConstants.RunMode.FUNCTIONAL :
+ PherfConstants.RunMode.PERFORMANCE));
+
} else {
- logger.info("\nSKIPPED: Multithreaded query set execution as -q argument not specified");
+ logger.info(
+ "\nSKIPPED: Multithreaded query set execution as -q argument not specified");
+ }
+
+ // Clean up the monitor explicitly
+ if (monitorManager != null) {
+ logger.info("Run completed. Shutting down Monitor.");
+ monitorManager.complete();
}
+
+ // Collect any final jobs
+ workloadExecutor.get();
+
} finally {
- if (workloadExec != null) {
- logger.info("Run completed. Shutting down Monitor if it was running.");
- workloadExec.shutdown();
+ if (workloadExecutor != null) {
+ logger.info("Run completed. Shutting down thread pool.");
+ workloadExecutor.shutdown();
}
}
}
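The run() method above no longer calls dedicated executeDataLoad()/executeMultithreadedQueryExecutor() methods; it composes independent workloads (WriteWorkload, QueryExecutor, MonitorManager) on a shared WorkloadExecutor. A minimal sketch of the Workload contract implied by this diff; the real interface lives in org.apache.phoenix.pherf.workload and may differ in detail:

    // Sketch of the contract implied by the diff: WriteWorkload, QueryExecutor,
    // and MonitorManager all plug into WorkloadExecutor through this interface.
    public interface Workload {
        // Hands the executor the task to run on its thread pool.
        Runnable execute() throws Exception;

        // Asks the workload to stop and release its resources
        // (see monitorManager.complete() above).
        void complete();
    }

WorkloadExecutor.add() submits the Runnable from execute() to the pool, while get(workload) blocks until that particular workload finishes; that is how run() serializes the data load before generating the query gold files.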
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/PherfConstants.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/PherfConstants.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/PherfConstants.java
index 493f5a8..e060e53 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/PherfConstants.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/PherfConstants.java
@@ -28,14 +28,13 @@ public class PherfConstants {
public static final int DEFAULT_THREAD_POOL_SIZE = 10;
public static final int DEFAULT_BATCH_SIZE = 1000;
public static final String DEFAULT_DATE_PATTERN = "yyyy-MM-dd HH:mm:ss.SSS";
- public static final String DEFAULT_FILE_PATTERN = ".*scenario.xml";
public static final String RESOURCE_SCENARIO = "/scenario";
public static final String
SCENARIO_ROOT_PATTERN =
".*" + PherfConstants.RESOURCE_SCENARIO.substring(1) + ".*";
public static final String SCHEMA_ROOT_PATTERN = ".*";
public static final String PHERF_PROPERTIES = "pherf.properties";
-// public static final String RESULT_DIR = "RESULTS";
+
public static final String EXPORT_DIR = "CSV_EXPORT";
public static final String RESULT_PREFIX = "RESULT_";
public static final String PATH_SEPARATOR = "/";
@@ -51,6 +50,7 @@ public class PherfConstants {
public static final String PHERF_SCHEMA_NAME = "PHERF";
+    // TODO Move to properties
// log out data load per n rows
public static final int LOG_PER_NROWS = 1000000;
public static final String COMBINED_FILE_NAME = "COMBINED";
@@ -86,7 +86,9 @@ public class PherfConstants {
InputStream is = null;
try {
is = getClass().getClassLoader().getResourceAsStream(fileName);
- properties.load(is);
+ if (is != null) {
+ properties.load(is);
+ }
} finally {
if (is != null) {
is.close();
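The added null check matters because ClassLoader.getResourceAsStream() returns null rather than throwing when the named resource is missing, so the unguarded Properties.load(is) failed with a NullPointerException. The same defensive pattern as a standalone sketch; loadOptional is a hypothetical helper, not a Pherf method:

    import java.io.IOException;
    import java.io.InputStream;
    import java.util.Properties;

    // Hypothetical helper mirroring the guarded load above; yields empty
    // Properties when the file is absent from the classpath.
    static Properties loadOptional(String fileName) throws IOException {
        Properties properties = new Properties();
        try (InputStream is = Thread.currentThread().getContextClassLoader()
                .getResourceAsStream(fileName)) {
            if (is != null) {
                properties.load(is);
            }
        }
        return properties;
    }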
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataModel.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataModel.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataModel.java
index 25c0df1..8eb42ff 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataModel.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataModel.java
@@ -26,7 +26,6 @@ import java.util.List;
@XmlRootElement(name = "datamodel")
public class DataModel {
- private String release;
private String name;
private List<Scenario> scenarios;
private List<Column> dataMappingColumns;
@@ -34,15 +33,6 @@ public class DataModel {
public DataModel() {
}
- public String getRelease() {
- return this.release;
- }
-
- @XmlAttribute()
- public void setRelease(String release) {
- this.release = release;
- }
-
public List<Scenario> getScenarios() {
return scenarios;
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Scenario.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Scenario.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Scenario.java
index d2f113a..7de96cc 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Scenario.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/Scenario.java
@@ -34,10 +34,12 @@ public class Scenario {
private int rowCount;
private Map<String, String> phoenixProperties;
private DataOverride dataOverride;
- private List<QuerySet> querySet = new ArrayList<QuerySet>();
+ private List<QuerySet> querySet = new ArrayList<>();
+ private WriteParams writeParams;
private String name;
public Scenario() {
+ writeParams = new WriteParams();
}
/**
@@ -161,6 +163,14 @@ public class Scenario {
this.name = name;
}
+ public WriteParams getWriteParams() {
+ return writeParams;
+ }
+
+ public void setWriteParams(WriteParams writeParams) {
+ this.writeParams = writeParams;
+ }
+
@Override
public String toString() {
StringBuilder stringBuilder = new StringBuilder();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/WriteParams.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/WriteParams.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/WriteParams.java
new file mode 100644
index 0000000..04be239
--- /dev/null
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/WriteParams.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.pherf.configuration;
+
+import javax.xml.bind.annotation.XmlAttribute;
+
+public class WriteParams {
+ private int writerThreadCount;
+ private long threadSleepDuration;
+ private long batchSize;
+ private long executionDurationInMs;
+
+ public WriteParams() {
+ this.batchSize = Long.MIN_VALUE;
+ this.writerThreadCount = Integer.MIN_VALUE;
+ this.threadSleepDuration = Long.MIN_VALUE;
+ this.executionDurationInMs = Long.MAX_VALUE;
+ }
+
+ public long getThreadSleepDuration() {
+ return threadSleepDuration;
+ }
+
+ @SuppressWarnings("unused")
+ public void setThreadSleepDuration(long threadSleepDuration) {
+ this.threadSleepDuration = threadSleepDuration;
+ }
+
+ public long getBatchSize() {
+ return batchSize;
+ }
+
+ @SuppressWarnings("unused")
+ public void setBatchSize(long batchSize) {
+ this.batchSize = batchSize;
+ }
+
+ public int getWriterThreadCount() {
+ return writerThreadCount;
+ }
+
+ @SuppressWarnings("unused")
+ public void setWriterThreadCount(int writerThreadCount) {
+ this.writerThreadCount = writerThreadCount;
+ }
+
+ @XmlAttribute()
+ public long getExecutionDurationInMs() {
+ return executionDurationInMs;
+ }
+
+ @SuppressWarnings("unused")
+ public void setExecutionDurationInMs(long executionDurationInMs) {
+ this.executionDurationInMs = executionDurationInMs;
+ }
+}
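Note that the constructor seeds every field with a sentinel (Long.MIN_VALUE / Integer.MIN_VALUE, and Long.MAX_VALUE for the execution duration), which lets a consumer distinguish "not set in the scenario XML" from a configured value. A hedged sketch of that fallback logic; resolveBatchSize is hypothetical, not a Pherf method:

    // Hypothetical consumer-side fallback: the sentinel written by the
    // WriteParams constructor marks a field the XML never populated.
    static long resolveBatchSize(WriteParams params, long defaultBatchSize) {
        long configured = params.getBatchSize();
        return (configured == Long.MIN_VALUE) ? defaultBatchSize : configured;
    }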
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java
index 9b5a9e9..393fa7e 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java
@@ -52,6 +52,24 @@ public class XMLConfigParser {
return dataModels;
}
+ public DataModel getDataModelByName(String name) {
+ for (DataModel dataModel : getDataModels()) {
+ if (dataModel.getName().equals(name)) {
+ return dataModel;
+ }
+ }
+ return null;
+ }
+
+ public Scenario getScenarioByName(String name) throws Exception {
+ for (Scenario scenario : getScenarios()) {
+ if (scenario.getName().equals(name)) {
+ return scenario;
+ }
+ }
+ return null;
+ }
+
public synchronized Collection<Path> getPaths(String strPattern) throws Exception {
if (paths != null) {
return paths;
@@ -87,7 +105,8 @@ public class XMLConfigParser {
* Unmarshall an XML data file
*
* @param file Name of File
- * @return
+ * @return {@link org.apache.phoenix.pherf.configuration.DataModel} Returns DataModel from
+ * XML configuration
* @throws JAXBException
*/
// TODO Remove static calls
@@ -151,8 +170,6 @@ public class XMLConfigParser {
}
private Collection<Path> getResources(String pattern) throws Exception {
- Collection<Path> resourceFiles = new ArrayList<Path>();
- resourceFiles = resourceList.getResourceList(pattern);
- return resourceFiles;
+ return resourceList.getResourceList(pattern);
}
}
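The new getDataModelByName()/getScenarioByName() helpers are linear scans that return null on a miss, so callers must guard against unknown names. An illustrative use; the file pattern and scenario name are made up for the example:

    // Hypothetical lookup; ".*scenario.xml" and "WRITE_SCENARIO" are
    // illustrative values, not ones shipped with Pherf.
    static Scenario findScenario() throws Exception {
        XMLConfigParser parser = new XMLConfigParser(".*scenario.xml");
        Scenario scenario = parser.getScenarioByName("WRITE_SCENARIO");
        if (scenario == null) {
            throw new IllegalArgumentException("Unknown scenario: WRITE_SCENARIO");
        }
        return scenario;
    }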
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorManager.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorManager.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorManager.java
index 6f97551..5b39b2b 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorManager.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/jmx/MonitorManager.java
@@ -21,48 +21,54 @@ package org.apache.phoenix.pherf.jmx;
import org.apache.phoenix.pherf.PherfConstants;
import org.apache.phoenix.pherf.exception.FileLoaderRuntimeException;
import org.apache.phoenix.pherf.jmx.monitors.Monitor;
-import org.apache.phoenix.pherf.result.file.ResultFileDetails;
-import org.apache.phoenix.pherf.result.impl.CSVResultHandler;
import org.apache.phoenix.pherf.result.Result;
import org.apache.phoenix.pherf.result.ResultHandler;
+import org.apache.phoenix.pherf.result.file.ResultFileDetails;
+import org.apache.phoenix.pherf.result.impl.CSVResultHandler;
+import org.apache.phoenix.pherf.workload.Workload;
import org.apache.phoenix.util.DateUtil;
-import javax.management.*;
+import javax.management.InstanceAlreadyExistsException;
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+import javax.management.StandardMBean;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.*;
+import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
/**
- * This class starts JMX stats for the configured monitors. Monitors should be configured in MonitorDetails Enum.
+ * This class starts JMX stats for the configured monitors.
+ * Monitors should be configured in MonitorDetails Enum.
* Each stat implements {@link org.apache.phoenix.pherf.jmx.monitors.Monitor}.
*
- * For the duration of any Pherf run, when the configured {@link org.apache.phoenix.pherf.PherfConstants#MONITOR_FREQUENCY}
- * is reached a snapshot of each monitor is taken and dumped out to a log file.
+ * For the duration of any Pherf run, when the configured
+ * {@link org.apache.phoenix.pherf.PherfConstants#MONITOR_FREQUENCY} is reached a snapshot of
+ * each monitor is taken and dumped out to a log file.
*/
-public class MonitorManager implements Runnable {
+public class MonitorManager implements Workload {
// List of MonitorDetails for all the running monitors.
    // TODO Move this out to config. Possibly use Guice and IoC to inject it.
- private static final List<MonitorDetails> MONITOR_DETAILS_LIST =
+ private static final List<MonitorDetails>
+ MONITOR_DETAILS_LIST =
Arrays.asList(MonitorDetails.values());
private final ResultHandler resultHandler;
- private final long monitorFrequency;
- private AtomicLong rowCount;
- private volatile boolean shouldStop = false;
- private volatile boolean isRunning = false;
+ private final AtomicLong monitorFrequency;
+ private final AtomicLong rowCount;
+ private final AtomicBoolean shouldStop = new AtomicBoolean(false);
+ private final AtomicBoolean isRunning = new AtomicBoolean(false);
- @SuppressWarnings("unused")
- public MonitorManager() throws Exception {
+ @SuppressWarnings("unused") public MonitorManager() throws Exception {
this(PherfConstants.MONITOR_FREQUENCY);
}
/**
- *
* @param monitorFrequency Frequency at which monitor stats are written to a log file.
* @throws Exception
*/
public MonitorManager(long monitorFrequency) throws Exception {
- this.monitorFrequency = monitorFrequency;
+ this.monitorFrequency = new AtomicLong(monitorFrequency);
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
// Register all the monitors to JMX
@@ -77,74 +83,87 @@ public class MonitorManager implements Runnable {
}
}
rowCount = new AtomicLong(0);
- this.resultHandler = new CSVResultHandler(PherfConstants.MONITOR_FILE_NAME, ResultFileDetails.CSV);
+ this.resultHandler =
+ new CSVResultHandler(PherfConstants.MONITOR_FILE_NAME, ResultFileDetails.CSV);
}
- @Override
- public void run() {
- try {
- while (!shouldStop()) {
- isRunning = true;
- List rowValues = new ArrayList<String>();
- synchronized (resultHandler) {
- for (MonitorDetails monitorDetails : MONITOR_DETAILS_LIST) {
- rowValues.clear();
- try {
- StandardMBean bean = new StandardMBean(monitorDetails.getMonitor(), Monitor.class);
-
- Calendar calendar = new GregorianCalendar();
- rowValues.add(monitorDetails);
-
- rowValues.add(((Monitor) bean.getImplementation()).getStat());
- rowValues.add(DateUtil.DEFAULT_MS_DATE_FORMATTER.format(calendar.getTime()));
- Result
- result = new Result(ResultFileDetails.CSV, ResultFileDetails.CSV_MONITOR.getHeader().toString(), rowValues);
- resultHandler.write(result);
- } catch (Exception e) {
- throw new FileLoaderRuntimeException("Could not log monitor result.", e);
+ @Override public synchronized void complete() {
+ this.shouldStop.set(true);
+ }
+
+ @Override public Runnable execute() {
+ return new Runnable() {
+ @Override public void run() {
+ try {
+ while (!shouldStop()) {
+ isRunning.set(true);
+ List rowValues = new ArrayList<String>();
+ synchronized (resultHandler) {
+ for (MonitorDetails monitorDetails : MONITOR_DETAILS_LIST) {
+ rowValues.clear();
+ try {
+ StandardMBean
+ bean =
+ new StandardMBean(monitorDetails.getMonitor(),
+ Monitor.class);
+
+ Calendar calendar = new GregorianCalendar();
+ rowValues.add(monitorDetails);
+
+ rowValues.add(((Monitor) bean.getImplementation()).getStat());
+ rowValues.add(DateUtil.DEFAULT_MS_DATE_FORMATTER
+ .format(calendar.getTime()));
+ Result
+ result =
+ new Result(ResultFileDetails.CSV,
+ ResultFileDetails.CSV_MONITOR.getHeader()
+ .toString(), rowValues);
+ resultHandler.write(result);
+ } catch (Exception e) {
+ throw new FileLoaderRuntimeException(
+ "Could not log monitor result.", e);
+ }
+ rowCount.getAndIncrement();
+ }
+ try {
+ resultHandler.flush();
+ Thread.sleep(getMonitorFrequency());
+ } catch (Exception e) {
+ Thread.currentThread().interrupt();
+ e.printStackTrace();
+ }
}
- rowCount.getAndIncrement();
}
+ } finally {
try {
- resultHandler.flush();
- Thread.sleep(getMonitorFrequency());
+ isRunning.set(false);
+ if (resultHandler != null) {
+ resultHandler.close();
+ }
} catch (Exception e) {
- Thread.currentThread().interrupt();
- e.printStackTrace();
+ throw new FileLoaderRuntimeException("Could not close monitor results.", e);
}
}
}
- } finally {
- try {
- isRunning = false;
- if (resultHandler != null) {
- resultHandler.close();
- }
- } catch (Exception e) {
- throw new FileLoaderRuntimeException("Could not close monitor results.", e);
- }
- }
-
+ };
}
public long getMonitorFrequency() {
- return monitorFrequency;
- }
-
- public synchronized boolean shouldStop() {
- return shouldStop;
+ return monitorFrequency.get();
}
- public synchronized void stop() {
- this.shouldStop = true;
+ public boolean shouldStop() {
+ return shouldStop.get();
}
- public synchronized long getRowCount() {
+ // Convenience method for testing.
+ @SuppressWarnings("unused")
+ public long getRowCount() {
return rowCount.get();
}
- public synchronized boolean isRunning() {
- return isRunning;
+ public boolean isRunning() {
+ return isRunning.get();
}
/**
@@ -157,7 +176,9 @@ public class MonitorManager implements Runnable {
ResultHandler handler = null;
try {
if (resultHandler.isClosed()) {
- handler = new CSVResultHandler(PherfConstants.MONITOR_FILE_NAME, ResultFileDetails.CSV);
+ handler =
+ new CSVResultHandler(PherfConstants.MONITOR_FILE_NAME,
+ ResultFileDetails.CSV);
return handler.read();
} else {
return resultHandler.read();
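With MonitorManager now a Workload rather than a bare Runnable, shutdown is explicit: complete() flips the AtomicBoolean that the polling loop checks, and the loop's finally block flushes and closes the result handler. A simplified lifecycle sketch; in Pherf the WorkloadExecutor owns the pool, so this standalone main is illustrative only:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class MonitorLifecycleSketch {
        public static void main(String[] args) throws Exception {
            ExecutorService pool = Executors.newSingleThreadExecutor();
            MonitorManager monitor = new MonitorManager(1000); // snapshot every 1s
            Future<?> handle = pool.submit(monitor.execute());
            // ... run benchmark workloads here ...
            monitor.complete(); // sets shouldStop; the polling loop exits
            handle.get();       // waits for the final flush/close
            pool.shutdown();
        }
    }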
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/loaddata/DataLoader.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/loaddata/DataLoader.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/loaddata/DataLoader.java
deleted file mode 100644
index c521822..0000000
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/loaddata/DataLoader.java
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.phoenix.pherf.loaddata;
-
-import java.math.BigDecimal;
-import java.sql.Connection;
-import java.sql.Date;
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.sql.Types;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Properties;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-
-import org.apache.phoenix.pherf.result.ResultUtil;
-import org.apache.phoenix.pherf.util.ResourceList;
-import org.apache.phoenix.pherf.util.RowCalculator;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.phoenix.pherf.PherfConstants;
-import org.apache.phoenix.pherf.configuration.Column;
-import org.apache.phoenix.pherf.configuration.DataModel;
-import org.apache.phoenix.pherf.configuration.Scenario;
-import org.apache.phoenix.pherf.configuration.XMLConfigParser;
-import org.apache.phoenix.pherf.exception.PherfException;
-import org.apache.phoenix.pherf.result.DataLoadThreadTime;
-import org.apache.phoenix.pherf.result.DataLoadTimeSummary;
-import org.apache.phoenix.pherf.rules.DataValue;
-import org.apache.phoenix.pherf.rules.RulesApplier;
-import org.apache.phoenix.pherf.util.PhoenixUtil;
-
-public class DataLoader {
- private static final Logger logger = LoggerFactory.getLogger(DataLoader.class);
- private final PhoenixUtil pUtil;
- private final XMLConfigParser parser;
- private final RulesApplier rulesApplier;
- private final ResultUtil resultUtil;
- private final ExecutorService pool;
-
- private final int threadPoolSize;
- private final int batchSize;
-
- public DataLoader(XMLConfigParser parser) throws Exception {
- this(new PhoenixUtil(), parser);
- }
-
- public DataLoader(PhoenixUtil phoenixUtil, XMLConfigParser parser) throws Exception{
- this(phoenixUtil, PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES), parser);
- }
-
- /**
- * Default the writers to use up all available cores for threads.
- *
- * @param parser
- * @throws Exception
- */
- public DataLoader(PhoenixUtil phoenixUtil, Properties properties, XMLConfigParser parser) throws Exception {
- this.pUtil = phoenixUtil;
- this.parser = parser;
- this.rulesApplier = new RulesApplier(parser);
- this.resultUtil = new ResultUtil();
- int size = Integer.parseInt(properties.getProperty("pherf.default.dataloader.threadpool"));
- this.threadPoolSize = (size == 0) ? Runtime.getRuntime().availableProcessors() : size;
- this.pool = Executors.newFixedThreadPool(this.threadPoolSize);
- String bSize = properties.getProperty("pherf.default.dataloader.batchsize");
- this.batchSize = (bSize == null) ? PherfConstants.DEFAULT_BATCH_SIZE : Integer.parseInt(bSize);
- }
-
- public void execute() throws Exception {
- try {
- DataLoadTimeSummary dataLoadTimeSummary = new DataLoadTimeSummary();
- DataLoadThreadTime dataLoadThreadTime = new DataLoadThreadTime();
-
- for (Scenario scenario : getParser().getScenarios()) {
- List<Future> writeBatches = new ArrayList<Future>();
- logger.info("\nLoading " + scenario.getRowCount()
- + " rows for " + scenario.getTableName());
- long start = System.currentTimeMillis();
-
- RowCalculator rowCalculator = new RowCalculator(getThreadPoolSize(), scenario.getRowCount());
- for (int i = 0; i < getThreadPoolSize(); i++) {
- List<Column> phxMetaCols = pUtil.getColumnsFromPhoenix(
- scenario.getSchemaName(),
- scenario.getTableNameWithoutSchemaName(),
- pUtil.getConnection());
- int threadRowCount = rowCalculator.getNext();
- logger.info("Kick off thread (#" + i + ")for upsert with (" + threadRowCount + ") rows.");
- Future<Info> write = upsertData(scenario, phxMetaCols,
- scenario.getTableName(), threadRowCount, dataLoadThreadTime);
- writeBatches.add(write);
- }
-
- if (writeBatches.isEmpty()) {
- throw new PherfException(
- "Holy shit snacks! Throwing up hands in disbelief and exiting. Could not write data for some unknown reason.");
- }
-
- int sumRows = 0, sumDuration = 0;
- // Wait for all the batch threads to complete
- for (Future<Info> write : writeBatches) {
- Info writeInfo = write.get();
- sumRows += writeInfo.getRowCount();
- sumDuration += writeInfo.getDuration();
- logger.info("Executor writes complete with row count ("
- + writeInfo.getRowCount()
- + ") in Ms ("
- + writeInfo.getDuration() + ")");
- }
- logger.info("Writes completed with total row count (" + sumRows
- + ") with total time of(" + sumDuration + ") Ms");
- dataLoadTimeSummary.add(scenario.getTableName(), sumRows, (int) (System.currentTimeMillis() - start));
-
-
- // always update stats for Phoenix base tables
- updatePhoenixStats(scenario.getTableName());
- }
- resultUtil.write(dataLoadTimeSummary);
- resultUtil.write(dataLoadThreadTime);
-
- } finally {
- pool.shutdown();
- }
- }
-
- /**
- * TODO Move this method to PhoenixUtil
- * Update Phoenix table stats
- *
- * @param tableName
- * @throws Exception
- */
- public void updatePhoenixStats(String tableName) throws Exception {
- logger.info("Updating stats for " + tableName);
- pUtil.executeStatement("UPDATE STATISTICS " + tableName);
- }
-
- public Future<Info> upsertData(final Scenario scenario,
- final List<Column> columns, final String tableName,
- final int rowCount, final DataLoadThreadTime dataLoadThreadTime) {
- Future<Info> future = pool.submit(new Callable<Info>() {
- @Override
- public Info call() throws Exception {
- int rowsCreated = 0;
- Info info = null;
- long start = 0, duration = 0, totalDuration = 0;
- SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
- Connection connection = null;
- try {
- connection = pUtil.getConnection();
- long logStartTime = System.currentTimeMillis();
- for (int i = 0; i < rowCount; i++) {
- String sql = buildSql(columns, tableName);
- PreparedStatement stmt = connection
- .prepareStatement(sql);
- stmt = buildStatement(scenario, columns, stmt, simpleDateFormat);
- start = System.currentTimeMillis();
- rowsCreated += stmt.executeUpdate();
- stmt.close();
- if ((i % getBatchSize()) == 0) {
- connection.commit();
- duration = System.currentTimeMillis() - start;
- logger.info("Committed Batch. Total " + tableName + " rows for this thread (" + this.hashCode() + ") in ("
- + duration + ") Ms");
-
- if (i % PherfConstants.LOG_PER_NROWS == 0 && i != 0) {
- dataLoadThreadTime.add(tableName, Thread.currentThread().getName(), i, System.currentTimeMillis() - logStartTime);
- logStartTime = System.currentTimeMillis();
- }
- }
- }
- } finally {
- if (connection != null) {
- try {
- connection.commit();
- duration = System.currentTimeMillis() - start;
- logger.info("Committed Final Batch. Duration (" + duration + ") Ms");
- connection.close();
- } catch (SQLException e) {
- // Swallow since we are closing anyway
- e.printStackTrace();
- }
- }
- }
- totalDuration = System.currentTimeMillis() - start;
- return new Info(totalDuration, rowsCreated);
- }
- });
- return future;
- }
-
- private PreparedStatement buildStatement(Scenario scenario,
- List<Column> columns, PreparedStatement statement, SimpleDateFormat simpleDateFormat) throws Exception {
- int count = 1;
- for (Column column : columns) {
-
- DataValue dataValue = getRulesApplier().getDataForRule(scenario,
- column);
- switch (column.getType()) {
- case VARCHAR:
- if (dataValue.getValue().equals("")) {
- statement.setNull(count, Types.VARCHAR);
- } else {
- statement.setString(count, dataValue.getValue());
- }
- break;
- case CHAR:
- if (dataValue.getValue().equals("")) {
- statement.setNull(count, Types.CHAR);
- } else {
- statement.setString(count, dataValue.getValue());
- }
- break;
- case DECIMAL:
- if (dataValue.getValue().equals("")) {
- statement.setNull(count, Types.DECIMAL);
- } else {
- statement.setBigDecimal(count,
- new BigDecimal(dataValue.getValue()));
- }
- break;
- case INTEGER:
- if (dataValue.getValue().equals("")) {
- statement.setNull(count, Types.INTEGER);
- } else {
- statement.setInt(count,
- Integer.parseInt(dataValue.getValue()));
- }
- break;
- case DATE:
- if (dataValue.getValue().equals("")) {
- statement.setNull(count, Types.DATE);
- } else {
- Date date = new java.sql.Date(simpleDateFormat.parse(dataValue.getValue()).getTime());
- statement.setDate(count, date);
- }
- break;
- default:
- break;
- }
- count++;
- }
- return statement;
- }
-
- private String buildSql(final List<Column> columns, final String tableName) {
- StringBuilder builder = new StringBuilder();
- builder.append("upsert into ");
- builder.append(tableName);
- builder.append(" (");
- int count = 1;
- for (Column column : columns) {
- builder.append(column.getName());
- if (count < columns.size()) {
- builder.append(",");
- } else {
- builder.append(")");
- }
- count++;
- }
- builder.append(" VALUES (");
- for (int i = 0; i < columns.size(); i++) {
- if (i < columns.size() - 1) {
- builder.append("?,");
- } else {
- builder.append("?)");
- }
- }
- return builder.toString();
- }
-
- public XMLConfigParser getParser() {
- return parser;
- }
-
- public RulesApplier getRulesApplier() {
- return rulesApplier;
- }
-
- public int getBatchSize() {
- return batchSize;
- }
-
- public int getThreadPoolSize() {
- return threadPoolSize;
- }
-
- private class Info {
-
- private final int rowCount;
- private final long duration;
-
- public Info(long duration, int rows) {
- this(0, 0, 0, duration, rows);
- }
-
- public Info(int regionSize, int completedIterations, int timesSeen,
- long duration, int rows) {
- this.duration = duration;
- this.rowCount = rows;
- }
-
- public long getDuration() {
- return duration;
- }
-
- public int getRowCount() {
- return rowCount;
- }
- }
-}
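For reference, the removed buildSql()/buildStatement() pair (whose role moves to WriteWorkload) generated parameterized UPSERT statements. For a hypothetical three-column table the output looks like this; the table and column names are illustrative:

    // What buildSql(columns, "MY_TABLE") produced for columns ID, NAME, CREATED:
    String sql = "upsert into MY_TABLE (ID,NAME,CREATED) VALUES (?,?,?)";
    // buildStatement() then bound each '?' with setString/setInt/setBigDecimal/
    // setDate, mapping empty rule values to setNull with the matching Types code.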
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataLoadThreadTime.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataLoadThreadTime.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataLoadThreadTime.java
index 23dcdd5..e5553cc 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataLoadThreadTime.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataLoadThreadTime.java
@@ -18,61 +18,68 @@
package org.apache.phoenix.pherf.result;
+import org.apache.phoenix.pherf.PherfConstants;
+
import java.util.ArrayList;
import java.util.List;
-import org.apache.phoenix.pherf.PherfConstants;
-
public class DataLoadThreadTime {
- private List<WriteThreadTime> threadTime = new ArrayList<WriteThreadTime>();
+ private List<WriteThreadTime> threadTime = new ArrayList<WriteThreadTime>();
+
+ public List<WriteThreadTime> getThreadTime() {
+ return threadTime;
+ }
- public List<WriteThreadTime> getThreadTime() {
- return threadTime;
- }
+ public void add(String tableName, String threadName, long rowsUpserted,
+ long timeInMsPerMillionRows) {
+ threadTime.add(new WriteThreadTime(tableName, threadName, rowsUpserted,
+ timeInMsPerMillionRows));
+ }
- public void add(String tableName, String threadName, int rowsUpserted, long timeInMsPerMillionRows) {
- threadTime.add(new WriteThreadTime(tableName, threadName, rowsUpserted, timeInMsPerMillionRows));
- }
-
- public String getCsvTitle() {
- return "TABLE_NAME,THREAD_NAME,ROWS_UPSERTED,TIME_IN_MS_PER_" + PherfConstants.LOG_PER_NROWS + "_ROWS\n";
- }
+ public String getCsvTitle() {
+ return "TABLE_NAME,THREAD_NAME,ROWS_UPSERTED,TIME_IN_MS_PER_" + PherfConstants.LOG_PER_NROWS
+ + "_ROWS\n";
+ }
}
class WriteThreadTime {
- private String tableName;
- private String threadName;
- private int rowsUpserted;
- private long timeInMsPerMillionRows;
-
- public WriteThreadTime(String tableName, String threadName, int rowsUpserted, long timeInMsPerMillionRows) {
- this.tableName = tableName;
- this.threadName = threadName;
- this.rowsUpserted = rowsUpserted;
- this.timeInMsPerMillionRows = timeInMsPerMillionRows;
- }
-
- public String getTableName() {
- return tableName;
- }
- public String getThreadName() {
- return threadName;
- }
- public long getTimeInMsPerMillionRows() {
- return timeInMsPerMillionRows;
- }
+ private String tableName;
+ private String threadName;
+ private long rowsUpserted;
+ private long timeInMsPerMillionRows;
+
+ public WriteThreadTime(String tableName, String threadName, long rowsUpserted,
+ long timeInMsPerMillionRows) {
+ this.tableName = tableName;
+ this.threadName = threadName;
+ this.rowsUpserted = rowsUpserted;
+ this.timeInMsPerMillionRows = timeInMsPerMillionRows;
+ }
+
+ public String getTableName() {
+ return tableName;
+ }
+
+ public String getThreadName() {
+ return threadName;
+ }
+
+ public long getTimeInMsPerMillionRows() {
+ return timeInMsPerMillionRows;
+ }
- public List<ResultValue> getCsvRepresentation(ResultUtil util) {
+ public List<ResultValue> getCsvRepresentation(ResultUtil util) {
List<ResultValue> rowValues = new ArrayList<>();
rowValues.add(new ResultValue(util.convertNull(getTableName())));
rowValues.add(new ResultValue(util.convertNull(getThreadName())));
rowValues.add(new ResultValue(util.convertNull(String.valueOf(getRowsUpserted()))));
- rowValues.add(new ResultValue(util.convertNull(String.valueOf(getTimeInMsPerMillionRows()))));
+ rowValues.add(new ResultValue(
+ util.convertNull(String.valueOf(getTimeInMsPerMillionRows()))));
return rowValues;
- }
+ }
- public int getRowsUpserted() {
- return rowsUpserted;
- }
+ public long getRowsUpserted() {
+ return rowsUpserted;
+ }
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataLoadTimeSummary.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataLoadTimeSummary.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataLoadTimeSummary.java
index bb23f16..0ff5c59 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataLoadTimeSummary.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataLoadTimeSummary.java
@@ -22,29 +22,29 @@ import java.util.ArrayList;
import java.util.List;
public class DataLoadTimeSummary {
- private List<TableLoadTime> tableLoadTime = new ArrayList<TableLoadTime>();
+ private List<TableLoadTime> tableLoadTime = new ArrayList<TableLoadTime>();
- public List<TableLoadTime> getTableLoadTime() {
- return tableLoadTime;
- }
-
- public void add(String tableName, int rowCount, int durationInMs) {
- tableLoadTime.add(new TableLoadTime(tableName, rowCount, durationInMs));
- }
+ public List<TableLoadTime> getTableLoadTime() {
+ return tableLoadTime;
+ }
+
+ public void add(String tableName, int rowCount, int durationInMs) {
+ tableLoadTime.add(new TableLoadTime(tableName, rowCount, durationInMs));
+ }
}
class TableLoadTime {
- private int durationInMs;
- private String tableName;
- private int rowCount;
+ private int durationInMs;
+ private String tableName;
+ private int rowCount;
+
+ public TableLoadTime(String tableName, int rowCount, int durationInMs) {
+ this.tableName = tableName;
+ this.rowCount = rowCount;
+ this.durationInMs = durationInMs;
+ }
- public TableLoadTime(String tableName, int rowCount, int durationInMs) {
- this.tableName = tableName;
- this.rowCount = rowCount;
- this.durationInMs = durationInMs;
- }
-
- public List<ResultValue> getCsvRepresentation(ResultUtil util) {
+ public List<ResultValue> getCsvRepresentation(ResultUtil util) {
List<ResultValue> rowValues = new ArrayList<>();
rowValues.add(new ResultValue(util.convertNull(getTableName())));
rowValues.add(new ResultValue(util.convertNull(String.valueOf(getRowCount()))));
@@ -53,15 +53,15 @@ class TableLoadTime {
return rowValues;
}
- public int getDurationInMs() {
- return durationInMs;
- }
+ public int getDurationInMs() {
+ return durationInMs;
+ }
- public String getTableName() {
- return tableName;
- }
+ public String getTableName() {
+ return tableName;
+ }
- public int getRowCount() {
- return rowCount;
- }
+ public int getRowCount() {
+ return rowCount;
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataModelResult.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataModelResult.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataModelResult.java
index 72920fa..5c07ffe 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataModelResult.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/DataModelResult.java
@@ -18,61 +18,57 @@
package org.apache.phoenix.pherf.result;
-import java.util.ArrayList;
-import java.util.List;
+import org.apache.phoenix.pherf.configuration.DataModel;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
+import java.util.ArrayList;
+import java.util.List;
-import org.apache.phoenix.pherf.configuration.DataModel;
+@XmlRootElement(namespace = "org.apache.phoenix.pherf.result") public class DataModelResult
+ extends DataModel {
+ private List<ScenarioResult> scenarioResult = new ArrayList<ScenarioResult>();
+ private String zookeeper;
-@XmlRootElement(namespace = "org.apache.phoenix.pherf.result")
-public class DataModelResult extends DataModel {
- private List<ScenarioResult> scenarioResult = new ArrayList<ScenarioResult>();
- private String zookeeper;
+ public List<ScenarioResult> getScenarioResult() {
+ return scenarioResult;
+ }
- public List<ScenarioResult> getScenarioResult() {
- return scenarioResult;
- }
+ @SuppressWarnings("unused") public void setScenarioResult(List<ScenarioResult> scenarioResult) {
+ this.scenarioResult = scenarioResult;
+ }
- @SuppressWarnings("unused")
- public void setScenarioResult(List<ScenarioResult> scenarioResult) {
- this.scenarioResult = scenarioResult;
- }
-
- public DataModelResult() {
- }
+ public DataModelResult() {
+ }
- private DataModelResult(String name, String release, String zookeeper) {
+ private DataModelResult(String name, String zookeeper) {
this.setName(name);
- this.setRelease(release);
this.zookeeper = zookeeper;
}
/**
* Copy constructor
- *
+ *
* @param dataModelResult
*/
public DataModelResult(DataModelResult dataModelResult) {
- this(dataModelResult.getName(), dataModelResult.getRelease(), dataModelResult.getZookeeper());
+ this(dataModelResult.getName(), dataModelResult.getZookeeper());
this.scenarioResult = dataModelResult.getScenarioResult();
}
-
- public DataModelResult(DataModel dataModel, String zookeeper) {
- this(dataModel.getName(), dataModel.getRelease(), zookeeper);
- }
-
- public DataModelResult(DataModel dataModel) {
- this(dataModel, null);
- }
- @XmlAttribute()
- public String getZookeeper() {
- return zookeeper;
- }
+ public DataModelResult(DataModel dataModel, String zookeeper) {
+ this(dataModel.getName(), zookeeper);
+ }
+
+ public DataModelResult(DataModel dataModel) {
+ this(dataModel, null);
+ }
+
+ @XmlAttribute() public String getZookeeper() {
+ return zookeeper;
+ }
- public void setZookeeper(String zookeeper) {
- this.zookeeper = zookeeper;
- }
+ public void setZookeeper(String zookeeper) {
+ this.zookeeper = zookeeper;
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QueryResult.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QueryResult.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QueryResult.java
index b5fd082..1a682da 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QueryResult.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QueryResult.java
@@ -18,14 +18,14 @@
package org.apache.phoenix.pherf.result;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.List;
-
import org.apache.phoenix.pherf.PherfConstants.RunMode;
import org.apache.phoenix.pherf.configuration.Query;
import org.apache.phoenix.util.DateUtil;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+
public class QueryResult extends Query {
private List<ThreadTime> threadTimes = new ArrayList<ThreadTime>();
@@ -47,8 +47,7 @@ public class QueryResult extends Query {
this.setId(query.getId());
}
- @SuppressWarnings("unused")
- public QueryResult() {
+ @SuppressWarnings("unused") public QueryResult() {
}
public Date getStartTime() {
@@ -136,8 +135,8 @@ public class QueryResult extends Query {
}
private String getStartTimeText() {
- return (null == this.getStartTime())
- ? ""
- : DateUtil.DEFAULT_MS_DATE_FORMATTER.format(this.getStartTime());
+ return (null == this.getStartTime()) ?
+ "" :
+ DateUtil.DEFAULT_MS_DATE_FORMATTER.format(this.getStartTime());
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QuerySetResult.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QuerySetResult.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QuerySetResult.java
index 9010c21..c2be5a3 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QuerySetResult.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/QuerySetResult.java
@@ -18,31 +18,31 @@
package org.apache.phoenix.pherf.result;
+import org.apache.phoenix.pherf.configuration.QuerySet;
+
import java.util.ArrayList;
import java.util.List;
-import org.apache.phoenix.pherf.configuration.QuerySet;
-
public class QuerySetResult extends QuerySet {
-
- private List<QueryResult> queryResults = new ArrayList<QueryResult>();
-
- public QuerySetResult(QuerySet querySet) {
- this.setConcurrency(querySet.getConcurrency());
- this.setNumberOfExecutions(querySet.getNumberOfExecutions());
- this.setExecutionDurationInMs(querySet.getExecutionDurationInMs());
- this.setExecutionType(querySet.getExecutionType());
- }
-
- public QuerySetResult() {
- }
-
- public List<QueryResult> getQueryResults() {
- return queryResults;
- }
+
+ private List<QueryResult> queryResults = new ArrayList<>();
+
+ public QuerySetResult(QuerySet querySet) {
+ this.setConcurrency(querySet.getConcurrency());
+ this.setNumberOfExecutions(querySet.getNumberOfExecutions());
+ this.setExecutionDurationInMs(querySet.getExecutionDurationInMs());
+ this.setExecutionType(querySet.getExecutionType());
+ }
+
+ public QuerySetResult() {
+ }
+
+ public List<QueryResult> getQueryResults() {
+ return queryResults;
+ }
@SuppressWarnings("unused")
public void setQueryResults(List<QueryResult> queryResults) {
- this.queryResults = queryResults;
- }
+ this.queryResults = queryResults;
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/Result.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/Result.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/Result.java
index 4ccdd2b..158ed11 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/Result.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/Result.java
@@ -18,10 +18,10 @@
package org.apache.phoenix.pherf.result;
-import java.util.List;
-
import org.apache.phoenix.pherf.result.file.ResultFileDetails;
+import java.util.List;
+
/**
* Common container for Pherf results.
*/
@@ -33,10 +33,9 @@ public class Result {
private final String header;
/**
- *
- * @param type {@link org.apache.phoenix.pherf.result.file.ResultFileDetails} Currently unused, but gives metadata about the
- * contents of the result.
- * @param header Used for CSV, otherwise pass null. For CSV pass comma separated string of header fields.
+ * @param type {@link org.apache.phoenix.pherf.result.file.ResultFileDetails} Currently unused, but gives metadata about the
+ * contents of the result.
+ * @param header Used for CSV, otherwise pass null. For CSV pass comma separated string of header fields.
* @param messageValues List<{@link ResultValue} All fields combined represent the data
* for a row to be written.
*/
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultHandler.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultHandler.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultHandler.java
index f650cbb..5b71300 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultHandler.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultHandler.java
@@ -29,9 +29,14 @@ import java.util.List;
*/
public interface ResultHandler {
public void write(Result result) throws Exception;
+
public void flush() throws Exception;
+
public void close() throws Exception;
+
public List<Result> read() throws Exception;
+
public boolean isClosed();
+
public ResultFileDetails getResultFileDetails();
}
[06/47] phoenix git commit: PHOENIX-1981 : PhoenixHBase Load and Store Funcs should handle all Pig data types
Posted by ma...@apache.org.
PHOENIX-1981 : PhoenixHBase Load and Store Funcs should handle all Pig data types
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a4aa780c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a4aa780c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a4aa780c
Branch: refs/heads/calcite
Commit: a4aa780c78f73cf0ee5f7d5e7afefd7ab581097a
Parents: 8a0dee7
Author: Prashant Kommireddi <pk...@pkommireddi-ltm.internal.salesforce.com>
Authored: Mon May 18 19:48:30 2015 -0700
Committer: Eli Levine <el...@apache.org>
Committed: Mon Jun 15 18:17:45 2015 -0700
----------------------------------------------------------------------
.../src/main/java/org/apache/phoenix/pig/util/TypeUtil.java | 8 +++-----
.../test/java/org/apache/phoenix/pig/util/TypeUtilTest.java | 8 +++-----
2 files changed, 6 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a4aa780c/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java b/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
index c8bc9d8..6e32fb5 100644
--- a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
+++ b/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
@@ -1,17 +1,15 @@
/*
- * Copyright 2010 The Apache Software Foundation
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
- *distributed with this work for additional information
+ * distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
- * "License"); you maynot use this file except in compliance
+ * "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
- * Unless required by applicablelaw or agreed to in writing, software
+ * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a4aa780c/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
----------------------------------------------------------------------
diff --git a/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java b/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
index 56167f6..0b44d2b 100644
--- a/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
+++ b/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
@@ -1,17 +1,15 @@
/*
- * Copyright 2010 The Apache Software Foundation
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
- *distributed with this work for additional information
+ * distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
- * "License"); you maynot use this file except in compliance
+ * "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
- * Unless required by applicablelaw or agreed to in writing, software
+ * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
[16/47] phoenix git commit: PHOENIX-2057 Acquire lock in MetaDataEndPointImpl.addRowsToChildViews() before calling doGetTable()
Posted by ma...@apache.org.
PHOENIX-2057 Acquire lock in MetaDataEndPointImpl.addRowsToChildViews() before calling doGetTable()
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/466eeb35
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/466eeb35
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/466eeb35
Branch: refs/heads/calcite
Commit: 466eeb35f904c1c768dd3da7b396001826a1b40c
Parents: e78eb6f
Author: Samarth <sa...@salesforce.com>
Authored: Thu Jun 18 20:14:53 2015 -0700
Committer: Samarth <sa...@salesforce.com>
Committed: Thu Jun 18 20:14:53 2015 -0700
----------------------------------------------------------------------
.../phoenix/coprocessor/MetaDataEndpointImpl.java | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/466eeb35/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 077e325..b848565 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -1160,13 +1160,15 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
- private static void acquireLock(Region region, byte[] key, List<RowLock> locks)
+
+ private static RowLock acquireLock(Region region, byte[] key, List<RowLock> locks)
throws IOException {
RowLock rowLock = region.getRowLock(key, true);
if (rowLock == null) {
throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
}
locks.add(rowLock);
+ return rowLock;
}
private static final byte[] PHYSICAL_TABLE_BYTES = new byte[] {PTable.LinkType.PHYSICAL_TABLE.getSerializedValue()};
@@ -1579,18 +1581,16 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
byte[] viewSchemaName = rowViewKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
byte[] viewName = rowViewKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
- PTable view = doGetTable(viewKey, clientTimeStamp);
+ // lock the rows corresponding to views so that no other thread can modify the view meta-data
+ RowLock viewRowLock = acquireLock(region, viewKey, locks);
+ PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock);
if (view.getBaseColumnCount() == QueryConstants.DIVORCED_VIEW_BASE_COLUMN_COUNT) {
// if a view has divorced itself from the base table, we don't allow schema changes
// to be propagated to it.
return;
}
- // lock the rows corresponding to views so that no other thread can modify the view meta-data
- acquireLock(region, viewKey, locks);
-
int deltaNumberOfColumns = 0;
-
for (Mutation m : tableMetadata) {
byte[][] rkmd = new byte[5][];
int pkCount = getVarChars(m.getRow(), rkmd);
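The reorder above is a lock-before-read fix: previously doGetTable() read the view metadata and only then acquired the row lock, leaving a window in which a concurrent writer could change the view between the read and the lock. The same idea sketched with plain java.util.concurrent locks rather than HBase's RowLock; readView/applySchemaChange are stand-ins:

    import java.util.concurrent.locks.ReentrantLock;

    // Lock-before-read sketch: taking the lock first makes the
    // read-check-modify sequence atomic with respect to other writers.
    class LockBeforeRead {
        private final ReentrantLock rowLock = new ReentrantLock();

        void propagateToView(byte[] viewKey) {
            rowLock.lock();                      // acquire BEFORE reading
            try {
                Object view = readView(viewKey); // stand-in for doGetTable()
                if (isDivorced(view)) {
                    return;                      // state cannot change under us
                }
                applySchemaChange(view);         // stand-in for the mutation
            } finally {
                rowLock.unlock();
            }
        }

        private Object readView(byte[] key) { return new Object(); } // stub
        private boolean isDivorced(Object view) { return false; }    // stub
        private void applySchemaChange(Object view) { }              // stub
    }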
[24/47] phoenix git commit: PHOENIX-2030 CsvBulkLoadTool should use logical name of the table for output directory suffix (Rajeshbabu)
Posted by ma...@apache.org.
PHOENIX-2030 CsvBulkLoadTool should use logical name of the table for output directory suffix (Rajeshbabu)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3cf22a7d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3cf22a7d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3cf22a7d
Branch: refs/heads/calcite
Commit: 3cf22a7de4eaec6978763b6961d73aa9eaa07015
Parents: 50f3a04
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Thu Jun 25 01:16:51 2015 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Thu Jun 25 01:16:51 2015 +0530
----------------------------------------------------------------------
.../phoenix/mapreduce/CsvBulkLoadToolIT.java | 19 +++++++++++++++++++
.../phoenix/mapreduce/CsvBulkLoadTool.java | 2 +-
2 files changed, 20 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3cf22a7d/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolIT.java
index 392395d..6bcc221 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/CsvBulkLoadToolIT.java
@@ -39,6 +39,7 @@ import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
import org.apache.phoenix.jdbc.PhoenixDriver;
import org.apache.phoenix.util.DateUtil;
import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.QueryUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -206,6 +207,8 @@ public class CsvBulkLoadToolIT {
String ddl = "CREATE LOCAL INDEX TABLE6_IDX ON TABLE6 "
+ " (FIRST_NAME ASC)";
stmt.execute(ddl);
+ ddl = "CREATE LOCAL INDEX TABLE6_IDX2 ON TABLE6 " + " (LAST_NAME ASC)";
+ stmt.execute(ddl);
FileSystem fs = FileSystem.get(hbaseTestUtil.getConfiguration());
FSDataOutputStream outputStream = fs.create(new Path("/tmp/input3.csv"));
@@ -228,6 +231,22 @@ public class CsvBulkLoadToolIT {
assertEquals("FirstName 2", rs.getString(2));
rs.close();
+ rs =
+ stmt.executeQuery("EXPLAIN SELECT id, FIRST_NAME FROM TABLE6 where first_name='FirstName 2'");
+ assertEquals(
+ "CLIENT 1-CHUNK PARALLEL 1-WAY RANGE SCAN OVER _LOCAL_IDX_TABLE6 [-32768,'FirstName 2']\n"
+ + " SERVER FILTER BY FIRST KEY ONLY", QueryUtil.getExplainPlan(rs));
+ rs.close();
+ rs = stmt.executeQuery("SELECT id, LAST_NAME FROM TABLE6 where last_name='LastName 2'");
+ assertTrue(rs.next());
+ assertEquals(2, rs.getInt(1));
+ assertEquals("LastName 2", rs.getString(2));
+ rs.close();
+ rs =
+ stmt.executeQuery("EXPLAIN SELECT id, LAST_NAME FROM TABLE6 where last_name='LastName 2'");
+ assertEquals(
+ "CLIENT 1-CHUNK PARALLEL 1-WAY RANGE SCAN OVER _LOCAL_IDX_TABLE6 [-32767,'LastName 2']\n"
+ + " SERVER FILTER BY FIRST KEY ONLY", QueryUtil.getExplainPlan(rs));
stmt.close();
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3cf22a7d/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
index 9e27bac..5270277 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
@@ -265,7 +265,7 @@ public class CsvBulkLoadTool extends Configured implements Tool {
JobManager.createThreadPoolExec(Integer.MAX_VALUE, 5, 20, useInstrumentedPool);
try{
for (TargetTableRef table : tablesToBeLoaded) {
- Path tablePath = new Path(outputPath, table.getPhysicalName());
+ Path tablePath = new Path(outputPath, table.getLogicalName());
Configuration jobConf = new Configuration(conf);
jobConf.set(CsvToKeyValueMapper.TABLE_NAME_CONFKEY, qualifiedTableName);
if (qualifiedTableName.compareToIgnoreCase(table.getLogicalName()) != 0) {
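The one-line change above matters most for local indexes. As the new IT shows, TABLE6_IDX and TABLE6_IDX2 both resolve to the shared physical table _LOCAL_IDX_TABLE6, so staging directories keyed on the physical name would collide; logical names stay unique. A hedged illustration (paths are examples only):

    Path outputPath = new Path("/tmp/phoenix-bulkload");
    // Physical-name scheme: both index jobs would write to the same directory.
    //   new Path(outputPath, "_LOCAL_IDX_TABLE6")   // for TABLE6_IDX
    //   new Path(outputPath, "_LOCAL_IDX_TABLE6")   // for TABLE6_IDX2 -- collision
    // Logical-name scheme: one directory per table or index.
    Path idx1Dir = new Path(outputPath, "TABLE6_IDX");
    Path idx2Dir = new Path(outputPath, "TABLE6_IDX2");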
[14/47] phoenix git commit: PHOENIX-1504 Support adding column to a
table that has views (Samarth Jain/Dave Hacker)
Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e78eb6fa/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 3a0b03b..d1b3b27 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -67,7 +67,6 @@ import org.apache.phoenix.util.KeyValueUtil;
import org.apache.phoenix.util.SchemaUtil;
import org.apache.phoenix.util.StringUtil;
-import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
@@ -209,6 +208,8 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
public static final byte[] IS_VIEW_REFERENCED_BYTES = Bytes.toBytes(IS_VIEW_REFERENCED);
public static final String VIEW_INDEX_ID = "VIEW_INDEX_ID";
public static final byte[] VIEW_INDEX_ID_BYTES = Bytes.toBytes(VIEW_INDEX_ID);
+ public static final String BASE_COLUMN_COUNT = "BASE_COLUMN_COUNT";
+ public static final byte[] BASE_COLUMN_COUNT_BYTES = Bytes.toBytes(BASE_COLUMN_COUNT);
public static final String TABLE_FAMILY = QueryConstants.DEFAULT_COLUMN_FAMILY;
public static final byte[] TABLE_FAMILY_BYTES = QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e78eb6fa/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index d6d5df9..c5dde10 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.query;
import static org.apache.hadoop.hbase.HColumnDescriptor.TTL;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES;
import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_DROP_METADATA;
+import static org.apache.phoenix.util.UpgradeUtil.upgradeTo4_5_0;
import java.io.IOException;
import java.sql.SQLException;
@@ -116,6 +117,7 @@ import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver.ConnectionInfo;
import org.apache.phoenix.parse.PFunction;
import org.apache.phoenix.protobuf.ProtobufUtil;
+import org.apache.phoenix.schema.ColumnAlreadyExistsException;
import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
import org.apache.phoenix.schema.EmptySequenceCacheException;
import org.apache.phoenix.schema.FunctionNotFoundException;
@@ -141,6 +143,7 @@ import org.apache.phoenix.schema.stats.PTableStats;
import org.apache.phoenix.schema.stats.StatisticsUtil;
import org.apache.phoenix.schema.types.PBoolean;
import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PInteger;
import org.apache.phoenix.schema.types.PLong;
import org.apache.phoenix.schema.types.PUnsignedTinyint;
import org.apache.phoenix.util.ByteUtil;
@@ -1823,21 +1826,16 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
- /**
- * Keeping this to use for further upgrades. This method closes the oldMetaConnection.
- */
- private PhoenixConnection addColumnsIfNotExists(PhoenixConnection oldMetaConnection,
- String tableName, long timestamp, String columns) throws SQLException {
-
+ private PhoenixConnection addColumn(PhoenixConnection oldMetaConnection, String tableName, long timestamp, String columns, boolean addIfNotExists) throws SQLException {
Properties props = new Properties(oldMetaConnection.getClientInfo());
props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp));
// Cannot go through DriverManager or you end up in an infinite loop because it'll call init again
PhoenixConnection metaConnection = new PhoenixConnection(this, oldMetaConnection.getURL(), props, oldMetaConnection.getMetaDataCache());
SQLException sqlE = null;
try {
- metaConnection.createStatement().executeUpdate("ALTER TABLE " + tableName + " ADD IF NOT EXISTS " + columns );
+ metaConnection.createStatement().executeUpdate("ALTER TABLE " + tableName + " ADD " + (addIfNotExists ? "IF NOT EXISTS " : "") + columns);
} catch (SQLException e) {
- logger.warn("addColumnsIfNotExists failed due to:" + e);
+ logger.warn("Add column failed due to:" + e);
sqlE = e;
} finally {
try {
@@ -1855,6 +1853,14 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
return metaConnection;
}
+
+ /**
+ * Keeping this to use for further upgrades. This method closes the oldMetaConnection.
+ */
+ private PhoenixConnection addColumnsIfNotExists(PhoenixConnection oldMetaConnection,
+ String tableName, long timestamp, String columns) throws SQLException {
+ return addColumn(oldMetaConnection, tableName, timestamp, columns, true);
+ }
@Override
public void init(final String url, final Properties props) throws SQLException {
@@ -1926,21 +1932,36 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
}
- // If the server side schema is at before MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0 then
- // we need to add INDEX_TYPE and INDEX_DISABLE_TIMESTAMP columns too.
- // TODO: Once https://issues.apache.org/jira/browse/PHOENIX-1614 is fixed,
- // we should just have a ALTER TABLE ADD IF NOT EXISTS statement with all
- // the column names that have been added to SYSTEM.CATALOG since 4.0.
+ // If the server side schema is before MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0 then
+ // we need to add INDEX_TYPE and INDEX_DISABLE_TIMESTAMP columns too.
+ // TODO: Once https://issues.apache.org/jira/browse/PHOENIX-1614 is fixed,
+ // we should just have a ALTER TABLE ADD IF NOT EXISTS statement with all
+ // the column names that have been added to SYSTEM.CATALOG since 4.0.
if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0) {
columnsToAdd += ", " + PhoenixDatabaseMetaData.INDEX_TYPE + " " + PUnsignedTinyint.INSTANCE.getSqlTypeName()
+ ", " + PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP + " " + PLong.INSTANCE.getSqlTypeName();
}
-
// Ugh..need to assign to another local variable to keep eclipse happy.
PhoenixConnection newMetaConnection = addColumnsIfNotExists(metaConnection,
PhoenixDatabaseMetaData.SYSTEM_CATALOG,
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, columnsToAdd);
metaConnection = newMetaConnection;
+
+ if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_5_0) {
+ columnsToAdd = PhoenixDatabaseMetaData.BASE_COLUMN_COUNT + " "
+ + PInteger.INSTANCE.getSqlTypeName();
+ try {
+ addColumn(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG,
+ MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, columnsToAdd, false);
+ upgradeTo4_5_0(metaConnection);
+ } catch (ColumnAlreadyExistsException ignored) {
+ /*
+ * Upgrade to 4.5 is a slightly special case. We use the fact that the column
+ * BASE_COLUMN_COUNT is already part of the meta-data schema as the signal that
+ * the server side upgrade has finished or is in progress.
+ */
+ }
+ }
}
int nSaltBuckets = ConnectionQueryServicesImpl.this.props.getInt(QueryServices.SEQUENCE_SALT_BUCKETS_ATTRIB,
QueryServicesOptions.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS);
@@ -1983,6 +2004,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
} else {
nSequenceSaltBuckets = getSaltBuckets(e);
}
+
}
try {
metaConnection.createStatement().executeUpdate(
@@ -2002,6 +2024,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
} catch (NewerTableAlreadyExistsException e) {
} catch (TableAlreadyExistsException e) {
}
+
} catch (Exception e) {
if (e instanceof SQLException) {
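The BASE_COLUMN_COUNT addition above deliberately omits IF NOT EXISTS so that the column itself serves as the upgrade marker. A condensed sketch of the idiom, with names taken from the diff:

    try {
        // Without IF NOT EXISTS, only the first client to get here succeeds.
        addColumn(metaConnection, PhoenixDatabaseMetaData.SYSTEM_CATALOG,
                MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP,
                "BASE_COLUMN_COUNT INTEGER", false);
        upgradeTo4_5_0(metaConnection); // only the winner runs the migration
    } catch (ColumnAlreadyExistsException ignored) {
        // Another client already added the column: the upgrade has finished
        // or is in progress, so this client must not run it again.
    }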
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e78eb6fa/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
index 73d1123..f82c594 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
@@ -18,10 +18,13 @@
package org.apache.phoenix.query;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARG_POSITION;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.BASE_COLUMN_COUNT;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.BUFFER_LENGTH;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CACHE_SIZE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CHAR_OCTET_LENGTH;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CLASS_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_COUNT;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_DEF;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_FAMILY;
@@ -33,7 +36,9 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DATA_TABLE_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DATA_TYPE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DECIMAL_DIGITS;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_VALUE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DISABLE_WAL;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.FUNCTION_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.GUIDE_POSTS;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.GUIDE_POSTS_COUNT;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT;
@@ -43,9 +48,12 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INCREMENT_BY;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_STATE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_TYPE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_ARRAY;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_AUTOINCREMENT;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_CONSTANT;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_NULLABLE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_VIEW_REFERENCED;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.JAR_PATH;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.KEY_SEQ;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LAST_STATS_UPDATE_TIME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG;
@@ -56,6 +64,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MIN_KEY;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MIN_VALUE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MULTI_TENANT;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NULLABLE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NUM_ARGS;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NUM_PREC_RADIX;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ORDINAL_POSITION;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PHYSICAL_NAME;
@@ -63,7 +72,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PK_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.REF_GENERATION;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.REGION_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.REMARKS;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.STORE_NULLS;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.RETURN_TYPE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SALT_BUCKETS;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SCOPE_CATALOG;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SCOPE_SCHEMA;
@@ -76,27 +85,19 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SOURCE_DATA_TYPE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SQL_DATA_TYPE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SQL_DATETIME_SUB;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.START_WITH;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.STORE_NULLS;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_TABLE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SEQ_NUM;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_TYPE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TENANT_ID;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE_SEQUENCE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CLASS_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_TABLE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARG_POSITION;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.FUNCTION_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.JAR_PATH;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NUM_ARGS;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_ARRAY;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_CONSTANT;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_VALUE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.RETURN_TYPE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT;
@@ -184,6 +185,8 @@ public interface QueryConstants {
public static final BigDecimal BD_MILLIS_IN_DAY = BigDecimal.valueOf(QueryConstants.MILLIS_IN_DAY);
public static final int MAX_ALLOWED_NANOS = 999999999;
public static final int NANOS_IN_SECOND = BigDecimal.valueOf(Math.pow(10, 9)).intValue();
+ public static final int DIVORCED_VIEW_BASE_COLUMN_COUNT = -100;
+ public static final int BASE_TABLE_BASE_COLUMN_COUNT = -1;
public static final String CREATE_TABLE_METADATA =
// Do not use IF NOT EXISTS as we sometimes catch the TableAlreadyExists
// exception and add columns to the SYSTEM.TABLE dynamically.
@@ -241,7 +244,8 @@ public interface QueryConstants {
IS_AUTOINCREMENT + " VARCHAR," +
INDEX_TYPE + " UNSIGNED_TINYINT," +
INDEX_DISABLE_TIMESTAMP + " BIGINT," +
- STORE_NULLS + " BOOLEAN," +
+ STORE_NULLS + " BOOLEAN," +
+ BASE_COLUMN_COUNT + " INTEGER," +
"CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + TENANT_ID + ","
+ TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" +
HConstants.VERSIONS + "=" + MetaDataProtocol.DEFAULT_MAX_META_DATA_VERSIONS + ",\n" +
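To make the two new sentinels concrete, a hedged helper (shouldPropagate is hypothetical, not part of the patch) showing how a caller interprets BASE_COLUMN_COUNT:

    // BASE_TABLE_BASE_COLUMN_COUNT (-1): header row belongs to a base table.
    // DIVORCED_VIEW_BASE_COLUMN_COUNT (-100): the view diverged from its base
    // table, so schema changes must not be propagated to it.
    // Any positive n: the view's first n columns still mirror the base table.
    static boolean shouldPropagate(PTable view) {
        int n = view.getBaseColumnCount();
        return n != QueryConstants.DIVORCED_VIEW_BASE_COLUMN_COUNT
                && n != QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT;
    }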
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e78eb6fa/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java
index b719aae..2a43679 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java
@@ -236,4 +236,9 @@ public class DelegateTable implements PTable {
public PName getParentSchemaName() {
return delegate.getParentSchemaName();
}
+
+ @Override
+ public int getBaseColumnCount() {
+ return delegate.getBaseColumnCount();
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e78eb6fa/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 75678fd..e7c3cd5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -22,7 +22,10 @@ import static com.google.common.collect.Sets.newLinkedHashSet;
import static com.google.common.collect.Sets.newLinkedHashSetWithExpectedSize;
import static org.apache.hadoop.hbase.HColumnDescriptor.TTL;
import static org.apache.phoenix.exception.SQLExceptionCode.INSUFFICIENT_MULTI_TENANT_COLUMNS;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARG_POSITION;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.BASE_COLUMN_COUNT;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CLASS_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_COUNT;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_DEF;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_FAMILY;
@@ -34,6 +37,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DECIMAL_DIGITS;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_VALUE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DISABLE_WAL;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.FUNCTION_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IMMUTABLE_ROWS;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_STATE;
@@ -41,6 +45,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_TYPE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_ARRAY;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_CONSTANT;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_VIEW_REFERENCED;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.JAR_PATH;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.KEY_SEQ;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LAST_STATS_UPDATE_TIME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE;
@@ -48,39 +53,34 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MAX_VALUE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MIN_VALUE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MULTI_TENANT;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NULLABLE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NUM_ARGS;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ORDINAL_POSITION;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PARENT_TENANT_ID;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PHYSICAL_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PK_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.REGION_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.RETURN_TYPE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SALT_BUCKETS;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SORT_ORDER;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.STORE_NULLS;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_TABLE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SEQ_NUM;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_TYPE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TENANT_ID;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARG_POSITION;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NUM_ARGS;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_TABLE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.FUNCTION_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CLASS_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.JAR_PATH;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.RETURN_TYPE;
+import static org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT;
import static org.apache.phoenix.query.QueryServices.DROP_METADATA_ATTRIB;
import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_DROP_METADATA;
import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.sql.Array;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ParameterMetaData;
@@ -115,7 +115,6 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.DynamicClassLoader;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.compile.ColumnResolver;
import org.apache.phoenix.compile.ExplainPlan;
@@ -137,8 +136,6 @@ import org.apache.phoenix.execute.MutationState;
import org.apache.phoenix.expression.Determinism;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.RowKeyColumnExpression;
-import org.apache.phoenix.expression.function.FunctionExpression;
-import org.apache.phoenix.expression.function.ScalarFunction;
import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
import org.apache.phoenix.index.IndexMaintainer;
import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -158,18 +155,16 @@ import org.apache.phoenix.parse.DropFunctionStatement;
import org.apache.phoenix.parse.DropIndexStatement;
import org.apache.phoenix.parse.DropSequenceStatement;
import org.apache.phoenix.parse.DropTableStatement;
-import org.apache.phoenix.parse.FunctionParseNode;
import org.apache.phoenix.parse.IndexKeyConstraint;
import org.apache.phoenix.parse.NamedTableNode;
import org.apache.phoenix.parse.PFunction;
+import org.apache.phoenix.parse.PFunction.FunctionArgument;
import org.apache.phoenix.parse.ParseNode;
import org.apache.phoenix.parse.ParseNodeFactory;
import org.apache.phoenix.parse.PrimaryKeyConstraint;
import org.apache.phoenix.parse.TableName;
import org.apache.phoenix.parse.UpdateStatisticsStatement;
-import org.apache.phoenix.parse.PFunction.FunctionArgument;
import org.apache.phoenix.query.ConnectionQueryServices.Feature;
-import org.apache.phoenix.query.HBaseFactoryProvider;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
@@ -225,8 +220,9 @@ public class MetaDataClient {
VIEW_TYPE + "," +
VIEW_INDEX_ID + "," +
INDEX_TYPE + "," +
- STORE_NULLS +
- ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
+ STORE_NULLS + "," +
+ BASE_COLUMN_COUNT +
+ ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
private static final String CREATE_LINK =
"UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " +
TENANT_ID + "," +
@@ -1878,6 +1874,11 @@ public class MetaDataClient {
tableUpsert.setByte(18, indexType.getSerializedValue());
}
tableUpsert.setBoolean(19, storeNulls);
+ if (parent != null && tableType == PTableType.VIEW) {
+ tableUpsert.setInt(20, parent.getColumns().size());
+ } else {
+ tableUpsert.setInt(20, BASE_TABLE_BASE_COLUMN_COUNT);
+ }
tableUpsert.execute();
tableMetaData.addAll(connection.getMutationState().toMutations().next().getSecond());
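A worked example (hypothetical schema) of what the new bind at position 20 records at CREATE time:

    // CREATE TABLE t (k VARCHAR PRIMARY KEY, v1 INTEGER, v2 INTEGER)
    //   -> header row gets BASE_COLUMN_COUNT = -1 (BASE_TABLE_BASE_COLUMN_COUNT)
    // CREATE VIEW w AS SELECT * FROM t
    //   -> parent has 3 columns, so the view's header row gets BASE_COLUMN_COUNT = 3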
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e78eb6fa/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
index e46dcb7..b983074 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
@@ -322,4 +322,5 @@ public interface PTable extends PMetaDataEntity {
IndexType getIndexType();
PTableStats getTableStats();
+ int getBaseColumnCount();
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e78eb6fa/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index b62dbf5..25b3e87 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -125,6 +125,7 @@ public class PTableImpl implements PTable {
private int estimatedSize;
private IndexType indexType;
private PTableStats tableStats = PTableStats.EMPTY_STATS;
+ private int baseColumnCount;
public PTableImpl() {
this.indexes = Collections.emptyList();
@@ -193,7 +194,7 @@ public class PTableImpl implements PTable {
table.getTenantId(), table.getSchemaName(), table.getTableName(), table.getType(), table.getIndexState(), timeStamp,
table.getSequenceNumber(), table.getPKName(), table.getBucketNum(), getColumnsToClone(table), parentSchemaName, table.getParentTableName(),
indexes, table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), viewStatement,
- table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getTableStats());
+ table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getTableStats(), table.getBaseColumnCount());
}
public static PTableImpl makePTable(PTable table, List<PColumn> columns) throws SQLException {
@@ -201,7 +202,7 @@ public class PTableImpl implements PTable {
table.getTenantId(), table.getSchemaName(), table.getTableName(), table.getType(), table.getIndexState(), table.getTimeStamp(),
table.getSequenceNumber(), table.getPKName(), table.getBucketNum(), columns, table.getParentSchemaName(), table.getParentTableName(),
table.getIndexes(), table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(),
- table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getTableStats());
+ table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getTableStats(), table.getBaseColumnCount());
}
public static PTableImpl makePTable(PTable table, long timeStamp, long sequenceNumber, List<PColumn> columns) throws SQLException {
@@ -209,7 +210,7 @@ public class PTableImpl implements PTable {
table.getTenantId(), table.getSchemaName(), table.getTableName(), table.getType(), table.getIndexState(), timeStamp,
sequenceNumber, table.getPKName(), table.getBucketNum(), columns, table.getParentSchemaName(), table.getParentTableName(), table.getIndexes(),
table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(), table.isWALDisabled(),
- table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getTableStats());
+ table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getTableStats(), table.getBaseColumnCount());
}
public static PTableImpl makePTable(PTable table, long timeStamp, long sequenceNumber, List<PColumn> columns, boolean isImmutableRows) throws SQLException {
@@ -217,7 +218,7 @@ public class PTableImpl implements PTable {
table.getTenantId(), table.getSchemaName(), table.getTableName(), table.getType(), table.getIndexState(), timeStamp,
sequenceNumber, table.getPKName(), table.getBucketNum(), columns, table.getParentSchemaName(), table.getParentTableName(),
table.getIndexes(), isImmutableRows, table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(),
- table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getTableStats());
+ table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getTableStats(), table.getBaseColumnCount());
}
public static PTableImpl makePTable(PTable table, long timeStamp, long sequenceNumber, List<PColumn> columns, boolean isImmutableRows, boolean isWalDisabled, boolean isMultitenant, boolean storeNulls) throws SQLException {
@@ -225,7 +226,7 @@ public class PTableImpl implements PTable {
table.getTenantId(), table.getSchemaName(), table.getTableName(), table.getType(), table.getIndexState(), timeStamp,
sequenceNumber, table.getPKName(), table.getBucketNum(), columns, table.getParentSchemaName(), table.getParentTableName(),
table.getIndexes(), isImmutableRows, table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(),
- isWalDisabled, isMultitenant, storeNulls, table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getTableStats());
+ isWalDisabled, isMultitenant, storeNulls, table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getTableStats(), table.getBaseColumnCount());
}
public static PTableImpl makePTable(PTable table, PIndexState state) throws SQLException {
@@ -234,7 +235,7 @@ public class PTableImpl implements PTable {
table.getSequenceNumber(), table.getPKName(), table.getBucketNum(), getColumnsToClone(table),
table.getParentSchemaName(), table.getParentTableName(), table.getIndexes(),
table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(),
- table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getTableStats());
+ table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getTableStats(), table.getBaseColumnCount());
}
public static PTableImpl makePTable(PTable table, PTableStats stats) throws SQLException {
@@ -243,7 +244,7 @@ public class PTableImpl implements PTable {
table.getSequenceNumber(), table.getPKName(), table.getBucketNum(), getColumnsToClone(table),
table.getParentSchemaName(), table.getParentTableName(), table.getIndexes(),
table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(),
- table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), stats);
+ table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), stats, table.getBaseColumnCount());
}
public static PTableImpl makePTable(PName tenantId, PName schemaName, PName tableName, PTableType type, PIndexState state, long timeStamp, long sequenceNumber,
@@ -253,18 +254,18 @@ public class PTableImpl implements PTable {
return new PTableImpl(tenantId, schemaName, tableName, type, state, timeStamp, sequenceNumber, pkName, bucketNum, columns, dataSchemaName,
dataTableName, indexes, isImmutableRows, physicalNames, defaultFamilyName,
viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexId,
- indexType, PTableStats.EMPTY_STATS);
+ indexType, PTableStats.EMPTY_STATS, QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT);
}
public static PTableImpl makePTable(PName tenantId, PName schemaName, PName tableName, PTableType type,
PIndexState state, long timeStamp, long sequenceNumber, PName pkName, Integer bucketNum,
List<PColumn> columns, PName dataSchemaName, PName dataTableName, List<PTable> indexes,
boolean isImmutableRows, List<PName> physicalNames, PName defaultFamilyName, String viewExpression,
- boolean disableWAL, boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId, IndexType indexType, @NotNull PTableStats stats)
+ boolean disableWAL, boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId, IndexType indexType, @NotNull PTableStats stats, int baseColumnCount)
throws SQLException {
return new PTableImpl(tenantId, schemaName, tableName, type, state, timeStamp, sequenceNumber, pkName,
bucketNum, columns, dataSchemaName, dataTableName, indexes, isImmutableRows, physicalNames,
- defaultFamilyName, viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexId, indexType, stats);
+ defaultFamilyName, viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexId, indexType, stats, baseColumnCount);
}
private PTableImpl(PName tenantId, PName schemaName, PName tableName, PTableType type, PIndexState state,
@@ -272,10 +273,10 @@ public class PTableImpl implements PTable {
PName parentSchemaName, PName parentTableName, List<PTable> indexes, boolean isImmutableRows,
List<PName> physicalNames, PName defaultFamilyName, String viewExpression, boolean disableWAL, boolean multiTenant,
boolean storeNulls, ViewType viewType, Short viewIndexId, IndexType indexType,
- PTableStats stats) throws SQLException {
+ PTableStats stats, int baseColumnCount) throws SQLException {
init(tenantId, schemaName, tableName, type, state, timeStamp, sequenceNumber, pkName, bucketNum, columns,
stats, schemaName, parentTableName, indexes, isImmutableRows, physicalNames, defaultFamilyName,
- viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexId, indexType);
+ viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexId, indexType, baseColumnCount);
}
@Override
@@ -303,7 +304,7 @@ public class PTableImpl implements PTable {
PName pkName, Integer bucketNum, List<PColumn> columns, PTableStats stats, PName parentSchemaName, PName parentTableName,
List<PTable> indexes, boolean isImmutableRows, List<PName> physicalNames, PName defaultFamilyName, String viewExpression, boolean disableWAL,
boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId,
- IndexType indexType ) throws SQLException {
+ IndexType indexType, int baseColumnCount) throws SQLException {
Preconditions.checkNotNull(schemaName);
Preconditions.checkArgument(tenantId==null || tenantId.getBytes().length > 0); // tenantId should be null or not empty
int estimatedSize = SizedUtil.OBJECT_SIZE * 2 + 23 * SizedUtil.POINTER_SIZE + 4 * SizedUtil.INT_SIZE + 2 * SizedUtil.LONG_SIZE + 2 * SizedUtil.INT_OBJECT_SIZE +
@@ -437,6 +438,7 @@ public class PTableImpl implements PTable {
}
this.estimatedSize = estimatedSize;
+ this.baseColumnCount = baseColumnCount;
}
@Override
@@ -968,13 +970,18 @@ public class PTableImpl implements PTable {
physicalNames.add(PNameFactory.newName(table.getPhysicalNames(i).toByteArray()));
}
}
+
+ int baseColumnCount = -1;
+ if (table.hasBaseColumnCount()) {
+ baseColumnCount = table.getBaseColumnCount();
+ }
try {
PTableImpl result = new PTableImpl();
result.init(tenantId, schemaName, tableName, tableType, indexState, timeStamp, sequenceNumber, pkName,
(bucketNum == NO_SALTING) ? null : bucketNum, columns, stats, schemaName,dataTableName, indexes,
isImmutableRows, physicalNames, defaultFamilyName, viewStatement, disableWAL,
- multiTenant, storeNulls, viewType, viewIndexId, indexType);
+ multiTenant, storeNulls, viewType, viewIndexId, indexType, baseColumnCount);
return result;
} catch (SQLException e) {
throw new RuntimeException(e); // Impossible
@@ -1063,6 +1070,7 @@ public class PTableImpl implements PTable {
builder.addPhysicalNames(ByteStringer.wrap(table.getPhysicalNames().get(i).getBytes()));
}
}
+ builder.setBaseColumnCount(table.getBaseColumnCount());
return builder.build();
}
@@ -1082,4 +1090,8 @@ public class PTableImpl implements PTable {
return parentSchemaName;
}
+ @Override
+ public int getBaseColumnCount() {
+ return baseColumnCount;
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e78eb6fa/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java
index 1e3516d..1f4a285 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java
@@ -253,13 +253,17 @@ public class ByteUtil {
public static byte[] concat(byte[] first, byte[]... rest) {
int totalLength = first.length;
for (byte[] array : rest) {
- totalLength += array.length;
+ if (array != null) {
+ totalLength += array.length;
+ }
}
byte[] result = Arrays.copyOf(first, totalLength);
int offset = first.length;
for (byte[] array : rest) {
- System.arraycopy(array, 0, result, offset, array.length);
- offset += array.length;
+ if (array != null) {
+ System.arraycopy(array, 0, result, offset, array.length);
+ offset += array.length;
+ }
}
return result;
}
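A short usage sketch of the hardened concat: with the null guards above, a null varargs element is treated as empty instead of raising a NullPointerException.

    byte[] head = Bytes.toBytes("row");
    byte[] out = ByteUtil.concat(head, null, Bytes.toBytes("-suffix"));
    // out equals Bytes.toBytes("row-suffix"); before this patch the null
    // element would have thrown a NullPointerException.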
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e78eb6fa/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index 86da5cc..dff6598 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -17,9 +17,35 @@
*/
package org.apache.phoenix.util;
+import static com.google.common.base.Preconditions.checkNotNull;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_FAMILY;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_SIZE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DATA_TYPE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DECIMAL_DIGITS;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ORDINAL_POSITION;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SORT_ORDER;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TENANT_ID;
+import static org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT;
+import static org.apache.phoenix.query.QueryConstants.DIVORCED_VIEW_BASE_COLUMN_COUNT;
+
import java.io.IOException;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
@@ -38,21 +64,43 @@ import org.apache.phoenix.coprocessor.MetaDataProtocol;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.schema.types.PInteger;
-import org.apache.phoenix.schema.types.PLong;
import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PNameFactory;
import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.LinkType;
import org.apache.phoenix.schema.SaltingUtil;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PLong;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.base.Objects;
import com.google.common.collect.Lists;
public class UpgradeUtil {
private static final Logger logger = LoggerFactory.getLogger(UpgradeUtil.class);
private static final byte[] SEQ_PREFIX_BYTES = ByteUtil.concat(QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("_SEQ_"));
+
+ public static String UPSERT_BASE_COLUMN_COUNT_IN_HEADER_ROW = "UPSERT "
+ + "INTO SYSTEM.CATALOG "
+ + "(TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, BASE_COLUMN_COUNT) "
+ + "VALUES (?, ?, ?, ?, ?, ?) ";
+ public static String SELECT_BASE_COLUMN_COUNT_FROM_HEADER_ROW = "SELECT "
+ + "BASE_COLUMN_COUNT "
+ + "FROM SYSTEM.CATALOG "
+ + "WHERE "
+ + "COLUMN_NAME IS NULL "
+ + "AND "
+ + "COLUMN_FAMILY IS NULL "
+ + "AND "
+ + "TENANT_ID %s "
+ + "AND "
+ + "TABLE_SCHEM %s "
+ + "AND "
+ + "TABLE_NAME = ? "
+ ;
+
private UpgradeUtil() {
}
@@ -394,5 +442,348 @@ public class UpgradeUtil {
keyValue.getTimestamp(), KeyValue.Type.codeToType(keyValue.getType()),
buf, keyValue.getValueOffset(), keyValue.getValueLength());
}
+
+ public static void upgradeTo4_5_0(PhoenixConnection metaConnection) throws SQLException {
+ String getBaseTableAndViews = "SELECT "
+ + COLUMN_FAMILY + " AS BASE_PHYSICAL_TABLE, "
+ + TENANT_ID + ", "
+ + TABLE_SCHEM + " AS VIEW_SCHEMA, "
+ + TABLE_NAME + " AS VIEW_NAME "
+ + "FROM " + SYSTEM_CATALOG_NAME
+ + " WHERE " + COLUMN_FAMILY + " IS NOT NULL " // column_family column points to the physical table name.
+ + " AND " + COLUMN_NAME + " IS NULL "
+ + " AND " + LINK_TYPE + " = ? ";
+ // Build a map of base table name -> list of views on the table.
+ Map<String, List<ViewKey>> parentTableViewsMap = new HashMap<>();
+ try (PreparedStatement stmt = metaConnection.prepareStatement(getBaseTableAndViews)) {
+ // Get back view rows that have links back to the base physical table. This takes care
+ // of cases when we have a hierarchy of views too.
+ stmt.setByte(1, LinkType.PHYSICAL_TABLE.getSerializedValue());
+ try (ResultSet rs = stmt.executeQuery()) {
+ while (rs.next()) {
+ // this is actually SCHEMANAME.TABLENAME
+ String parentTable = rs.getString("BASE_PHYSICAL_TABLE");
+ String tenantId = rs.getString(TENANT_ID);
+ String viewSchema = rs.getString("VIEW_SCHEMA");
+ String viewName = rs.getString("VIEW_NAME");
+ List<ViewKey> viewKeysList = parentTableViewsMap.get(parentTable);
+ if (viewKeysList == null) {
+ viewKeysList = new ArrayList<>();
+ parentTableViewsMap.put(parentTable, viewKeysList);
+ }
+ viewKeysList.add(new ViewKey(tenantId, viewSchema, viewName));
+ }
+ }
+ }
+
+ for (Entry<String, List<ViewKey>> entry : parentTableViewsMap.entrySet()) {
+ // Fetch column information for the base physical table
+ String physicalTable = entry.getKey();
+ String baseTableSchemaName = SchemaUtil.getSchemaNameFromFullName(physicalTable).equals(StringUtil.EMPTY_STRING) ? null : SchemaUtil.getSchemaNameFromFullName(physicalTable);
+ String baseTableName = SchemaUtil.getTableNameFromFullName(physicalTable);
+ List<ColumnDetails> basePhysicalTableColumns = new ArrayList<>();
+
+ // Columns fetched in order of ordinal position
+ String fetchColumnInfoForBasePhysicalTable = "SELECT " +
+ COLUMN_NAME + "," +
+ COLUMN_FAMILY + "," +
+ DATA_TYPE + "," +
+ COLUMN_SIZE + "," +
+ DECIMAL_DIGITS + "," +
+ ORDINAL_POSITION + "," +
+ SORT_ORDER + "," +
+ ARRAY_SIZE + " " +
+ "FROM SYSTEM.CATALOG " +
+ "WHERE " +
+ "TABLE_SCHEM %s " +
+ "AND TABLE_NAME = ? " +
+ "AND COLUMN_NAME IS NOT NULL " +
+ "ORDER BY " +
+ ORDINAL_POSITION;
+
+ PreparedStatement stmt = null;
+ if (baseTableSchemaName == null) {
+ fetchColumnInfoForBasePhysicalTable =
+ String.format(fetchColumnInfoForBasePhysicalTable, "IS NULL ");
+ stmt = metaConnection.prepareStatement(fetchColumnInfoForBasePhysicalTable);
+ stmt.setString(1, baseTableName);
+ } else {
+ fetchColumnInfoForBasePhysicalTable =
+ String.format(fetchColumnInfoForBasePhysicalTable, " = ? ");
+ stmt = metaConnection.prepareStatement(fetchColumnInfoForBasePhysicalTable);
+ stmt.setString(1, baseTableSchemaName);
+ stmt.setString(2, baseTableName);
+ }
+
+ try (ResultSet rs = stmt.executeQuery()) {
+ while (rs.next()) {
+ basePhysicalTableColumns.add(new ColumnDetails(rs.getString(COLUMN_FAMILY), rs
+ .getString(COLUMN_NAME), rs.getInt(ORDINAL_POSITION), rs
+ .getInt(DATA_TYPE), rs.getInt(COLUMN_SIZE), rs.getInt(DECIMAL_DIGITS),
+ rs.getInt(SORT_ORDER), rs.getInt(ARRAY_SIZE)));
+ }
+ }
+
+ // Fetch column information for all the views on the base physical table ordered by ordinal position.
+ List<ViewKey> viewKeys = entry.getValue();
+ StringBuilder sb = new StringBuilder();
+ sb.append("SELECT " +
+ TENANT_ID + "," +
+ TABLE_SCHEM + "," +
+ TABLE_NAME + "," +
+ COLUMN_NAME + "," +
+ COLUMN_FAMILY + "," +
+ DATA_TYPE + "," +
+ COLUMN_SIZE + "," +
+ DECIMAL_DIGITS + "," +
+ ORDINAL_POSITION + "," +
+ SORT_ORDER + "," +
+ ARRAY_SIZE + " " +
+ "FROM SYSTEM.CATALOG " +
+ "WHERE " +
+ COLUMN_NAME + " IS NOT NULL " +
+ "AND " +
+ ORDINAL_POSITION + " <= ? " + // fetch only those columns that would impact setting of base column count
+ "AND " +
+ "(" + TENANT_ID+ ", " + TABLE_SCHEM + ", " + TABLE_NAME + ") IN (");
+
+ int numViews = viewKeys.size();
+ for (int i = 0; i < numViews; i++) {
+ sb.append(" (?, ?, ?) ");
+ if (i < numViews - 1) {
+ sb.append(", ");
+ }
+ }
+ sb.append(" ) ");
+ sb.append(" GROUP BY " +
+ TENANT_ID + "," +
+ TABLE_SCHEM + "," +
+ TABLE_NAME + "," +
+ COLUMN_NAME + "," +
+ COLUMN_FAMILY + "," +
+ DATA_TYPE + "," +
+ COLUMN_SIZE + "," +
+ DECIMAL_DIGITS + "," +
+ ORDINAL_POSITION + "," +
+ SORT_ORDER + "," +
+ ARRAY_SIZE + " " +
+ "ORDER BY " +
+ TENANT_ID + "," + TABLE_SCHEM + ", " + TABLE_NAME + ", " + ORDINAL_POSITION);
+ String fetchViewColumnsSql = sb.toString();
+ stmt = metaConnection.prepareStatement(fetchViewColumnsSql);
+ int numColsInBaseTable = basePhysicalTableColumns.size();
+ int paramIndex = 1;
+ stmt.setInt(paramIndex++, numColsInBaseTable);
+ for (ViewKey view : viewKeys) {
+ stmt.setString(paramIndex++, view.tenantId);
+ stmt.setString(paramIndex++, view.schema);
+ stmt.setString(paramIndex++, view.name);
+ }
+ String currentTenantId = null;
+ String currentViewSchema = null;
+ String currentViewName = null;
+ try (ResultSet rs = stmt.executeQuery()) {
+ int numBaseTableColsMatched = 0;
+ boolean ignore = false;
+ boolean baseColumnCountUpserted = false;
+ while (rs.next()) {
+ String viewTenantId = rs.getString(TENANT_ID);
+ String viewSchema = rs.getString(TABLE_SCHEM);
+ String viewName = rs.getString(TABLE_NAME);
+ if (!(Objects.equal(viewTenantId, currentTenantId) && Objects.equal(viewSchema, currentViewSchema) && Objects.equal(viewName, currentViewName))) {
+ // We are about to iterate through columns of a different view. Check whether base column count was upserted.
+ // If it wasn't, it is likely that a column inherited from the base table was dropped from the view.
+ if (currentViewName != null && !baseColumnCountUpserted && numBaseTableColsMatched < numColsInBaseTable) {
+ upsertBaseColumnCountInHeaderRow(metaConnection, currentTenantId, currentViewSchema, currentViewName, DIVORCED_VIEW_BASE_COLUMN_COUNT);
+ }
+ // reset the values as we are now going to iterate over columns of a new view.
+ numBaseTableColsMatched = 0;
+ currentTenantId = viewTenantId;
+ currentViewSchema = viewSchema;
+ currentViewName = viewName;
+ ignore = false;
+ baseColumnCountUpserted = false;
+ }
+ if (!ignore) {
+ /*
+ * Iterate over all the columns of the base physical table and the columns of the view. Compare the
+ * two till one of the following happens:
+ *
+ * 1) We run into a view column which is different from column in the base physical table.
+ * This means that the view has divorced itself from the base physical table. In such a case
+ * we will set a special value for the base column count. That special value will also be used
+ * on the server side to filter out the divorced view so that meta-data changes on the base
+ * physical table are not propagated to it.
+ *
+ * 2) Every physical table column is present in the view. In that case we set the base column count
+ * as the number of columns in the base physical table. At that point we ignore rest of the columns
+ * of the view.
+ *
+ */
+ ColumnDetails baseTableColumn = basePhysicalTableColumns.get(numBaseTableColsMatched);
+ String columnName = rs.getString(COLUMN_NAME);
+ String columnFamily = rs.getString(COLUMN_FAMILY);
+ int ordinalPos = rs.getInt(ORDINAL_POSITION);
+ int dataType = rs.getInt(DATA_TYPE);
+ int columnSize = rs.getInt(COLUMN_SIZE);
+ int decimalDigits = rs.getInt(DECIMAL_DIGITS);
+ int sortOrder = rs.getInt(SORT_ORDER);
+ int arraySize = rs.getInt(ARRAY_SIZE);
+ ColumnDetails viewColumn = new ColumnDetails(columnFamily, columnName, ordinalPos, dataType, columnSize, decimalDigits, sortOrder, arraySize);
+ if (baseTableColumn.equals(viewColumn)) {
+ numBaseTableColsMatched++;
+ if (numBaseTableColsMatched == numColsInBaseTable) {
+ upsertBaseColumnCountInHeaderRow(metaConnection, viewTenantId, viewSchema, viewName, numColsInBaseTable);
+ // No need to ignore the rest of the columns of the view here since the
+ // query retrieved only those columns that had ordinal position <= numColsInBaseTable
+ baseColumnCountUpserted = true;
+ }
+ } else {
+ // special value to denote that the view has divorced itself from the base physical table.
+ upsertBaseColumnCountInHeaderRow(metaConnection, viewTenantId, viewSchema, viewName, DIVORCED_VIEW_BASE_COLUMN_COUNT);
+ baseColumnCountUpserted = true;
+ // ignore rest of the rows for the view.
+ ignore = true;
+ }
+ }
+ }
+ }
+ // set base column count for the header row of the base table too. We use this information
+ // to figure out whether the upgrade is in progress or hasn't started.
+ upsertBaseColumnCountInHeaderRow(metaConnection, null, baseTableSchemaName, baseTableName, BASE_TABLE_BASE_COLUMN_COUNT);
+ metaConnection.commit();
+ }
+ }
+
+ private static void upsertBaseColumnCountInHeaderRow(PhoenixConnection metaConnection,
+ String tenantId, String schemaName, String viewOrTableName, int baseColumnCount)
+ throws SQLException {
+ try (PreparedStatement stmt =
+ metaConnection.prepareStatement(UPSERT_BASE_COLUMN_COUNT_IN_HEADER_ROW)) {
+ stmt.setString(1, tenantId);
+ stmt.setString(2, schemaName);
+ stmt.setString(3, viewOrTableName);
+ stmt.setString(4, null);
+ stmt.setString(5, null);
+ stmt.setInt(6, baseColumnCount);
+ stmt.executeUpdate();
+ }
+ }
+
+ private static class ColumnDetails {
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + columnName.hashCode();
+ result = prime * result + ((columnFamily == null) ? 0 : columnFamily.hashCode());
+ result = prime * result + arraySize;
+ result = prime * result + dataType;
+ result = prime * result + maxLength;
+ result = prime * result + ordinalValue;
+ result = prime * result + scale;
+ result = prime * result + sortOrder;
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) return true;
+ if (obj == null) return false;
+ if (getClass() != obj.getClass()) return false;
+ ColumnDetails other = (ColumnDetails) obj;
+ if (!columnName.equals(other.columnName)) return false;
+ if (columnFamily == null) {
+ if (other.columnFamily != null) return false;
+ } else if (!columnFamily.equals(other.columnFamily)) return false;
+ if (arraySize != other.arraySize) return false;
+ if (dataType != other.dataType) return false;
+ if (maxLength != other.maxLength) return false;
+ if (ordinalValue != other.ordinalValue) return false;
+ if (scale != other.scale) return false;
+ if (sortOrder != other.sortOrder) return false;
+ return true;
+ }
+
+ @Nullable
+ private final String columnFamily;
+
+ @Nonnull
+ private final String columnName;
+
+ private final int ordinalValue;
+
+ private final int dataType;
+
+ private final int maxLength;
+
+ private final int scale;
+
+ private final int sortOrder;
+
+ private final int arraySize;
+
+ ColumnDetails(String columnFamily, String columnName, int ordinalValue, int dataType,
+ int maxLength, int scale, int sortOrder, int arraySize) {
+ checkNotNull(columnName);
+ this.columnFamily = columnFamily;
+ this.columnName = columnName;
+ this.ordinalValue = ordinalValue;
+ this.dataType = dataType;
+ this.maxLength = maxLength;
+ this.scale = scale;
+ this.sortOrder = sortOrder;
+ this.arraySize = arraySize;
+ }
+
+ }
+
+ private static class ViewKey {
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((tenantId == null) ? 0 : tenantId.hashCode());
+ result = prime * result + name.hashCode();
+ result = prime * result + ((schema == null) ? 0 : schema.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) return true;
+ if (obj == null) return false;
+ if (getClass() != obj.getClass()) return false;
+ ViewKey other = (ViewKey) obj;
+ if (tenantId == null) {
+ if (other.tenantId != null) return false;
+ } else if (!tenantId.equals(other.tenantId)) return false;
+ if (!name.equals(other.name)) return false;
+ if (schema == null) {
+ if (other.schema != null) return false;
+ } else if (!schema.equals(other.schema)) return false;
+ return true;
+ }
+
+ @Nullable
+ private final String tenantId;
+
+ @Nullable
+ private final String schema;
+
+ @Nonnull
+ private final String name;
+
+ private ViewKey(String tenantId, String schema, String viewName) {
+ this.tenantId = tenantId;
+ this.schema = schema;
+ this.name = viewName;
+ }
+ }
}
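
The column-by-column comparison driving the upgrade above only works because ColumnDetails implements equals() and hashCode() purely over its field values. A minimal, self-contained sketch of the same value-object pattern (ColKey and its fields are illustrative names, not the Phoenix class itself):

    // Value object: equality is defined purely by field values.
    final class ColKey {
        final String family;   // nullable, like ColumnDetails.columnFamily
        final String name;     // required, like ColumnDetails.columnName

        ColKey(String family, String name) {
            if (name == null) throw new NullPointerException("name");
            this.family = family;
            this.name = name;
        }

        @Override public boolean equals(Object o) {
            if (this == o) return true;
            if (!(o instanceof ColKey)) return false;
            ColKey other = (ColKey) o;
            return name.equals(other.name)
                    && (family == null ? other.family == null : family.equals(other.family));
        }

        @Override public int hashCode() {
            return 31 * name.hashCode() + (family == null ? 0 : family.hashCode());
        }
    }

    public class ColKeyDemo {
        public static void main(String[] args) {
            // Matching base-table and view columns compare equal...
            System.out.println(new ColKey("CF", "C1").equals(new ColKey("CF", "C1"))); // true
            // ...while any field drift means the view has diverged from the base table.
            System.out.println(new ColKey("CF", "C1").equals(new ColKey(null, "C1"))); // false
        }
    }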
[18/47] phoenix git commit: PHOENIX-1920 - Pherf - Add support for
mixed r/w workloads
Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java
index 523feb4..39d6a9c 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java
@@ -33,17 +33,13 @@ public class ResultManager {
private final ResultUtil util;
private final PherfConstants.RunMode runMode;
-
public ResultManager(String fileNameSeed, PherfConstants.RunMode runMode) {
- this(runMode, Arrays.asList(
- new XMLResultHandler(fileNameSeed, ResultFileDetails.XML),
+ this(runMode, Arrays.asList(new XMLResultHandler(fileNameSeed, ResultFileDetails.XML),
new ImageResultHandler(fileNameSeed, ResultFileDetails.IMAGE),
- new CSVResultHandler(
- fileNameSeed,
- runMode == RunMode.PERFORMANCE ? ResultFileDetails.CSV_DETAILED_PERFORMANCE
- : ResultFileDetails.CSV_DETAILED_FUNCTIONAL),
- new CSVResultHandler(fileNameSeed, ResultFileDetails.CSV_AGGREGATE_PERFORMANCE)
- ));
+ new CSVResultHandler(fileNameSeed, runMode == RunMode.PERFORMANCE ?
+ ResultFileDetails.CSV_DETAILED_PERFORMANCE :
+ ResultFileDetails.CSV_DETAILED_FUNCTIONAL),
+ new CSVResultHandler(fileNameSeed, ResultFileDetails.CSV_AGGREGATE_PERFORMANCE)));
}
public ResultManager(PherfConstants.RunMode runMode, List<ResultHandler> resultHandlers) {
@@ -81,6 +77,7 @@ public class ResultManager {
/**
* Write a combined set of results for each result in the list.
+ *
+ * @param dataModelResults list of {@link DataModelResult} to write
* @throws Exception
*/
@@ -89,7 +86,9 @@ public class ResultManager {
CSVResultHandler detailsCSVWriter = null;
try {
- detailsCSVWriter = new CSVResultHandler(PherfConstants.COMBINED_FILE_NAME, ResultFileDetails.CSV_DETAILED_PERFORMANCE);
+ detailsCSVWriter =
+ new CSVResultHandler(PherfConstants.COMBINED_FILE_NAME,
+ ResultFileDetails.CSV_DETAILED_PERFORMANCE);
for (DataModelResult dataModelResult : dataModelResults) {
util.write(detailsCSVWriter, dataModelResult, runMode);
}
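
The constructor above fans a single write out to one handler per output format, choosing the detailed CSV flavor from the run mode. A hedged sketch of that composition with hypothetical stand-in types (Handler and Manager are not the Pherf classes):

    import java.util.Arrays;
    import java.util.List;

    // Hypothetical stand-ins for ResultHandler/ResultManager; they only show the fan-out.
    interface Handler {
        void write(String result) throws Exception;
    }

    class Manager {
        private final List<Handler> handlers;

        Manager(boolean performanceMode) {
            // Pick the detailed handler variant from the run mode, as the diff above does.
            Handler detailed = performanceMode
                    ? r -> System.out.println("perf-csv: " + r)
                    : r -> System.out.println("func-csv: " + r);
            this.handlers = Arrays.<Handler>asList(detailed,
                    r -> System.out.println("xml: " + r));
        }

        void write(String result) throws Exception {
            for (Handler h : handlers) {
                h.write(result); // every registered format receives every result
            }
        }
    }

    public class ManagerDemo {
        public static void main(String[] args) throws Exception {
            new Manager(true).write("row-1"); // prints "perf-csv: row-1" then "xml: row-1"
        }
    }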
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultUtil.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultUtil.java
index fd960d1..07dfa86 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultUtil.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultUtil.java
@@ -22,15 +22,16 @@ import org.apache.phoenix.pherf.PherfConstants;
import org.apache.phoenix.pherf.PherfConstants.RunMode;
import org.apache.phoenix.pherf.result.file.ResultFileDetails;
import org.apache.phoenix.pherf.result.impl.CSVResultHandler;
-import org.apache.phoenix.pherf.result.impl.ImageResultHandler;
-import org.apache.phoenix.pherf.result.impl.XMLResultHandler;
import org.apache.phoenix.pherf.util.PhoenixUtil;
-import java.io.*;
+import java.io.File;
+import java.io.IOException;
import java.text.Format;
import java.text.SimpleDateFormat;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Date;
import java.util.List;
+import java.util.Map;
public class ResultUtil {
@@ -54,7 +55,10 @@ public class ResultUtil {
List<ResultValue> rowValues = new ArrayList<>();
rowValues.add(new ResultValue(PhoenixUtil.getZookeeper()));
rowValues.addAll(writeThreadTime.getCsvRepresentation(this));
- Result result = new Result(ResultFileDetails.CSV_DETAILED_PERFORMANCE, "ZK," + dataLoadThreadTime.getCsvTitle(), rowValues);
+ Result
+ result =
+ new Result(ResultFileDetails.CSV_DETAILED_PERFORMANCE,
+ "ZK," + dataLoadThreadTime.getCsvTitle(), rowValues);
writer.write(result);
}
}
@@ -83,7 +87,10 @@ public class ResultUtil {
List<ResultValue> rowValues = new ArrayList<>();
rowValues.add(new ResultValue(PhoenixUtil.getZookeeper()));
rowValues.addAll(loadTime.getCsvRepresentation(this));
- Result result = new Result(resultFileDetails, resultFileDetails.getHeader().toString(), rowValues);
+ Result
+ result =
+ new Result(resultFileDetails, resultFileDetails.getHeader().toString(),
+ rowValues);
writer.write(result);
}
} finally {
@@ -94,23 +101,29 @@ public class ResultUtil {
}
}
- public synchronized void write(ResultHandler resultHandler, DataModelResult dataModelResult, RunMode runMode) throws Exception {
+ public synchronized void write(ResultHandler resultHandler, DataModelResult dataModelResult,
+ RunMode runMode) throws Exception {
ResultFileDetails resultFileDetails = resultHandler.getResultFileDetails();
switch (resultFileDetails) {
- case CSV_AGGREGATE_PERFORMANCE:
- case CSV_DETAILED_PERFORMANCE:
- case CSV_DETAILED_FUNCTIONAL:
- List<List<ResultValue>> rowDetails = getCSVResults(dataModelResult, resultFileDetails, runMode);
- for (List<ResultValue> row : rowDetails) {
- Result result = new Result(resultFileDetails, resultFileDetails.getHeader().toString(), row);
- resultHandler.write(result);
- }
- break;
- default:
- List<ResultValue> resultValue = new ArrayList();
- resultValue.add(new ResultValue<>(dataModelResult));
- resultHandler.write(new Result(resultFileDetails, null, resultValue));
- break;
+ case CSV_AGGREGATE_PERFORMANCE:
+ case CSV_DETAILED_PERFORMANCE:
+ case CSV_DETAILED_FUNCTIONAL:
+ List<List<ResultValue>>
+ rowDetails =
+ getCSVResults(dataModelResult, resultFileDetails, runMode);
+ for (List<ResultValue> row : rowDetails) {
+ Result
+ result =
+ new Result(resultFileDetails, resultFileDetails.getHeader().toString(),
+ row);
+ resultHandler.write(result);
+ }
+ break;
+ default:
+ List<ResultValue> resultValue = new ArrayList<>();
+ resultValue.add(new ResultValue<>(dataModelResult));
+ resultHandler.write(new Result(resultFileDetails, null, resultValue));
+ break;
}
}
@@ -146,40 +159,47 @@ public class ResultUtil {
return str;
}
- private List<List<ResultValue>> getCSVResults(DataModelResult dataModelResult, ResultFileDetails resultFileDetails, RunMode runMode) {
+ private List<List<ResultValue>> getCSVResults(DataModelResult dataModelResult,
+ ResultFileDetails resultFileDetails, RunMode runMode) {
List<List<ResultValue>> rowList = new ArrayList<>();
for (ScenarioResult result : dataModelResult.getScenarioResult()) {
for (QuerySetResult querySetResult : result.getQuerySetResult()) {
for (QueryResult queryResult : querySetResult.getQueryResults()) {
switch (resultFileDetails) {
- case CSV_AGGREGATE_PERFORMANCE:
- List<ResultValue> csvResult = queryResult.getCsvRepresentation(this);
- rowList.add(csvResult);
- break;
- case CSV_DETAILED_PERFORMANCE:
- case CSV_DETAILED_FUNCTIONAL:
- List<List<ResultValue>> detailedRows = queryResult.getCsvDetailedRepresentation(this, runMode);
- for (List<ResultValue> detailedRowList : detailedRows) {
- List<ResultValue> valueList = new ArrayList<>();
- valueList.add(new ResultValue(convertNull(result.getTableName())));
- valueList.add(new ResultValue(convertNull(result.getName())));
- valueList.add(new ResultValue(convertNull(dataModelResult.getZookeeper())));
- valueList.add(new ResultValue(convertNull(String.valueOf(result.getRowCount()))));
- valueList.add(new ResultValue(convertNull(String.valueOf(querySetResult.getNumberOfExecutions()))));
- valueList.add(new ResultValue(convertNull(String.valueOf(querySetResult.getExecutionType()))));
- if (result.getPhoenixProperties() != null) {
- String props = buildProperty(result);
- valueList.add(new ResultValue(convertNull(props)));
- } else {
- valueList.add(new ResultValue("null"));
- }
- valueList.addAll(detailedRowList);
- rowList.add(valueList);
+ case CSV_AGGREGATE_PERFORMANCE:
+ List<ResultValue> csvResult = queryResult.getCsvRepresentation(this);
+ rowList.add(csvResult);
+ break;
+ case CSV_DETAILED_PERFORMANCE:
+ case CSV_DETAILED_FUNCTIONAL:
+ List<List<ResultValue>>
+ detailedRows =
+ queryResult.getCsvDetailedRepresentation(this, runMode);
+ for (List<ResultValue> detailedRowList : detailedRows) {
+ List<ResultValue> valueList = new ArrayList<>();
+ valueList.add(new ResultValue(convertNull(result.getTableName())));
+ valueList.add(new ResultValue(convertNull(result.getName())));
+ valueList.add(new ResultValue(
+ convertNull(dataModelResult.getZookeeper())));
+ valueList.add(new ResultValue(
+ convertNull(String.valueOf(result.getRowCount()))));
+ valueList.add(new ResultValue(convertNull(
+ String.valueOf(querySetResult.getNumberOfExecutions()))));
+ valueList.add(new ResultValue(convertNull(
+ String.valueOf(querySetResult.getExecutionType()))));
+ if (result.getPhoenixProperties() != null) {
+ String props = buildProperty(result);
+ valueList.add(new ResultValue(convertNull(props)));
+ } else {
+ valueList.add(new ResultValue("null"));
}
- break;
- default:
- break;
+ valueList.addAll(detailedRowList);
+ rowList.add(valueList);
+ }
+ break;
+ default:
+ break;
}
}
}
@@ -192,8 +212,7 @@ public class ResultUtil {
boolean firstPartialSeparator = true;
for (Map.Entry<String, String> entry : result.getPhoenixProperties().entrySet()) {
- if (!firstPartialSeparator)
- sb.append("|");
+ if (!firstPartialSeparator) sb.append("|");
firstPartialSeparator = false;
sb.append(entry.getKey() + "=" + entry.getValue());
}
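
buildProperty() above uses the classic first-separator flag to join key=value pairs with '|'. Under the assumption that the properties are plain string pairs, the same output falls out of java.util.StringJoiner (the property names below are illustrative):

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.StringJoiner;

    public class PropsJoinDemo {
        public static void main(String[] args) {
            Map<String, String> props = new LinkedHashMap<>();
            props.put("phoenix.query.timeoutMs", "60000");
            props.put("phoenix.mutate.batchSize", "1000");

            // Equivalent to the flag-based loop: key=value pairs separated by '|'.
            StringJoiner sj = new StringJoiner("|");
            for (Map.Entry<String, String> e : props.entrySet()) {
                sj.add(e.getKey() + "=" + e.getValue());
            }
            // phoenix.query.timeoutMs=60000|phoenix.mutate.batchSize=1000
            System.out.println(sj);
        }
    }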
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultValue.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultValue.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultValue.java
index 38abd65..78364d9 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultValue.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultValue.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.pherf.result;
/**
* Generic box container for a result value. This class allows for writing results of any type easily
+ *
* @param <T>
*/
public class ResultValue<T> {
@@ -33,8 +34,7 @@ public class ResultValue<T> {
return resultValue;
}
- @Override
- public String toString() {
+ @Override public String toString() {
return resultValue.toString();
}
}
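
ResultValue above is a plain generic box: wrap a value of any type, read it back, and let toString() render it for the writers. A stand-in sketch of the pattern (Box is a hypothetical name):

    // Stand-in for ResultValue<T>: wrap any value and render it via toString().
    class Box<T> {
        private final T value;
        Box(T value) { this.value = value; }
        T get() { return value; }
        @Override public String toString() { return String.valueOf(value); }
    }

    public class BoxDemo {
        public static void main(String[] args) {
            Box<Long> rowCount = new Box<>(1000L);
            Box<String> status = new Box<>("PASS");
            System.out.println(rowCount + "," + status); // 1000,PASS - one CSV cell each
        }
    }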
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/RunTime.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/RunTime.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/RunTime.java
index 690f7e6..3aa45fa 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/RunTime.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/RunTime.java
@@ -18,104 +18,91 @@
package org.apache.phoenix.pherf.result;
+import javax.xml.bind.annotation.XmlAttribute;
import java.util.Comparator;
import java.util.Date;
-import javax.xml.bind.annotation.XmlAttribute;
-
public class RunTime implements Comparator<RunTime>, Comparable<RunTime> {
- private Date startTime;
- private Integer elapsedDurationInMs;
- private String message;
- private Long resultRowCount;
- private String explainPlan;
-
- @SuppressWarnings("unused")
- public RunTime() {
- }
-
- @SuppressWarnings("unused")
- public RunTime(Integer elapsedDurationInMs) {
- this(null, elapsedDurationInMs);
- }
-
- public RunTime(Long resultRowCount, Integer elapsedDurationInMs) {
- this(null, resultRowCount, elapsedDurationInMs);
- }
-
- public RunTime(Date startTime, Long resultRowCount, Integer elapsedDurationInMs) {
- this(null, null, startTime, resultRowCount, elapsedDurationInMs);
- }
-
- public RunTime(String message, Date startTime, Long resultRowCount, Integer elapsedDurationInMs) {
- this(message, null, startTime, resultRowCount, elapsedDurationInMs);
- }
-
- public RunTime(String message, String explainPlan, Date startTime, Long resultRowCount, Integer elapsedDurationInMs) {
- this.elapsedDurationInMs = elapsedDurationInMs;
- this.startTime = startTime;
- this.resultRowCount = resultRowCount;
- this.message = message;
- this.explainPlan = explainPlan;
- }
-
- @XmlAttribute()
- public Date getStartTime() {
- return startTime;
- }
-
- @SuppressWarnings("unused")
- public void setStartTime(Date startTime) {
- this.startTime = startTime;
- }
-
- @XmlAttribute()
- public Integer getElapsedDurationInMs() {
- return elapsedDurationInMs;
- }
-
- @SuppressWarnings("unused")
- public void setElapsedDurationInMs(Integer elapsedDurationInMs) {
- this.elapsedDurationInMs = elapsedDurationInMs;
- }
-
- @Override
- public int compare(RunTime r1, RunTime r2) {
- return r1.getElapsedDurationInMs().compareTo(r2.getElapsedDurationInMs());
- }
-
- @Override
- public int compareTo(RunTime o) {
- return compare(this, o);
- }
-
- @XmlAttribute()
- public String getMessage() {
- return message;
- }
-
- @SuppressWarnings("unused")
- public void setMessage(String message) {
- this.message = message;
- }
-
- @XmlAttribute()
- public String getExplainPlan() {
- return explainPlan;
- }
-
- @SuppressWarnings("unused")
- public void setExplainPlan(String explainPlan) {
- this.explainPlan = explainPlan;
- }
-
- @XmlAttribute()
- public Long getResultRowCount() {
- return resultRowCount;
- }
-
- @SuppressWarnings("unused")
- public void setResultRowCount(Long resultRowCount) {
- this.resultRowCount = resultRowCount;
- }
+ private Date startTime;
+ private Integer elapsedDurationInMs;
+ private String message;
+ private Long resultRowCount;
+ private String explainPlan;
+
+ @SuppressWarnings("unused") public RunTime() {
+ }
+
+ @SuppressWarnings("unused") public RunTime(Integer elapsedDurationInMs) {
+ this(null, elapsedDurationInMs);
+ }
+
+ public RunTime(Long resultRowCount, Integer elapsedDurationInMs) {
+ this(null, resultRowCount, elapsedDurationInMs);
+ }
+
+ public RunTime(Date startTime, Long resultRowCount, Integer elapsedDurationInMs) {
+ this(null, null, startTime, resultRowCount, elapsedDurationInMs);
+ }
+
+ public RunTime(String message, Date startTime, Long resultRowCount,
+ Integer elapsedDurationInMs) {
+ this(message, null, startTime, resultRowCount, elapsedDurationInMs);
+ }
+
+ public RunTime(String message, String explainPlan, Date startTime, Long resultRowCount,
+ Integer elapsedDurationInMs) {
+ this.elapsedDurationInMs = elapsedDurationInMs;
+ this.startTime = startTime;
+ this.resultRowCount = resultRowCount;
+ this.message = message;
+ this.explainPlan = explainPlan;
+ }
+
+ @XmlAttribute() public Date getStartTime() {
+ return startTime;
+ }
+
+ @SuppressWarnings("unused") public void setStartTime(Date startTime) {
+ this.startTime = startTime;
+ }
+
+ @XmlAttribute() public Integer getElapsedDurationInMs() {
+ return elapsedDurationInMs;
+ }
+
+ @SuppressWarnings("unused") public void setElapsedDurationInMs(Integer elapsedDurationInMs) {
+ this.elapsedDurationInMs = elapsedDurationInMs;
+ }
+
+ @Override public int compare(RunTime r1, RunTime r2) {
+ return r1.getElapsedDurationInMs().compareTo(r2.getElapsedDurationInMs());
+ }
+
+ @Override public int compareTo(RunTime o) {
+ return compare(this, o);
+ }
+
+ @XmlAttribute() public String getMessage() {
+ return message;
+ }
+
+ @SuppressWarnings("unused") public void setMessage(String message) {
+ this.message = message;
+ }
+
+ @XmlAttribute() public String getExplainPlan() {
+ return explainPlan;
+ }
+
+ @SuppressWarnings("unused") public void setExplainPlan(String explainPlan) {
+ this.explainPlan = explainPlan;
+ }
+
+ @XmlAttribute() public Long getResultRowCount() {
+ return resultRowCount;
+ }
+
+ @SuppressWarnings("unused") public void setResultRowCount(Long resultRowCount) {
+ this.resultRowCount = resultRowCount;
+ }
}
\ No newline at end of file
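
RunTime above orders itself by elapsed duration, which is what lets ThreadTime pick its slowest run with Collections.max(). A small sketch of the same Comparable contract with a stand-in class:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    // Minimal stand-in mirroring RunTime's compare-by-elapsed-duration contract.
    class Timing implements Comparable<Timing> {
        final Integer elapsedMs;
        Timing(Integer elapsedMs) { this.elapsedMs = elapsedMs; }

        @Override public int compareTo(Timing o) {
            return elapsedMs.compareTo(o.elapsedMs);
        }
        @Override public String toString() { return elapsedMs + "ms"; }
    }

    public class TimingDemo {
        public static void main(String[] args) {
            List<Timing> runs = new ArrayList<>();
            Collections.addAll(runs, new Timing(42), new Timing(7), new Timing(19));
            // Collections.max() is exactly how ThreadTime finds its slowest run.
            System.out.println(Collections.max(runs)); // 42ms
            Collections.sort(runs);
            System.out.println(runs); // [7ms, 19ms, 42ms]
        }
    }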
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ScenarioResult.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ScenarioResult.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ScenarioResult.java
index b57e424..9cac1c7 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ScenarioResult.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ScenarioResult.java
@@ -18,31 +18,31 @@
package org.apache.phoenix.pherf.result;
+import org.apache.phoenix.pherf.configuration.Scenario;
+
import java.util.ArrayList;
import java.util.List;
-import org.apache.phoenix.pherf.configuration.Scenario;
public class ScenarioResult extends Scenario {
- private List<QuerySetResult> querySetResult = new ArrayList<QuerySetResult>();
-
- public List<QuerySetResult> getQuerySetResult() {
- return querySetResult;
- }
-
- @SuppressWarnings("unused")
- public void setQuerySetResult(List<QuerySetResult> querySetResult) {
- this.querySetResult = querySetResult;
- }
-
- public ScenarioResult() {
- }
-
- public ScenarioResult(Scenario scenario) {
- this.setDataOverride(scenario.getDataOverride());
- this.setPhoenixProperties(scenario.getPhoenixProperties());
- this.setRowCount(scenario.getRowCount());
- this.setTableName(scenario.getTableName());
- this.setName(scenario.getName());
- }
+ private List<QuerySetResult> querySetResult = new ArrayList<>();
+
+ public List<QuerySetResult> getQuerySetResult() {
+ return querySetResult;
+ }
+
+ @SuppressWarnings("unused") public void setQuerySetResult(List<QuerySetResult> querySetResult) {
+ this.querySetResult = querySetResult;
+ }
+
+ public ScenarioResult() {
+ }
+
+ public ScenarioResult(Scenario scenario) {
+ this.setDataOverride(scenario.getDataOverride());
+ this.setPhoenixProperties(scenario.getPhoenixProperties());
+ this.setRowCount(scenario.getRowCount());
+ this.setTableName(scenario.getTableName());
+ this.setName(scenario.getName());
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ThreadTime.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ThreadTime.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ThreadTime.java
index f043bec..03b5664 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ThreadTime.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ThreadTime.java
@@ -18,13 +18,12 @@
package org.apache.phoenix.pherf.result;
+import javax.xml.bind.annotation.XmlAttribute;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.List;
-import javax.xml.bind.annotation.XmlAttribute;
-
public class ThreadTime {
private List<RunTime> runTimesInMs = Collections.synchronizedList(new ArrayList<RunTime>());
private String threadName;
@@ -84,23 +83,22 @@ public class ThreadTime {
return Collections.max(getRunTimesInMs());
}
- @XmlAttribute()
- public String getThreadName() {
+ @XmlAttribute() public String getThreadName() {
return threadName;
}
public void setThreadName(String threadName) {
this.threadName = threadName;
}
-
+
private String parseThreadName(boolean getConcurrency) {
- if (getThreadName() == null || !getThreadName().contains(",")) return null;
- String[] threadNameSet = getThreadName().split(",");
- if (getConcurrency) {
- return threadNameSet[1];}
- else {
- return threadNameSet[0];
- }
+ if (getThreadName() == null || !getThreadName().contains(",")) return null;
+ String[] threadNameSet = getThreadName().split(",");
+ if (getConcurrency) {
+ return threadNameSet[1];
+ } else {
+ return threadNameSet[0];
+ }
}
public List<List<ResultValue>> getCsvPerformanceRepresentation(ResultUtil util) {
@@ -110,11 +108,14 @@ public class ThreadTime {
List<ResultValue> rowValues = new ArrayList(getRunTimesInMs().size());
rowValues.add(new ResultValue(util.convertNull(parseThreadName(false))));
rowValues.add(new ResultValue(util.convertNull(parseThreadName(true))));
- rowValues.add(new ResultValue(String.valueOf(getRunTimesInMs().get(i).getResultRowCount())));
+ rowValues.add(new ResultValue(
+ String.valueOf(getRunTimesInMs().get(i).getResultRowCount())));
if (getRunTimesInMs().get(i).getMessage() == null) {
- rowValues.add(new ResultValue(util.convertNull(String.valueOf(getRunTimesInMs().get(i).getElapsedDurationInMs()))));
+ rowValues.add(new ResultValue(util.convertNull(
+ String.valueOf(getRunTimesInMs().get(i).getElapsedDurationInMs()))));
} else {
- rowValues.add(new ResultValue(util.convertNull(getRunTimesInMs().get(i).getMessage())));
+ rowValues.add(new ResultValue(
+ util.convertNull(getRunTimesInMs().get(i).getMessage())));
}
rows.add(rowValues);
}
@@ -129,7 +130,8 @@ public class ThreadTime {
rowValues.add(new ResultValue(util.convertNull(parseThreadName(false))));
rowValues.add(new ResultValue(util.convertNull(parseThreadName(true))));
rowValues.add(new ResultValue(util.convertNull(getRunTimesInMs().get(i).getMessage())));
- rowValues.add(new ResultValue(util.convertNull(getRunTimesInMs().get(i).getExplainPlan())));
+ rowValues.add(new ResultValue(
+ util.convertNull(getRunTimesInMs().get(i).getExplainPlan())));
rows.add(rowValues);
}
return rows;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/Extension.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/Extension.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/Extension.java
index 0df383c..e6a7308 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/Extension.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/Extension.java
@@ -31,8 +31,7 @@ public enum Extension {
this.extension = extension;
}
- @Override
- public String toString() {
+ @Override public String toString() {
return extension;
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/Header.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/Header.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/Header.java
index 98e7b30..15e2b9a 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/Header.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/file/Header.java
@@ -20,9 +20,11 @@ package org.apache.phoenix.pherf.result.file;
public enum Header {
EMPTY(""),
- AGGREGATE_PERFORMANCE("START_TIME,QUERY_GROUP,QUERY,TENANT_ID,AVG_MAX_TIME_MS,AVG_TIME_MS,AVG_MIN_TIME_MS,RUN_COUNT"),
- DETAILED_BASE("BASE_TABLE_NAME,SCENARIO_NAME,ZOOKEEPER,ROW_COUNT,EXECUTION_COUNT,EXECUTION_TYPE,PHOENIX_PROPERTIES"
- + ",START_TIME,QUERY_GROUP,QUERY,TENANT_ID,THREAD_NUMBER,CONCURRENCY_LEVEL"),
+ AGGREGATE_PERFORMANCE(
+ "START_TIME,QUERY_GROUP,QUERY,TENANT_ID,AVG_MAX_TIME_MS,AVG_TIME_MS,AVG_MIN_TIME_MS,RUN_COUNT"),
+ DETAILED_BASE(
+ "BASE_TABLE_NAME,SCENARIO_NAME,ZOOKEEPER,ROW_COUNT,EXECUTION_COUNT,EXECUTION_TYPE,PHOENIX_PROPERTIES"
+ + ",START_TIME,QUERY_GROUP,QUERY,TENANT_ID,THREAD_NUMBER,CONCURRENCY_LEVEL"),
DETAILED_PERFORMANCE(DETAILED_BASE + ",RESULT_ROW_COUNT,RUN_TIME_MS"),
DETAILED_FUNCTIONAL(DETAILED_BASE + ",DIFF_STATUS,EXPLAIN_PLAN"),
AGGREGATE_DATA_LOAD("ZK,TABLE_NAME,ROW_COUNT,LOAD_DURATION_IN_MS"),
@@ -34,8 +36,7 @@ public enum Header {
this.header = header;
}
- @Override
- public String toString() {
+ @Override public String toString() {
return header;
}
}
\ No newline at end of file
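
Extension and Header above both use the enum-with-payload idiom: each constant carries a string and toString() returns it, so constants drop straight into concatenation. A compact sketch with an illustrative Suffix enum:

    // Enum-with-payload pattern used by Extension and Header above.
    enum Suffix {
        CSV(".csv"), XML(".xml"), IMAGE(".jpg");

        private final String text;
        Suffix(String text) { this.text = text; }

        @Override public String toString() { return text; }
    }

    public class SuffixDemo {
        public static void main(String[] args) {
            System.out.println("RESULT_demo" + Suffix.CSV); // RESULT_demo.csv
        }
    }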
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/CSVResultHandler.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/CSVResultHandler.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/CSVResultHandler.java
index e7fbb48..e69f600 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/CSVResultHandler.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/CSVResultHandler.java
@@ -18,13 +18,6 @@
package org.apache.phoenix.pherf.result.impl;
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.nio.charset.Charset;
-import java.util.ArrayList;
-import java.util.List;
-
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVPrinter;
@@ -36,6 +29,13 @@ import org.apache.phoenix.pherf.result.ResultUtil;
import org.apache.phoenix.pherf.result.ResultValue;
import org.apache.phoenix.pherf.result.file.ResultFileDetails;
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.List;
+
/**
* TODO Doc this class. Note that each instance with a non-unique file name will overwrite the previous one.
*/
@@ -51,22 +51,22 @@ public class CSVResultHandler implements ResultHandler {
this(resultFileName, resultFileDetails, true);
}
- public CSVResultHandler(String resultFileName, ResultFileDetails resultFileDetails, boolean generateFullFileName) {
+ public CSVResultHandler(String resultFileName, ResultFileDetails resultFileDetails,
+ boolean generateFullFileName) {
this.util = new ResultUtil();
PherfConstants constants = PherfConstants.create();
String resultDir = constants.getProperty("pherf.default.results.dir");
- this.resultFileName = generateFullFileName ?
- resultDir + PherfConstants.PATH_SEPARATOR
- + PherfConstants.RESULT_PREFIX
- + resultFileName + util.getSuffix()
- + resultFileDetails.getExtension().toString()
- : resultFileName;
+ this.resultFileName =
+ generateFullFileName ?
+ resultDir + PherfConstants.PATH_SEPARATOR + PherfConstants.RESULT_PREFIX
+ + resultFileName + util.getSuffix() + resultFileDetails
+ .getExtension().toString() :
+ resultFileName;
this.resultFileDetails = resultFileDetails;
}
- @Override
- public synchronized void write(Result result) throws IOException {
+ @Override public synchronized void write(Result result) throws IOException {
util.ensureBaseResultDirExists();
open(result);
@@ -74,15 +74,13 @@ public class CSVResultHandler implements ResultHandler {
flush();
}
- @Override
- public synchronized void flush() throws IOException {
+ @Override public synchronized void flush() throws IOException {
if (csvPrinter != null) {
csvPrinter.flush();
}
}
- @Override
- public synchronized void close() throws IOException {
+ @Override public synchronized void close() throws IOException {
if (csvPrinter != null) {
csvPrinter.flush();
csvPrinter.close();
@@ -90,8 +88,7 @@ public class CSVResultHandler implements ResultHandler {
}
}
- @Override
- public synchronized List<Result> read() throws IOException {
+ @Override public synchronized List<Result> read() throws IOException {
CSVParser parser = null;
util.ensureBaseResultDirExists();
try {
@@ -131,13 +128,11 @@ public class CSVResultHandler implements ResultHandler {
isClosed = false;
}
- @Override
- public synchronized boolean isClosed() {
+ @Override public synchronized boolean isClosed() {
return isClosed;
}
- @Override
- public ResultFileDetails getResultFileDetails() {
+ @Override public ResultFileDetails getResultFileDetails() {
return resultFileDetails;
}
}
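
CSVResultHandler above writes through a commons-csv CSVPrinter. A hedged sketch of the underlying commons-csv calls, assuming commons-csv is on the classpath (the file name and values are illustrative):

    import java.io.PrintWriter;
    import java.nio.charset.StandardCharsets;
    import org.apache.commons.csv.CSVFormat;
    import org.apache.commons.csv.CSVPrinter;

    public class CsvWriteDemo {
        public static void main(String[] args) throws Exception {
            try (PrintWriter out = new PrintWriter("RESULT_demo.csv",
                         StandardCharsets.UTF_8.name());
                 CSVPrinter printer = new CSVPrinter(out, CSVFormat.DEFAULT)) {
                printer.printRecord("ZK", "TABLE_NAME", "ROW_COUNT");   // header row
                printer.printRecord("localhost", "PHERF.DEMO", 1000L);  // one data row
                printer.flush();
            }
        }
    }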
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/ImageResultHandler.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/ImageResultHandler.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/ImageResultHandler.java
index ad3c8fb..5c3eac1 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/ImageResultHandler.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/ImageResultHandler.java
@@ -19,8 +19,8 @@
package org.apache.phoenix.pherf.result.impl;
import org.apache.phoenix.pherf.PherfConstants;
-import org.apache.phoenix.pherf.result.file.ResultFileDetails;
import org.apache.phoenix.pherf.result.*;
+import org.apache.phoenix.pherf.result.file.ResultFileDetails;
import org.jfree.chart.ChartFactory;
import org.jfree.chart.ChartUtilities;
import org.jfree.chart.JFreeChart;
@@ -42,22 +42,22 @@ public class ImageResultHandler implements ResultHandler {
this(resultFileName, resultFileDetails, true);
}
- public ImageResultHandler(String resultFileName, ResultFileDetails resultFileDetails, boolean generateFullFileName) {
+ public ImageResultHandler(String resultFileName, ResultFileDetails resultFileDetails,
+ boolean generateFullFileName) {
ResultUtil util = new ResultUtil();
PherfConstants constants = PherfConstants.create();
String resultDir = constants.getProperty("pherf.default.results.dir");
- this.resultFileName = generateFullFileName ?
- resultDir + PherfConstants.PATH_SEPARATOR
- + PherfConstants.RESULT_PREFIX
- + resultFileName + util.getSuffix()
- + resultFileDetails.getExtension().toString()
- : resultFileName;
+ this.resultFileName =
+ generateFullFileName ?
+ resultDir + PherfConstants.PATH_SEPARATOR + PherfConstants.RESULT_PREFIX
+ + resultFileName + util.getSuffix() + resultFileDetails
+ .getExtension().toString() :
+ resultFileName;
this.resultFileDetails = resultFileDetails;
}
- @Override
- public synchronized void write(Result result) throws Exception {
+ @Override public synchronized void write(Result result) throws Exception {
TimeSeriesCollection timeSeriesCollection = new TimeSeriesCollection();
int rowCount = 0;
int maxLegendCount = 20;
@@ -70,12 +70,16 @@ public class ImageResultHandler implements ResultHandler {
for (QuerySetResult querySetResult : scenarioResult.getQuerySetResult()) {
for (QueryResult queryResult : querySetResult.getQueryResults()) {
for (ThreadTime tt : queryResult.getThreadTimes()) {
- TimeSeries timeSeries = new TimeSeries(queryResult.getStatement() + " :: " + tt.getThreadName());
+ TimeSeries
+ timeSeries =
+ new TimeSeries(
+ queryResult.getStatement() + " :: " + tt.getThreadName());
rowCount++;
synchronized (tt.getRunTimesInMs()) {
for (RunTime rt : tt.getRunTimesInMs()) {
if (rt.getStartTime() != null) {
- timeSeries.add(new Millisecond(rt.getStartTime()), rt.getElapsedDurationInMs());
+ timeSeries.add(new Millisecond(rt.getStartTime()),
+ rt.getElapsedDurationInMs());
}
}
}
@@ -85,10 +89,14 @@ public class ImageResultHandler implements ResultHandler {
}
}
boolean legend = rowCount <= maxLegendCount;
- JFreeChart chart = ChartFactory.createTimeSeriesChart(dataModelResult.getName()
- , "Time", "Query Time (ms)", timeSeriesCollection,
- legend, true, false);
- StandardXYItemRenderer renderer = new StandardXYItemRenderer(StandardXYItemRenderer.SHAPES_AND_LINES);
+ JFreeChart
+ chart =
+ ChartFactory
+ .createTimeSeriesChart(dataModelResult.getName(), "Time", "Query Time (ms)",
+ timeSeriesCollection, legend, true, false);
+ StandardXYItemRenderer
+ renderer =
+ new StandardXYItemRenderer(StandardXYItemRenderer.SHAPES_AND_LINES);
chart.getXYPlot().setRenderer(renderer);
chart.getXYPlot().setBackgroundPaint(Color.WHITE);
chart.getXYPlot().setRangeGridlinePaint(Color.BLACK);
@@ -96,35 +104,31 @@ public class ImageResultHandler implements ResultHandler {
chart.getXYPlot().getRenderer().setSeriesStroke(i, new BasicStroke(3f));
}
try {
- ChartUtilities.saveChartAsJPEG(new File(resultFileName), chart, chartDimension, chartDimension);
+ ChartUtilities.saveChartAsJPEG(new File(resultFileName), chart, chartDimension,
+ chartDimension);
} catch (IOException e) {
e.printStackTrace();
}
}
- @Override
- public synchronized void flush() throws Exception {
+ @Override public synchronized void flush() throws Exception {
}
- @Override
- public synchronized void close() throws Exception {
+ @Override public synchronized void close() throws Exception {
}
- @Override
- public List<Result> read() throws Exception {
+ @Override public List<Result> read() throws Exception {
return null;
}
- @Override
- public boolean isClosed() {
+ @Override public boolean isClosed() {
return false;
}
- @Override
- public ResultFileDetails getResultFileDetails() {
+ @Override public ResultFileDetails getResultFileDetails() {
return resultFileDetails;
}
}
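
The chart-building code above reduces to a handful of JFreeChart calls. A minimal runnable sketch, assuming the jfreechart library used by Pherf is on the classpath (the series name and output file are illustrative):

    import java.io.File;
    import org.jfree.chart.ChartFactory;
    import org.jfree.chart.ChartUtilities;
    import org.jfree.chart.JFreeChart;
    import org.jfree.data.time.Millisecond;
    import org.jfree.data.time.TimeSeries;
    import org.jfree.data.time.TimeSeriesCollection;

    public class ChartDemo {
        public static void main(String[] args) throws Exception {
            TimeSeries series = new TimeSeries("query :: thread-1");
            series.add(new Millisecond(), 42.0);   // one (timestamp, elapsed-ms) point
            TimeSeriesCollection dataset = new TimeSeriesCollection(series);

            JFreeChart chart = ChartFactory.createTimeSeriesChart(
                    "demo", "Time", "Query Time (ms)", dataset, true, true, false);
            ChartUtilities.saveChartAsJPEG(new File("demo.jpg"), chart, 800, 600);
        }
    }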
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/XMLResultHandler.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/XMLResultHandler.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/XMLResultHandler.java
index 8a913ed..009ae21 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/XMLResultHandler.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/impl/XMLResultHandler.java
@@ -19,8 +19,8 @@
package org.apache.phoenix.pherf.result.impl;
import org.apache.phoenix.pherf.PherfConstants;
-import org.apache.phoenix.pherf.result.file.ResultFileDetails;
import org.apache.phoenix.pherf.result.*;
+import org.apache.phoenix.pherf.result.file.ResultFileDetails;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;
@@ -30,7 +30,6 @@ import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
-import java.util.Properties;
public class XMLResultHandler implements ResultHandler {
private final String resultFileName;
@@ -40,22 +39,22 @@ public class XMLResultHandler implements ResultHandler {
this(resultFileName, resultFileDetails, true);
}
- public XMLResultHandler(String resultFileName, ResultFileDetails resultFileDetails, boolean generateFullFileName) {
+ public XMLResultHandler(String resultFileName, ResultFileDetails resultFileDetails,
+ boolean generateFullFileName) {
ResultUtil util = new ResultUtil();
PherfConstants constants = PherfConstants.create();
String resultDir = constants.getProperty("pherf.default.results.dir");
- this.resultFileName = generateFullFileName ?
- resultDir + PherfConstants.PATH_SEPARATOR
- + PherfConstants.RESULT_PREFIX
- + resultFileName + util.getSuffix()
- + resultFileDetails.getExtension().toString()
- : resultFileName;
+ this.resultFileName =
+ generateFullFileName ?
+ resultDir + PherfConstants.PATH_SEPARATOR + PherfConstants.RESULT_PREFIX
+ + resultFileName + util.getSuffix() + resultFileDetails
+ .getExtension().toString() :
+ resultFileName;
this.resultFileDetails = resultFileDetails;
}
- @Override
- public synchronized void write(Result result) throws Exception {
+ @Override public synchronized void write(Result result) throws Exception {
FileOutputStream os = null;
JAXBContext jaxbContext = JAXBContext.newInstance(DataModelResult.class);
Marshaller jaxbMarshaller = jaxbContext.createMarshaller();
@@ -72,18 +71,15 @@ public class XMLResultHandler implements ResultHandler {
}
}
- @Override
- public synchronized void flush() throws IOException {
+ @Override public synchronized void flush() throws IOException {
return;
}
- @Override
- public synchronized void close() throws IOException {
+ @Override public synchronized void close() throws IOException {
return;
}
- @Override
- public synchronized List<Result> read() throws Exception {
+ @Override public synchronized List<Result> read() throws Exception {
JAXBContext jaxbContext = JAXBContext.newInstance(DataModelResult.class);
Unmarshaller jaxbUnmarshaller = jaxbContext.createUnmarshaller();
@@ -95,13 +91,11 @@ public class XMLResultHandler implements ResultHandler {
return results;
}
- @Override
- public boolean isClosed() {
+ @Override public boolean isClosed() {
return true;
}
- @Override
- public ResultFileDetails getResultFileDetails() {
+ @Override public ResultFileDetails getResultFileDetails() {
return resultFileDetails;
}
}
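
XMLResultHandler above round-trips DataModelResult through JAXB. A self-contained sketch of the same marshal/unmarshal calls, with a hypothetical Snapshot type standing in for the result model:

    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.Marshaller;
    import javax.xml.bind.Unmarshaller;
    import javax.xml.bind.annotation.XmlRootElement;
    import java.io.File;

    @XmlRootElement
    class Snapshot {
        public String name = "demo"; // public field keeps the example free of getters
    }

    public class JaxbDemo {
        public static void main(String[] args) throws Exception {
            JAXBContext ctx = JAXBContext.newInstance(Snapshot.class);

            // Write: mirrors XMLResultHandler.write() marshalling a result object.
            Marshaller m = ctx.createMarshaller();
            m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
            File f = new File("snapshot.xml");
            m.marshal(new Snapshot(), f);

            // Read: mirrors XMLResultHandler.read() unmarshalling the same file.
            Unmarshaller u = ctx.createUnmarshaller();
            Snapshot back = (Snapshot) u.unmarshal(f);
            System.out.println(back.name); // demo
        }
    }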
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java
index 4761211..439f87e 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java
@@ -45,7 +45,7 @@ public class SchemaReader {
* @throws Exception
*/
public SchemaReader(final String searchPattern) throws Exception {
- this(new PhoenixUtil(), searchPattern);
+ this(PhoenixUtil.create(), searchPattern);
}
public SchemaReader(PhoenixUtil util, final String searchPattern) throws Exception {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java
index 83e324d..0156149 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java
@@ -30,6 +30,8 @@ import java.util.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.phoenix.pherf.configuration.Query;
+import org.apache.phoenix.pherf.configuration.QuerySet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -39,15 +41,25 @@ public class PhoenixUtil {
private static String zookeeper;
private static int rowCountOverride = 0;
private boolean testEnabled;
+ private static PhoenixUtil instance;
- public PhoenixUtil() {
+ private PhoenixUtil() {
this(false);
}
- public PhoenixUtil(final boolean testEnabled) {
+ private PhoenixUtil(final boolean testEnabled) {
this.testEnabled = testEnabled;
}
+ public static PhoenixUtil create() {
+ return create(false);
+ }
+
+ public static PhoenixUtil create(final boolean testEnabled) {
+ instance = instance != null ? instance : new PhoenixUtil(testEnabled);
+ return instance;
+ }
+
public Connection getConnection() throws Exception{
return getConnection(null);
}
@@ -56,7 +68,7 @@ public class PhoenixUtil {
return getConnection(tenantId, testEnabled);
}
- public Connection getConnection(String tenantId, boolean testEnabled) throws Exception {
+ private Connection getConnection(String tenantId, boolean testEnabled) throws Exception {
if (null == zookeeper) {
throw new IllegalArgumentException(
"Zookeeper must be set before initializing connection!");
@@ -115,17 +127,6 @@ public class PhoenixUtil {
return result;
}
- @SuppressWarnings("unused")
- public ResultSet executeQuery(PreparedStatement preparedStatement, Connection connection) {
- ResultSet resultSet = null;
- try {
- resultSet = preparedStatement.executeQuery();
- } catch (SQLException e) {
- e.printStackTrace();
- }
- return resultSet;
- }
-
/**
* Delete existing tables with schema name set as {@link PherfConstants#PHERF_SCHEMA_NAME} with regex comparison
*
@@ -133,14 +134,14 @@ public class PhoenixUtil {
* @throws SQLException
* @throws Exception
*/
- public void deleteTables(String regexMatch) throws SQLException, Exception {
+ public void deleteTables(String regexMatch) throws Exception {
regexMatch = regexMatch.toUpperCase().replace("ALL", ".*");
Connection conn = getConnection();
try {
ResultSet resultSet = getTableMetaData(PherfConstants.PHERF_SCHEMA_NAME, null, conn);
while (resultSet.next()) {
- String tableName = resultSet.getString("TABLE_SCHEM") == null ? resultSet.getString("TABLE_NAME") :
- resultSet.getString("TABLE_SCHEM") + "." + resultSet.getString("TABLE_NAME");
+ String tableName = resultSet.getString("TABLE_SCHEMA") == null ? resultSet.getString("TABLE_NAME") :
+ resultSet.getString("TABLE_SCHEMA") + "." + resultSet.getString("TABLE_NAME");
if (tableName.matches(regexMatch)) {
logger.info("\nDropping " + tableName);
executeStatement("DROP TABLE " + tableName + " CASCADE", conn);
@@ -183,8 +184,33 @@ public class PhoenixUtil {
return Collections.unmodifiableList(columnList);
}
-
- public static String getZookeeper() {
+
+ /**
+ * Executes all DDLs in the query set up front, using the tenantId when specified. DDLs
+ * run first because they must not execute in parallel with the queries themselves.
+ *
+ * @param querySet
+ * @throws Exception
+ */
+ public void executeQuerySetDdls(QuerySet querySet) throws Exception {
+ for (Query query : querySet.getQuery()) {
+ if (null != query.getDdl()) {
+ Connection conn = null;
+ try {
+ logger.info("\nExecuting DDL:" + query.getDdl() + " on tenantId:" + query
+ .getTenantId());
+ executeStatement(query.getDdl(),
+ conn = getConnection(query.getTenantId()));
+ } finally {
+ if (null != conn) {
+ conn.close();
+ }
+ }
+ }
+ }
+ }
+
+ public static String getZookeeper() {
return zookeeper;
}
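
Note that create() above is an unsynchronized lazy singleton, so the first caller's testEnabled flag wins for the life of the JVM and concurrent first calls could race. If thread safety mattered, a conventional double-checked-locking variant would look like this (a sketch, not the committed code):

    public final class LazySingleton {
        private static volatile LazySingleton instance;
        private final boolean testEnabled;

        private LazySingleton(boolean testEnabled) { this.testEnabled = testEnabled; }

        public static LazySingleton create(boolean testEnabled) {
            LazySingleton local = instance;
            if (local == null) {
                synchronized (LazySingleton.class) {
                    local = instance;
                    if (local == null) {
                        local = new LazySingleton(testEnabled);
                        instance = local;
                    }
                }
            }
            // As in the committed code, later testEnabled arguments are ignored once created.
            return local;
        }

        public boolean isTestEnabled() { return testEnabled; }
    }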
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java
new file mode 100644
index 0000000..efb3da9
--- /dev/null
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.pherf.workload;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.util.Calendar;
+import java.util.Date;
+
+import org.apache.phoenix.pherf.PherfConstants.RunMode;
+
+import org.apache.phoenix.pherf.result.DataModelResult;
+import org.apache.phoenix.pherf.result.ResultManager;
+import org.apache.phoenix.pherf.result.RunTime;
+import org.apache.phoenix.pherf.result.ThreadTime;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.phoenix.pherf.configuration.Query;
+import org.apache.phoenix.pherf.util.PhoenixUtil;
+
+class MultiThreadedRunner implements Runnable {
+ private static final Logger logger = LoggerFactory.getLogger(MultiThreadedRunner.class);
+ private Query query;
+ private ThreadTime threadTime;
+ private PhoenixUtil pUtil = PhoenixUtil.create();
+ private String threadName;
+ private DataModelResult dataModelResult;
+ private long numberOfExecutions;
+ private long executionDurationInMs;
+ private static long lastResultWritten = System.currentTimeMillis() - 1000;
+ private final ResultManager resultManager;
+
+ /**
+ * MultiThreadedRunner
+ *
+ * @param threadName
+ * @param query
+ * @param dataModelResult
+ * @param threadTime
+ * @param numberOfExecutions
+ * @param executionDurationInMs
+ */
+ MultiThreadedRunner(String threadName, Query query, DataModelResult dataModelResult,
+ ThreadTime threadTime, long numberOfExecutions, long executionDurationInMs) {
+ this.query = query;
+ this.threadName = threadName;
+ this.threadTime = threadTime;
+ this.dataModelResult = dataModelResult;
+ this.numberOfExecutions = numberOfExecutions;
+ this.executionDurationInMs = executionDurationInMs;
+ this.resultManager = new ResultManager(dataModelResult.getName(), RunMode.PERFORMANCE);
+ }
+
+ /**
+ * Runs the query up to numberOfExecutions times or until executionDurationInMs elapses, whichever comes first
+ */
+ public void run() {
+ logger.info("\n\nThread Starting " + threadName + " ; " + query.getStatement() + " for "
+ + numberOfExecutions + "times\n\n");
+ Long start = System.currentTimeMillis();
+ for (long i = numberOfExecutions; (i > 0 && ((System.currentTimeMillis() - start)
+ < executionDurationInMs)); i--) {
+ try {
+ synchronized (resultManager) {
+ timedQuery();
+ if ((System.currentTimeMillis() - lastResultWritten) > 1000) {
+ resultManager.write(dataModelResult);
+ lastResultWritten = System.currentTimeMillis();
+ }
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+ logger.info("\n\nThread exiting." + threadName + "\n\n");
+ }
+
+ private synchronized ThreadTime getThreadTime() {
+ return threadTime;
+ }
+
+ /**
+ * Timed query execution
+ *
+ * @throws Exception
+ */
+ private void timedQuery() throws Exception {
+ boolean isSelectCountStatement =
+ query.getStatement().toUpperCase().trim().contains("COUNT(*)");
+
+ Connection conn = null;
+ PreparedStatement statement = null;
+ ResultSet rs = null;
+ Long start = System.currentTimeMillis();
+ Date startDate = Calendar.getInstance().getTime();
+ String exception = null;
+ long resultRowCount = 0;
+
+ try {
+ conn = pUtil.getConnection(query.getTenantId());
+ statement = conn.prepareStatement(query.getStatement());
+ boolean isQuery = statement.execute();
+ if (isQuery) {
+ rs = statement.getResultSet();
+ while (rs.next()) {
+ if (null != query.getExpectedAggregateRowCount()) {
+ if (rs.getLong(1) != query.getExpectedAggregateRowCount())
+ throw new RuntimeException(
+ "Aggregate count " + rs.getLong(1) + " does not match expected "
+ + query.getExpectedAggregateRowCount());
+ }
+
+ if (isSelectCountStatement) {
+ resultRowCount = rs.getLong(1);
+ } else {
+ resultRowCount++;
+ }
+ }
+ } else {
+ conn.commit();
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ exception = e.getMessage();
+ } finally {
+ getThreadTime().getRunTimesInMs().add(new RunTime(exception, startDate, resultRowCount,
+ (int) (System.currentTimeMillis() - start)));
+
+ if (rs != null) rs.close();
+ if (statement != null) statement.close();
+ if (conn != null) conn.close();
+ }
+ }
+}
\ No newline at end of file
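
timedQuery() above closes its JDBC resources by hand in a finally block; on Java 7+ the same cleanup can be written with try-with-resources. A hedged sketch against a plain Phoenix JDBC URL (the URL and table name are illustrative):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;

    public class TimedQueryDemo {
        public static void main(String[] args) throws Exception {
            long start = System.currentTimeMillis();
            long rows = 0;
            // Each resource closes automatically in reverse order, even on exceptions.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 PreparedStatement stmt =
                         conn.prepareStatement("SELECT COUNT(*) FROM PHERF.DEMO");
                 ResultSet rs = stmt.executeQuery()) {
                while (rs.next()) {
                    rows = rs.getLong(1); // a COUNT(*) query yields a single row
                }
            }
            System.out.println(rows + " rows in "
                    + (System.currentTimeMillis() - start) + " ms");
        }
    }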
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
index c78db90..1735754 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
@@ -30,84 +30,69 @@ import org.apache.phoenix.pherf.result.RunTime;
import org.apache.phoenix.pherf.result.ThreadTime;
class MultithreadedDiffer implements Runnable {
- private static final Logger logger = LoggerFactory
- .getLogger(MultithreadedRunner.class);
- private Thread t;
- private Query query;
- private ThreadTime threadTime;
- private String threadName;
- private long numberOfExecutions;
- private long executionDurationInMs;
- private QueryVerifier queryVerifier = new QueryVerifier(true);
+ private static final Logger logger = LoggerFactory.getLogger(MultiThreadedRunner.class);
+ private Thread t;
+ private Query query;
+ private ThreadTime threadTime;
+ private String threadName;
+ private long numberOfExecutions;
+ private long executionDurationInMs;
+ private QueryVerifier queryVerifier = new QueryVerifier(true);
- private synchronized ThreadTime getThreadTime() {
+ private synchronized ThreadTime getThreadTime() {
return threadTime;
}
/**
- * Query Verification
- * @throws Exception
- */
- private void diffQuery() throws Exception {
- Long start = System.currentTimeMillis();
- Date startDate = Calendar.getInstance().getTime();
- String newCSV = queryVerifier.exportCSV(query);
- boolean verifyResult = queryVerifier.doDiff(query, newCSV);
- String explainPlan = queryVerifier.getExplainPlan(query);
- getThreadTime().getRunTimesInMs().add(
- new RunTime(verifyResult == true ? PherfConstants.DIFF_PASS : PherfConstants.DIFF_FAIL,
- explainPlan, startDate, -1L,
- (int)(System.currentTimeMillis() - start)));
- }
-
- /**
- * Multithreaded Differ
- * @param threadName
- * @param query
- * @param threadName
- * @param threadTime
- * @param numberOfExecutions
- * @param executionDurationInMs
- */
- MultithreadedDiffer(String threadName,
- Query query,
- ThreadTime threadTime,
- long numberOfExecutions,
- long executionDurationInMs) {
- this.query = query;
- this.threadName = threadName;
- this.threadTime = threadTime;
- this.numberOfExecutions = numberOfExecutions;
- this.executionDurationInMs = executionDurationInMs;
- }
+ * Query Verification
+ *
+ * @throws Exception
+ */
+ private void diffQuery() throws Exception {
+ Long start = System.currentTimeMillis();
+ Date startDate = Calendar.getInstance().getTime();
+ String newCSV = queryVerifier.exportCSV(query);
+ boolean verifyResult = queryVerifier.doDiff(query, newCSV);
+ String explainPlan = queryVerifier.getExplainPlan(query);
+ getThreadTime().getRunTimesInMs().add(new RunTime(
+ verifyResult == true ? PherfConstants.DIFF_PASS : PherfConstants.DIFF_FAIL,
+ explainPlan, startDate, -1L, (int) (System.currentTimeMillis() - start)));
+ }
- /**
- * Executes verification runs for a minimum of number of execution or execution duration
- */
- public void run() {
- logger.info("\n\nThread Starting " + t.getName() + " ; " + query.getStatement() + " for "
- + numberOfExecutions + "times\n\n");
- Long start = System.currentTimeMillis();
- for (long i = numberOfExecutions; (i > 0 && ((System
- .currentTimeMillis() - start) < executionDurationInMs)); i--) {
- try {
- diffQuery();
- } catch (Exception e) {
- e.printStackTrace();
- }
- }
- logger.info("\n\nThread exiting." + t.getName() + "\n\n");
- }
+ /**
+ * Multithreaded Differ
+ *
+ * @param threadName
+ * @param query
+ * @param threadTime
+ * @param numberOfExecutions
+ * @param executionDurationInMs
+ */
+ MultithreadedDiffer(String threadName, Query query, ThreadTime threadTime,
+ long numberOfExecutions, long executionDurationInMs) {
+ this.query = query;
+ this.threadName = threadName;
+ this.threadTime = threadTime;
+ this.numberOfExecutions = numberOfExecutions;
+ this.executionDurationInMs = executionDurationInMs;
+ }
- /**
- * Thread start
- * @return
- */
- public Thread start() {
- if (t == null) {
- t = new Thread(this, threadName);
- t.start();
- }
- return t;
- }
+ /**
+ * Runs verification up to numberOfExecutions times or until executionDurationInMs elapses, whichever comes first
+ */
+ public void run() {
+ logger.info("\n\nThread Starting " + t.getName() + " ; " + query.getStatement() + " for "
+ + numberOfExecutions + "times\n\n");
+ Long start = System.currentTimeMillis();
+ for (long i = numberOfExecutions; (i > 0 && ((System.currentTimeMillis() - start)
+ < executionDurationInMs)); i--) {
+ try {
+ diffQuery();
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+ logger.info("\n\nThread exiting." + t.getName() + "\n\n");
+ }
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedRunner.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedRunner.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedRunner.java
deleted file mode 100644
index 237fc17..0000000
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedRunner.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.phoenix.pherf.workload;
-
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.util.Calendar;
-import java.util.Date;
-
-import org.apache.phoenix.pherf.PherfConstants.RunMode;
-
-import org.apache.phoenix.pherf.result.DataModelResult;
-import org.apache.phoenix.pherf.result.ResultManager;
-import org.apache.phoenix.pherf.result.RunTime;
-import org.apache.phoenix.pherf.result.ThreadTime;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.phoenix.pherf.configuration.Query;
-import org.apache.phoenix.pherf.util.PhoenixUtil;
-
-class MultithreadedRunner implements Runnable {
- private static final Logger logger = LoggerFactory
- .getLogger(MultithreadedRunner.class);
- private Thread t;
- private Query query;
- private ThreadTime threadTime;
- private PhoenixUtil pUtil = new PhoenixUtil();
- private String threadName;
- private DataModelResult dataModelResult;
- private long numberOfExecutions;
- private long executionDurationInMs;
- private static long lastResultWritten = System.currentTimeMillis() - 1000;
- private final ResultManager resultManager;
-
- /**
- * Multithreaded runner
- *
- * @param threadName
- * @param query
- * @param dataModelResult
- * @param threadTime
- * @param numberOfExecutions
- * @param executionDurationInMs
- */
- MultithreadedRunner(String threadName,
- Query query,
- DataModelResult dataModelResult,
- ThreadTime threadTime,
- long numberOfExecutions,
- long executionDurationInMs) {
- this.query = query;
- this.threadName = threadName;
- this.threadTime = threadTime;
- this.dataModelResult = dataModelResult;
- this.numberOfExecutions = numberOfExecutions;
- this.executionDurationInMs = executionDurationInMs;
- this.resultManager = new ResultManager(dataModelResult.getName(), RunMode.PERFORMANCE);
- }
-
- /**
- * Executes run for a minimum of number of execution or execution duration
- */
- public void run() {
- logger.info("\n\nThread Starting " + t.getName() + " ; " + query.getStatement() + " for "
- + numberOfExecutions + "times\n\n");
- Long start = System.currentTimeMillis();
- for (long i = numberOfExecutions; (i > 0 && ((System
- .currentTimeMillis() - start) < executionDurationInMs)); i--) {
- try {
- synchronized (resultManager) {
- timedQuery();
- if ((System.currentTimeMillis() - lastResultWritten) > 1000) {
- resultManager.write(dataModelResult);
- lastResultWritten = System.currentTimeMillis();
- }
- }
- } catch (Exception e) {
- e.printStackTrace();
- }
- }
- logger.info("\n\nThread exiting." + t.getName() + "\n\n");
- }
-
- /**
- * Thread start
- * @return
- */
- public Thread start() {
- if (t == null) {
- t = new Thread(this, threadName);
- t.start();
- }
- return t;
- }
-
- private synchronized ThreadTime getThreadTime() {
- return threadTime;
- }
-
- /**
- * Timed query execution
- *
- * @throws Exception
- */
- private void timedQuery() throws Exception {
- boolean isSelectCountStatement = query.getStatement().toUpperCase().trim()
- .contains("COUNT(*)") ? true : false;
-
- Connection conn = null;
- PreparedStatement statement = null;
- ResultSet rs = null;
- Long start = System.currentTimeMillis();
- Date startDate = Calendar.getInstance().getTime();
- String exception = null;
- long resultRowCount = 0;
-
- try {
- conn = pUtil.getConnection(query.getTenantId());
- statement = conn.prepareStatement(query.getStatement());
- boolean isQuery = statement.execute();
- if (isQuery) {
- rs = statement.getResultSet();
- while (rs.next()) {
- if (null != query.getExpectedAggregateRowCount()) {
- if (rs.getLong(1) != query.getExpectedAggregateRowCount())
- throw new RuntimeException("Aggregate count "
- + rs.getLong(1) + " does not match expected "
- + query.getExpectedAggregateRowCount());
- }
-
- if (isSelectCountStatement) {
- resultRowCount = rs.getLong(1);
- } else {
- resultRowCount++;
- }
- }
- } else {
- conn.commit();
- }
- } catch (Exception e) {
- e.printStackTrace();
- exception = e.getMessage();
- } finally {
- getThreadTime().getRunTimesInMs().add(
- new RunTime(exception, startDate, resultRowCount, (int) (System.currentTimeMillis() - start)));
-
- if (rs != null) rs.close();
- if (statement != null) statement.close();
- if (conn != null) conn.close();
- }
- }
-}
\ No newline at end of file
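One detail of the class removed above worth calling out: result writes were throttled to at most one per second across all runner threads, coordinated by locking the shared ResultManager and checking a static timestamp. The pattern, isolated from run() for reference (the fields are the ones declared in the removed class):

    synchronized (resultManager) {
        timedQuery();
        if ((System.currentTimeMillis() - lastResultWritten) > 1000) {
            resultManager.write(dataModelResult);
            lastResultWritten = System.currentTimeMillis();
        }
    }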
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java
index 6f6e000..624188c 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java
@@ -18,227 +18,256 @@
package org.apache.phoenix.pherf.workload;
-import java.sql.Connection;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.phoenix.pherf.PherfConstants.RunMode;
-import org.apache.phoenix.pherf.configuration.XMLConfigParser;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.phoenix.pherf.PherfConstants.RunMode;
+import org.apache.phoenix.pherf.configuration.*;
import org.apache.phoenix.pherf.result.*;
+import org.apache.phoenix.pherf.util.PhoenixUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.phoenix.pherf.configuration.DataModel;
-import org.apache.phoenix.pherf.configuration.ExecutionType;
-import org.apache.phoenix.pherf.configuration.Query;
-import org.apache.phoenix.pherf.configuration.QuerySet;
-import org.apache.phoenix.pherf.configuration.Scenario;
-import org.apache.phoenix.pherf.util.PhoenixUtil;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+
+public class QueryExecutor implements Workload {
+ private static final Logger logger = LoggerFactory.getLogger(QueryExecutor.class);
+ private List<DataModel> dataModels;
+ private String queryHint;
+ private final RunMode runMode;
+ private final boolean exportCSV;
+ private final ExecutorService pool;
+ private final XMLConfigParser parser;
+ private final PhoenixUtil util;
+
+ public QueryExecutor(XMLConfigParser parser, PhoenixUtil util, ExecutorService pool) {
+ this(parser, util, pool, parser.getDataModels(), null, false, RunMode.PERFORMANCE);
+ }
+
+ public QueryExecutor(XMLConfigParser parser, PhoenixUtil util, ExecutorService pool,
+ List<DataModel> dataModels, String queryHint, boolean exportCSV, RunMode runMode) {
+ this.parser = parser;
+ this.queryHint = queryHint;
+ this.exportCSV = exportCSV;
+ this.runMode = runMode;
+ this.dataModels = dataModels;
+ this.pool = pool;
+ this.util = util;
+ }
+
+ @Override public void complete() {
+
+ }
+
+ /**
+ * Runs the multithreaded query executor for all data models
+ *
+ * @throws Exception
+ */
+ public Runnable execute() throws Exception {
+ Runnable runnable = null;
+ for (DataModel dataModel : dataModels) {
+ if (exportCSV) {
+ runnable = exportAllScenarios(dataModel);
+ } else {
+ runnable = executeAllScenarios(dataModel);
+ }
+ }
+ return runnable;
+ }
+
+ /**
+ * Export all queries results to CSV
+ *
+ * @param dataModel
+ * @throws Exception
+ */
+ protected Runnable exportAllScenarios(final DataModel dataModel) throws Exception {
+ return new Runnable() {
+ @Override public void run() {
+ try {
+
+ List<Scenario> scenarios = dataModel.getScenarios();
+ QueryVerifier exportRunner = new QueryVerifier(false);
+ for (Scenario scenario : scenarios) {
+ for (QuerySet querySet : scenario.getQuerySet()) {
+ util.executeQuerySetDdls(querySet);
+ for (Query query : querySet.getQuery()) {
+ exportRunner.exportCSV(query);
+ }
+ }
+ }
+ } catch (Exception e) {
+ logger.warn("", e);
+ }
+ }
+ };
+ }
+
+ /**
+ * Execute all scenarios
+ *
+ * @param dataModel
+ * @throws Exception
+ */
+ protected Runnable executeAllScenarios(final DataModel dataModel) throws Exception {
+ return new Runnable() {
+ @Override public void run() {
+ List<DataModelResult> dataModelResults = new ArrayList<>();
+ DataModelResult dataModelResult =
+ new DataModelResult(dataModel, PhoenixUtil.getZookeeper());
+ ResultManager resultManager =
+ new ResultManager(dataModelResult.getName(), QueryExecutor.this.runMode);
+
+ dataModelResults.add(dataModelResult);
+ List<Scenario> scenarios = dataModel.getScenarios();
+ Configuration conf = HBaseConfiguration.create();
+ Map<String, String> phoenixProperty = conf.getValByRegex("phoenix");
+ try {
+
+ for (Scenario scenario : scenarios) {
+ ScenarioResult scenarioResult = new ScenarioResult(scenario);
+ scenarioResult.setPhoenixProperties(phoenixProperty);
+ dataModelResult.getScenarioResult().add(scenarioResult);
+ WriteParams writeParams = scenario.getWriteParams();
+
+ if (writeParams != null) {
+ int writerThreadCount = writeParams.getWriterThreadCount();
+ for (int i = 0; i < writerThreadCount; i++) {
+ logger.debug("Inserting write workload ( " + i + " ) of ( "
+ + writerThreadCount + " )");
+ Workload writes = new WriteWorkload(PhoenixUtil.create(), parser);
+ pool.submit(writes.execute());
+ }
+ }
+
+ for (QuerySet querySet : scenario.getQuerySet()) {
+ QuerySetResult querySetResult = new QuerySetResult(querySet);
+ scenarioResult.getQuerySetResult().add(querySetResult);
+
+ util.executeQuerySetDdls(querySet);
+ if (querySet.getExecutionType() == ExecutionType.SERIAL) {
+ executeQuerySetSerial(dataModelResult, querySet, querySetResult);
+ } else {
+ executeQuerySetParallel(dataModelResult, querySet, querySetResult);
+ }
+ }
+ resultManager.write(dataModelResult);
+ }
+ resultManager.write(dataModelResults);
+ } catch (Exception e) {
+ logger.warn("", e);
+ }
+ }
+ };
+ }
-public class QueryExecutor {
- private static final Logger logger = LoggerFactory.getLogger(QueryExecutor.class);
- private List<DataModel> dataModels;
- private String queryHint;
- private RunMode runMode;
+ /**
+ * Execute query set serially
+ *
+ * @param dataModelResult
+ * @param querySet
+ * @param querySetResult
+ * @throws InterruptedException
+ */
+ protected void executeQuerySetSerial(DataModelResult dataModelResult, QuerySet querySet,
+ QuerySetResult querySetResult) throws InterruptedException {
+ for (Query query : querySet.getQuery()) {
+ QueryResult queryResult = new QueryResult(query);
+ querySetResult.getQueryResults().add(queryResult);
+
+ for (int cr = querySet.getMinConcurrency(); cr <= querySet.getMaxConcurrency(); cr++) {
+
+ List<Future> threads = new ArrayList<>();
+
+ for (int i = 0; i < cr; i++) {
+
+ Runnable thread =
+ executeRunner((i + 1) + "," + cr, dataModelResult, queryResult,
+ querySetResult);
+ threads.add(pool.submit(thread));
+ }
+
+ for (Future thread : threads) {
+ try {
+ thread.get();
+ } catch (ExecutionException e) {
+ logger.error("", e);
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Execute query set in parallel
+ *
+ * @param dataModelResult
+ * @param querySet
+ * @param querySetResult
+ * @throws InterruptedException
+ */
+ protected void executeQuerySetParallel(DataModelResult dataModelResult, QuerySet querySet,
+ QuerySetResult querySetResult) throws InterruptedException {
+ for (int cr = querySet.getMinConcurrency(); cr <= querySet.getMaxConcurrency(); cr++) {
+ List<Future> threads = new ArrayList<>();
+ for (int i = 0; i < cr; i++) {
+ for (Query query : querySet.getQuery()) {
+ QueryResult queryResult = new QueryResult(query);
+ querySetResult.getQueryResults().add(queryResult);
+
+ Runnable thread =
+ executeRunner((i + 1) + "," + cr, dataModelResult, queryResult,
+ querySetResult);
+ threads.add(pool.submit(thread));
+ }
+
+ for (Future thread : threads) {
+ try {
+ thread.get();
+ } catch (ExecutionException e) {
+ logger.error("", e);
+ }
+ }
+ }
+ }
+ }
- public QueryExecutor(XMLConfigParser parser) {
- this.dataModels = parser.getDataModels();
+ /**
+ * Execute multi-thread runner
+ *
+ * @param name
+ * @param dataModelResult
+ * @param queryResult
+ * @param querySet
+ * @return
+ */
+ protected Runnable executeRunner(String name, DataModelResult dataModelResult,
+ QueryResult queryResult, QuerySet querySet) {
+ ThreadTime threadTime = new ThreadTime();
+ queryResult.getThreadTimes().add(threadTime);
+ threadTime.setThreadName(name);
+ queryResult.setHint(this.queryHint);
+ logger.info("\nExecuting query " + queryResult.getStatement());
+ Runnable thread;
+ if (this.runMode == RunMode.FUNCTIONAL) {
+ thread =
+ new MultithreadedDiffer(threadTime.getThreadName(), queryResult, threadTime,
+ querySet.getNumberOfExecutions(), querySet.getExecutionDurationInMs());
+ } else {
+ thread =
+ new MultiThreadedRunner(threadTime.getThreadName(), queryResult,
+ dataModelResult, threadTime, querySet.getNumberOfExecutions(),
+ querySet.getExecutionDurationInMs());
+ }
+ return thread;
}
-
- /**
- * Calls in Multithreaded Query Executor for all datamodels
- * @throws Exception
- */
- public void execute(String queryHint, boolean exportCSV, RunMode runMode) throws Exception {
- this.queryHint = queryHint;
- this.runMode = runMode;
- for (DataModel dataModel: dataModels) {
- if (exportCSV) {
- exportAllScenarios(dataModel);
- } else {
- executeAllScenarios(dataModel);
- }
- }
- }
-
- /**
- * Export all queries results to CSV
- * @param dataModel
- * @throws Exception
- */
- protected void exportAllScenarios(DataModel dataModel) throws Exception {
- List<Scenario> scenarios = dataModel.getScenarios();
- QueryVerifier exportRunner = new QueryVerifier(false);
- for (Scenario scenario : scenarios) {
- for (QuerySet querySet : scenario.getQuerySet()) {
- executeQuerySetDdls(querySet);
- for (Query query : querySet.getQuery()) {
- exportRunner.exportCSV(query);
- }
- }
- }
- }
-
- /**
- * Execute all scenarios
- * @param dataModel
- * @throws Exception
- */
- protected void executeAllScenarios(DataModel dataModel) throws Exception {
- List<DataModelResult> dataModelResults = new ArrayList<DataModelResult>();
- DataModelResult dataModelResult = new DataModelResult(dataModel, PhoenixUtil.getZookeeper());
- ResultManager resultManager = new ResultManager(dataModelResult.getName(), this.runMode);
-
-
- dataModelResults.add(dataModelResult);
- List<Scenario> scenarios = dataModel.getScenarios();
- Configuration conf = HBaseConfiguration.create();
- Map<String, String> phoenixProperty = conf.getValByRegex("phoenix");
- phoenixProperty.putAll(conf.getValByRegex("sfdc"));
-
- for (Scenario scenario : scenarios) {
- ScenarioResult scenarioResult = new ScenarioResult(scenario);
- scenarioResult.setPhoenixProperties(phoenixProperty);
- dataModelResult.getScenarioResult().add(scenarioResult);
-
- for (QuerySet querySet : scenario.getQuerySet()) {
- QuerySetResult querySetResult = new QuerySetResult(querySet);
- scenarioResult.getQuerySetResult().add(querySetResult);
-
- executeQuerySetDdls(querySet);
-
- if (querySet.getExecutionType() == ExecutionType.SERIAL) {
- execcuteQuerySetSerial(dataModelResult, querySet, querySetResult, scenarioResult);
- } else {
- execcuteQuerySetParallel(dataModelResult, querySet, querySetResult, scenarioResult);
- }
- }
- resultManager.write(dataModelResult);
- }
- resultManager.write(dataModelResults);
- }
-
- /**
- * Execute all querySet DDLs first based on tenantId if specified. This is executed
- * first since we don't want to run DDLs in parallel to executing queries.
- *
- * @param querySet
- * @throws Exception
- */
- protected void executeQuerySetDdls(QuerySet querySet) throws Exception {
- PhoenixUtil pUtil = new PhoenixUtil();
- for (Query query : querySet.getQuery()) {
- if (null != query.getDdl()) {
- Connection conn = null;
- try {
- logger.info("\nExecuting DDL:" + query.getDdl() + " on tenantId:" + query.getTenantId());
- pUtil.executeStatement(query.getDdl(), conn = pUtil.getConnection(query.getTenantId()));
- } finally {
- if (null != conn) {
- conn.close();
- }
- }
- }
- }
- }
-
- /**
- * Execute query set serially
- * @param dataModelResult
- * @param querySet
- * @param querySetResult
- * @param scenario
- * @throws InterruptedException
- */
- protected void execcuteQuerySetSerial(DataModelResult dataModelResult, QuerySet querySet, QuerySetResult querySetResult, Scenario scenario) throws InterruptedException {
- for (Query query : querySet.getQuery()) {
- QueryResult queryResult = new QueryResult(query);
- querySetResult.getQueryResults().add(queryResult);
-
- for (int cr = querySet.getMinConcurrency(); cr <= querySet
- .getMaxConcurrency(); cr++) {
-
- List<Thread> threads = new ArrayList<Thread>();
-
- for (int i = 0; i < cr; i++) {
-
- Thread thread = executeRunner((i + 1) + ","
- + cr, dataModelResult, queryResult,
- querySetResult);
- threads.add(thread);
- }
-
- for (Thread thread : threads) {
- thread.join();
- }
- }
- }
- }
-
- /**
- * Execute query set in parallel
- * @param dataModelResult
- * @param querySet
- * @param querySetResult
- * @param scenario
- * @throws InterruptedException
- */
- protected void execcuteQuerySetParallel(DataModelResult dataModelResult, QuerySet querySet, QuerySetResult querySetResult, Scenario scenario)
- throws InterruptedException {
- for (int cr = querySet.getMinConcurrency(); cr <= querySet
- .getMaxConcurrency(); cr++) {
- List<Thread> threads = new ArrayList<Thread>();
- for (int i = 0; i < cr; i++) {
- for (Query query : querySet.getQuery()) {
- QueryResult queryResult = new QueryResult(query);
- querySetResult.getQueryResults().add(queryResult);
-
- Thread thread = executeRunner((i + 1) + ","
- + cr, dataModelResult, queryResult,
- querySetResult);
- threads.add(thread);
- }
- }
- for (Thread thread : threads) {
- thread.join();
- }
- }
- }
-
- /**
- * Execute multi-thread runner
- * @param name
- * @param dataModelResult
- * @param queryResult
- * @param querySet
- * @return
- */
- protected Thread executeRunner(String name, DataModelResult dataModelResult, QueryResult queryResult, QuerySet querySet) {
- ThreadTime threadTime = new ThreadTime();
- queryResult.getThreadTimes().add(threadTime);
- threadTime.setThreadName(name);
- queryResult.setHint(this.queryHint);
- logger.info("\nExecuting query "
- + queryResult.getStatement());
- Thread thread;
- if (this.runMode == RunMode.FUNCTIONAL) {
- thread = new MultithreadedDiffer(
- threadTime.getThreadName(),
- queryResult,
- threadTime, querySet.getNumberOfExecutions(), querySet.getExecutionDurationInMs())
- .start();
- } else {
- thread = new MultithreadedRunner(
- threadTime.getThreadName(),
- queryResult,
- dataModelResult,
- threadTime, querySet.getNumberOfExecutions(), querySet.getExecutionDurationInMs())
- .start();
- }
- return thread;
- }
-}
+}
\ No newline at end of file
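The net effect of the QueryExecutor rewrite above is a switch from hand-managed Threads joined with Thread.join() to tasks submitted to a shared ExecutorService and awaited with Future.get(). A small self-contained sketch of that pattern (the pool size and the printing task are illustrative, not taken from the patch):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.*;

    public class PoolJoinSketch {
        public static void main(String[] args) throws InterruptedException {
            ExecutorService pool = Executors.newFixedThreadPool(4);
            List<Future<?>> futures = new ArrayList<>();
            for (int i = 0; i < 4; i++) {
                final int id = i;
                // submit() returns a Future that can be awaited later
                futures.add(pool.submit(() -> System.out.println("task " + id)));
            }
            for (Future<?> f : futures) {
                try {
                    f.get(); // block until the task finishes, surfacing failures
                } catch (ExecutionException e) {
                    e.getCause().printStackTrace(); // QueryExecutor logs these instead
                }
            }
            pool.shutdown();
        }
    }

Unlike Thread.join(), Future.get() rethrows a task's exception wrapped in ExecutionException, which is why the new executeQuerySetSerial/executeQuerySetParallel methods catch and log it explicitly.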
[25/47] phoenix git commit: PHOENIX-1975 Detect and use HBASE_HOME
when set
Posted by ma...@apache.org.
PHOENIX-1975 Detect and use HBASE_HOME when set
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3e493398
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3e493398
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3e493398
Branch: refs/heads/calcite
Commit: 3e49339828e64842891879a18248a91601740dd0
Parents: 3cf22a7
Author: Nick Dimiduk <nd...@apache.org>
Authored: Wed Jun 24 13:59:00 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Wed Jun 24 13:59:00 2015 -0700
----------------------------------------------------------------------
bin/phoenix_utils.py | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3e493398/bin/phoenix_utils.py
----------------------------------------------------------------------
diff --git a/bin/phoenix_utils.py b/bin/phoenix_utils.py
index 383e0e1..bfb4737 100755
--- a/bin/phoenix_utils.py
+++ b/bin/phoenix_utils.py
@@ -65,7 +65,15 @@ def setPath():
phoenix_class_path = os.getenv('PHOENIX_CLASS_PATH','')
global hbase_conf_dir
- hbase_conf_dir = os.getenv('HBASE_CONF_DIR', os.getenv('HBASE_CONF_PATH', '.'))
+ # if HBASE_CONF_DIR set explicitly, use that
+ hbase_conf_dir = os.getenv('HBASE_CONF_DIR', os.getenv('HBASE_CONF_PATH'))
+ if not hbase_conf_dir:
+ # else fall back to HBASE_HOME
+ if os.getenv('HBASE_HOME'):
+ hbase_conf_dir = os.path.join(os.getenv('HBASE_HOME'), "conf")
+ else:
+ # default to pwd
+ hbase_conf_dir = '.'
global hbase_conf_path # keep conf_path around for backward compatibility
hbase_conf_path = hbase_conf_dir
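For reference, the lookup order this patch establishes is: HBASE_CONF_DIR, then HBASE_CONF_PATH, then $HBASE_HOME/conf, and finally the working directory. The same resolution sketched in Java (the method name is illustrative):

    // Mirrors the Python logic above; returns "." only when nothing is set.
    static String resolveHBaseConfDir() {
        String dir = System.getenv("HBASE_CONF_DIR");
        if (dir == null) dir = System.getenv("HBASE_CONF_PATH");
        if (dir == null && System.getenv("HBASE_HOME") != null) {
            dir = System.getenv("HBASE_HOME") + java.io.File.separator + "conf";
        }
        return dir == null ? "." : dir;
    }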
[47/47] phoenix git commit: Fix compilation errors after merge
Posted by ma...@apache.org.
Fix compilation errors after merge
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1327c726
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1327c726
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1327c726
Branch: refs/heads/calcite
Commit: 1327c726aebf5940ef13d334193d56e0e562f2cc
Parents: b586007 b2fb04b
Author: maryannxue <we...@intel.com>
Authored: Thu Jul 2 17:00:44 2015 -0400
Committer: maryannxue <we...@intel.com>
Committed: Thu Jul 2 17:00:44 2015 -0400
----------------------------------------------------------------------
.gitignore | 2 +
bin/phoenix_utils.py | 10 +-
phoenix-assembly/pom.xml | 4 +
phoenix-assembly/src/build/server.xml | 1 +
.../phoenix/end2end/AbsFunctionEnd2EndIT.java | 108 +++
.../apache/phoenix/end2end/AlterTableIT.java | 553 ++++++++++++-
.../phoenix/end2end/ArrayAppendFunctionIT.java | 17 -
.../phoenix/end2end/ArrayConcatFunctionIT.java | 578 +++++++++++++
.../phoenix/end2end/ArrayFillFunctionIT.java | 531 ++++++++++++
.../phoenix/end2end/ArraysWithNullsIT.java | 2 +-
.../org/apache/phoenix/end2end/BaseViewIT.java | 2 +
.../phoenix/end2end/CSVCommonsLoaderIT.java | 18 +-
.../phoenix/end2end/CbrtFunctionEnd2EndIT.java | 143 ++++
.../phoenix/end2end/End2EndTestDriver.java | 19 +-
.../phoenix/end2end/ExpFunctionEnd2EndIT.java | 128 +++
.../phoenix/end2end/LnLogFunctionEnd2EndIT.java | 143 ++++
.../phoenix/end2end/PhoenixMetricsIT.java | 147 ----
.../phoenix/end2end/PowerFunctionEnd2EndIT.java | 144 ++++
.../end2end/QueryDatabaseMetaDataIT.java | 31 +
.../phoenix/end2end/RowValueConstructorIT.java | 28 +
.../end2end/TenantSpecificTablesDDLIT.java | 40 +-
.../org/apache/phoenix/end2end/UpgradeIT.java | 332 ++++++++
.../phoenix/end2end/UserDefinedFunctionsIT.java | 34 +-
.../java/org/apache/phoenix/end2end/ViewIT.java | 99 +++
.../apache/phoenix/execute/PartialCommitIT.java | 1 +
.../phoenix/mapreduce/CsvBulkLoadToolIT.java | 19 +
.../apache/phoenix/mapreduce/IndexToolIT.java | 47 +-
.../phoenix/monitoring/PhoenixMetricsIT.java | 815 +++++++++++++++++++
.../org/apache/phoenix/rpc/UpdateCacheIT.java | 139 ++++
phoenix-core/src/it/resources/hbase-default.xml | 36 -
phoenix-core/src/it/resources/hbase-site.xml | 36 +
phoenix-core/src/main/antlr3/PhoenixSQL.g | 3 +-
.../apache/phoenix/cache/ServerCacheClient.java | 7 +
.../rel/PhoenixToEnumerableConverter.java | 9 +-
.../apache/phoenix/compile/DeleteCompiler.java | 50 +-
.../MutatingParallelIteratorFactory.java | 51 +-
.../apache/phoenix/compile/QueryCompiler.java | 2 +-
.../org/apache/phoenix/compile/QueryPlan.java | 3 +
.../phoenix/compile/StatementContext.java | 49 +-
.../apache/phoenix/compile/TraceQueryPlan.java | 9 +-
.../apache/phoenix/compile/UpsertCompiler.java | 80 +-
.../apache/phoenix/compile/WhereOptimizer.java | 28 +-
.../coprocessor/MetaDataEndpointImpl.java | 549 ++++++++++---
.../phoenix/coprocessor/MetaDataProtocol.java | 4 +-
.../coprocessor/generated/PTableProtos.java | 103 ++-
.../phoenix/exception/SQLExceptionCode.java | 2 -
.../apache/phoenix/execute/AggregatePlan.java | 11 +-
.../apache/phoenix/execute/BaseQueryPlan.java | 14 +-
.../phoenix/execute/ClientAggregatePlan.java | 5 +-
.../phoenix/execute/ClientProcessingPlan.java | 9 +
.../apache/phoenix/execute/ClientScanPlan.java | 5 +-
.../phoenix/execute/DegenerateQueryPlan.java | 3 +-
.../apache/phoenix/execute/HashJoinPlan.java | 16 +-
.../apache/phoenix/execute/MutationState.java | 290 ++++---
.../org/apache/phoenix/execute/ScanPlan.java | 7 +-
.../phoenix/execute/SortMergeJoinPlan.java | 13 +-
.../phoenix/execute/TupleProjectionPlan.java | 11 +-
.../org/apache/phoenix/execute/UnionPlan.java | 14 +-
.../phoenix/expression/ExpressionType.java | 18 +-
.../expression/function/AbsFunction.java | 66 ++
.../function/ArrayAppendFunction.java | 53 +-
.../function/ArrayConcatFunction.java | 83 ++
.../expression/function/ArrayFillFunction.java | 79 ++
.../function/ArrayModifierFunction.java | 155 +++-
.../function/ArrayPrependFunction.java | 54 +-
.../expression/function/CbrtFunction.java | 55 ++
.../expression/function/ExpFunction.java | 55 ++
.../function/JavaMathOneArgumentFunction.java | 43 +-
.../function/JavaMathTwoArgumentFunction.java | 69 ++
.../phoenix/expression/function/LnFunction.java | 55 ++
.../expression/function/LogFunction.java | 56 ++
.../expression/function/PowerFunction.java | 51 ++
.../expression/function/ScalarFunction.java | 4 +-
.../expression/function/SqrtFunction.java | 8 +-
.../phoenix/iterate/BaseResultIterators.java | 39 +-
.../phoenix/iterate/ChunkedResultIterator.java | 21 +-
.../iterate/DefaultParallelScanGrouper.java | 62 ++
.../iterate/MapReduceParallelScanGrouper.java | 45 +
.../iterate/ParallelIteratorFactory.java | 4 +-
.../phoenix/iterate/ParallelIterators.java | 34 +-
.../phoenix/iterate/ParallelScanGrouper.java | 41 +
.../iterate/RoundRobinResultIterator.java | 4 +-
.../phoenix/iterate/ScanningResultIterator.java | 38 +-
.../apache/phoenix/iterate/SerialIterators.java | 27 +-
.../phoenix/iterate/SpoolingResultIterator.java | 49 +-
.../phoenix/iterate/TableResultIterator.java | 17 +-
.../phoenix/iterate/UnionResultIterators.java | 70 +-
.../apache/phoenix/jdbc/PhoenixConnection.java | 27 +-
.../phoenix/jdbc/PhoenixDatabaseMetaData.java | 25 +-
.../apache/phoenix/jdbc/PhoenixResultSet.java | 48 +-
.../apache/phoenix/jdbc/PhoenixStatement.java | 27 +-
.../java/org/apache/phoenix/job/JobManager.java | 60 +-
.../phoenix/mapreduce/CsvBulkLoadTool.java | 12 +-
.../phoenix/mapreduce/PhoenixInputFormat.java | 6 +-
.../phoenix/mapreduce/PhoenixInputSplit.java | 1 +
.../phoenix/mapreduce/PhoenixRecordReader.java | 12 +-
.../phoenix/mapreduce/index/IndexTool.java | 15 +-
.../phoenix/memory/GlobalMemoryManager.java | 5 -
.../apache/phoenix/monitoring/AtomicMetric.java | 70 ++
.../phoenix/monitoring/CombinableMetric.java | 77 ++
.../monitoring/CombinableMetricImpl.java | 77 ++
.../org/apache/phoenix/monitoring/Counter.java | 85 --
.../phoenix/monitoring/GlobalClientMetrics.java | 117 +++
.../apache/phoenix/monitoring/GlobalMetric.java | 37 +
.../phoenix/monitoring/GlobalMetricImpl.java | 74 ++
.../phoenix/monitoring/MemoryMetricsHolder.java | 43 +
.../org/apache/phoenix/monitoring/Metric.java | 45 +-
.../apache/phoenix/monitoring/MetricType.java | 55 ++
.../phoenix/monitoring/MetricsStopWatch.java | 59 ++
.../phoenix/monitoring/MutationMetricQueue.java | 131 +++
.../phoenix/monitoring/NonAtomicMetric.java | 71 ++
.../phoenix/monitoring/OverAllQueryMetrics.java | 121 +++
.../phoenix/monitoring/PhoenixMetrics.java | 118 ---
.../phoenix/monitoring/ReadMetricQueue.java | 180 ++++
.../phoenix/monitoring/SizeStatistic.java | 78 --
.../monitoring/SpoolingMetricsHolder.java | 43 +
.../monitoring/TaskExecutionMetricsHolder.java | 68 ++
.../apache/phoenix/parse/FunctionParseNode.java | 9 +-
.../org/apache/phoenix/parse/PFunction.java | 12 +-
.../phoenix/query/BaseQueryServicesImpl.java | 2 +-
.../query/ConnectionQueryServicesImpl.java | 63 +-
.../apache/phoenix/query/QueryConstants.java | 30 +-
.../org/apache/phoenix/query/QueryServices.java | 3 +-
.../phoenix/query/QueryServicesOptions.java | 25 +-
.../apache/phoenix/schema/DelegateTable.java | 5 +
.../apache/phoenix/schema/MetaDataClient.java | 99 +--
.../java/org/apache/phoenix/schema/PTable.java | 1 +
.../org/apache/phoenix/schema/PTableImpl.java | 40 +-
.../phoenix/schema/types/PArrayDataType.java | 163 +++-
.../org/apache/phoenix/schema/types/PChar.java | 3 -
.../apache/phoenix/schema/types/PDecimal.java | 11 +
.../phoenix/schema/types/PNumericType.java | 8 +
.../phoenix/schema/types/PRealNumber.java | 8 +
.../phoenix/schema/types/PWholeNumber.java | 8 +
.../phoenix/trace/PhoenixMetricsSink.java | 36 +-
.../java/org/apache/phoenix/util/JDBCUtil.java | 6 +-
.../org/apache/phoenix/util/PhoenixRuntime.java | 175 +++-
.../java/org/apache/phoenix/util/QueryUtil.java | 2 +-
.../org/apache/phoenix/util/UpgradeUtil.java | 395 ++++++++-
.../phoenix/util/csv/CsvUpsertExecutor.java | 5 +-
.../phoenix/compile/QueryCompilerTest.java | 68 +-
.../phoenix/compile/WhereCompilerTest.java | 10 +-
.../phoenix/compile/WhereOptimizerTest.java | 38 +
.../phoenix/expression/AbsFunctionTest.java | 180 ++++
.../expression/ArrayConcatFunctionTest.java | 584 +++++++++++++
.../expression/ArrayFillFunctionTest.java | 221 +++++
.../phoenix/expression/CbrtFunctionTest.java | 127 +++
.../phoenix/expression/ExpFunctionTest.java | 150 ++++
.../phoenix/expression/LnLogFunctionTest.java | 182 +++++
.../phoenix/expression/PowerFunctionTest.java | 182 +++++
.../iterate/SpoolingResultIteratorTest.java | 4 +-
.../java/org/apache/phoenix/query/BaseTest.java | 5 +-
.../phoenix/query/ConnectionlessTest.java | 14 +-
.../query/ParallelIteratorsSplitTest.java | 6 +
.../src/test/resources/hbase-default.xml | 36 -
phoenix-pherf/pom.xml | 10 +-
.../org/apache/phoenix/pherf/DataIngestIT.java | 134 ++-
.../org/apache/phoenix/pherf/PherfMainIT.java | 36 +
.../apache/phoenix/pherf/ResultBaseTestIT.java | 31 +-
.../apache/phoenix/pherf/SchemaReaderIT.java | 17 +-
.../java/org/apache/phoenix/pherf/Pherf.java | 179 ++--
.../apache/phoenix/pherf/PherfConstants.java | 8 +-
.../phoenix/pherf/configuration/DataModel.java | 10 -
.../phoenix/pherf/configuration/Scenario.java | 12 +-
.../pherf/configuration/WriteParams.java | 72 ++
.../pherf/configuration/XMLConfigParser.java | 25 +-
.../phoenix/pherf/jmx/MonitorManager.java | 153 ++--
.../phoenix/pherf/loaddata/DataLoader.java | 332 --------
.../pherf/result/DataLoadThreadTime.java | 87 +-
.../pherf/result/DataLoadTimeSummary.java | 54 +-
.../phoenix/pherf/result/DataModelResult.java | 68 +-
.../phoenix/pherf/result/QueryResult.java | 17 +-
.../phoenix/pherf/result/QuerySetResult.java | 40 +-
.../org/apache/phoenix/pherf/result/Result.java | 11 +-
.../phoenix/pherf/result/ResultHandler.java | 5 +
.../phoenix/pherf/result/ResultManager.java | 19 +-
.../apache/phoenix/pherf/result/ResultUtil.java | 119 +--
.../phoenix/pherf/result/ResultValue.java | 4 +-
.../apache/phoenix/pherf/result/RunTime.java | 179 ++--
.../phoenix/pherf/result/ScenarioResult.java | 44 +-
.../apache/phoenix/pherf/result/ThreadTime.java | 34 +-
.../phoenix/pherf/result/file/Extension.java | 3 +-
.../phoenix/pherf/result/file/Header.java | 11 +-
.../pherf/result/impl/CSVResultHandler.java | 47 +-
.../pherf/result/impl/ImageResultHandler.java | 58 +-
.../pherf/result/impl/XMLResultHandler.java | 36 +-
.../phoenix/pherf/schema/SchemaReader.java | 2 +-
.../apache/phoenix/pherf/util/PhoenixUtil.java | 64 +-
.../pherf/workload/MultiThreadedRunner.java | 153 ++++
.../pherf/workload/MultithreadedDiffer.java | 134 ++-
.../pherf/workload/MultithreadedRunner.java | 170 ----
.../phoenix/pherf/workload/QueryExecutor.java | 459 ++++++-----
.../phoenix/pherf/workload/QueryVerifier.java | 265 +++---
.../apache/phoenix/pherf/workload/Workload.java | 10 +
.../pherf/workload/WorkloadExecutor.java | 109 +--
.../phoenix/pherf/workload/WriteWorkload.java | 403 +++++++++
.../scenario/prod_test_unsalted_scenario.xml | 35 +
.../phoenix/pherf/ConfigurationParserTest.java | 102 ++-
.../org/apache/phoenix/pherf/ResultTest.java | 5 +-
.../apache/phoenix/pherf/RuleGeneratorTest.java | 37 +-
.../test/resources/scenario/test_scenario.xml | 58 +-
.../org/apache/phoenix/pig/util/TypeUtil.java | 392 +++++----
.../apache/phoenix/pig/util/TypeUtilTest.java | 70 ++
phoenix-spark/pom.xml | 51 +-
pom.xml | 8 +-
205 files changed, 12569 insertions(+), 3425 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/1327c726/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixToEnumerableConverter.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixToEnumerableConverter.java
index 771cad9,0000000..57e5136
mode 100644,000000..100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixToEnumerableConverter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/calcite/rel/PhoenixToEnumerableConverter.java
@@@ -1,111 -1,0 +1,118 @@@
+package org.apache.phoenix.calcite.rel;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.calcite.adapter.enumerable.EnumerableConvention;
+import org.apache.calcite.adapter.enumerable.EnumerableRel;
+import org.apache.calcite.adapter.enumerable.EnumerableRelImplementor;
+import org.apache.calcite.adapter.enumerable.JavaRowFormat;
+import org.apache.calcite.adapter.enumerable.PhysType;
+import org.apache.calcite.adapter.enumerable.PhysTypeImpl;
+import org.apache.calcite.linq4j.tree.BlockBuilder;
+import org.apache.calcite.linq4j.tree.Expression;
+import org.apache.calcite.linq4j.tree.Expressions;
+import org.apache.calcite.linq4j.tree.MethodCallExpression;
+import org.apache.calcite.linq4j.tree.ParameterExpression;
+import org.apache.calcite.plan.ConventionTraitDef;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptCost;
+import org.apache.calcite.plan.RelOptPlanner;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.convert.ConverterImpl;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.phoenix.calcite.BuiltInMethod;
+import org.apache.phoenix.compile.ExplainPlan;
+import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.compile.RowProjector;
+import org.apache.phoenix.execute.DelegateQueryPlan;
++import org.apache.phoenix.iterate.DefaultParallelScanGrouper;
++import org.apache.phoenix.iterate.ParallelScanGrouper;
+import org.apache.phoenix.iterate.ResultIterator;
+
+/**
+ * Scan of a Phoenix table.
+ */
+public class PhoenixToEnumerableConverter extends ConverterImpl implements EnumerableRel {
+
+ public static PhoenixToEnumerableConverter create(RelNode input) {
+ RelOptCluster cluster = input.getCluster();
+ RelTraitSet traits = input.getTraitSet().replace(EnumerableConvention.INSTANCE);
+ return new PhoenixToEnumerableConverter(cluster, traits, input);
+ }
+
+ private PhoenixToEnumerableConverter(
+ RelOptCluster cluster,
+ RelTraitSet traits,
+ RelNode input) {
+ super(cluster, ConventionTraitDef.INSTANCE, traits, input);
+ }
+
+ @Override public RelNode copy(RelTraitSet traitSet, List<RelNode> inputs) {
+ return create(sole(inputs));
+ }
+
+ @Override public RelOptCost computeSelfCost(RelOptPlanner planner) {
+ return super.computeSelfCost(planner).multiplyBy(.1);
+ }
+
+ @Override
+ public Result implement(EnumerableRelImplementor implementor, Prefer pref) {
+ // Generates code that instantiates a result iterator, then converts it
+ // to an enumerable.
+ //
+ // ResultIterator iterator = root.get("x");
+ // return CalciteRuntime.toEnumerable(iterator);
+ final BlockBuilder list = new BlockBuilder();
+ QueryPlan plan = makePlan((PhoenixRel)getInput());
+ Expression var = stash(implementor, plan, QueryPlan.class);
+ final RelDataType rowType = getRowType();
+ final PhysType physType =
+ PhysTypeImpl.of(
+ implementor.getTypeFactory(), rowType,
+ pref.prefer(JavaRowFormat.ARRAY));
+ final Expression iterator_ =
+ list.append("iterator", var);
+ final Expression enumerable_ =
+ list.append("enumerable",
+ Expressions.call(BuiltInMethod.TO_ENUMERABLE.method,
+ iterator_));
+ list.add(Expressions.return_(null, enumerable_));
+ return implementor.result(physType, list.toBlock());
+ }
+
+ static QueryPlan makePlan(PhoenixRel rel) {
+ final PhoenixRel.Implementor phoenixImplementor = new PhoenixRelImplementorImpl();
+ final QueryPlan plan = phoenixImplementor.visitInput(0, rel);
+ return new DelegateQueryPlan(plan) {
+ @Override
+ public ResultIterator iterator() throws SQLException {
- return delegate.iterator();
++ return iterator(DefaultParallelScanGrouper.getInstance());
+ }
+ @Override
+ public ExplainPlan getExplainPlan() throws SQLException {
+ return delegate.getExplainPlan();
+ }
+ @Override
+ public RowProjector getProjector() {
+ return phoenixImplementor.createRowProjector();
+ }
++ @Override
++ public ResultIterator iterator(ParallelScanGrouper scanGrouper)
++ throws SQLException {
++ return delegate.iterator(scanGrouper);
++ }
+ };
+ }
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ static Expression stash(EnumerableRelImplementor implementor, Object o, Class clazz) {
+ ParameterExpression x = (ParameterExpression) implementor.stash(o, clazz);
+ MethodCallExpression e =
+ Expressions.call(implementor.getRootExpression(),
+ org.apache.calcite.util.BuiltInMethod.DATA_CONTEXT_GET.method,
+ Expressions.constant(x.name));
+ return Expressions.convert_(e, clazz);
+ }
+}
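As the comment inside implement() above notes, the generated enumerable amounts to fetching the stashed QueryPlan back out of the DataContext and converting its iterator. Expanded slightly as conceptual pseudo-Java (the name "x" and the CalciteRuntime helper follow that comment; this is not literal generated output):

    // Conceptual only, mirroring the comment in implement().
    QueryPlan plan = (QueryPlan) root.get("x");      // stashed at planning time via stash()
    ResultIterator iterator = plan.iterator();       // now grouper-aware, per makePlan()
    Enumerable<Object> rows = CalciteRuntime.toEnumerable(iterator);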
http://git-wip-us.apache.org/repos/asf/phoenix/blob/1327c726/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/1327c726/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
----------------------------------------------------------------------
diff --cc phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
index 64cdf10,05ef1ec..ead6ec9
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
@@@ -54,6 -57,8 +57,7 @@@ import org.apache.phoenix.jdbc.PhoenixC
import org.apache.phoenix.job.JobManager.JobCallable;
import org.apache.phoenix.join.HashCacheClient;
import org.apache.phoenix.join.HashJoinInfo;
+ import org.apache.phoenix.monitoring.TaskExecutionMetricsHolder;
-import org.apache.phoenix.parse.FilterableStatement;
import org.apache.phoenix.parse.ParseNode;
import org.apache.phoenix.parse.SQLParser;
import org.apache.phoenix.parse.SelectStatement;
@@@ -111,16 -116,13 +115,21 @@@ public class HashJoinPlan extends Deleg
this.recompileWhereClause = recompileWhereClause;
}
+ public HashJoinInfo getJoinInfo() {
+ return this.joinInfo;
+ }
+
+ public SubPlan[] getSubPlans() {
+ return this.subPlans;
+ }
+
@Override
public ResultIterator iterator() throws SQLException {
+ return iterator(DefaultParallelScanGrouper.getInstance());
+ }
+
+ @Override
+ public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
int count = subPlans.length;
PhoenixConnection connection = getContext().getConnection();
ConnectionQueryServices services = connection.getQueryServices();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/1327c726/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/1327c726/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjectionPlan.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/1327c726/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/1327c726/pom.xml
----------------------------------------------------------------------
[46/47] phoenix git commit: PHOENIX-2011 Default, min,
and max values should not require quotes around them in create
function (Rajeshbabu)
Posted by ma...@apache.org.
PHOENIX-2011 Default, min, and max values should not require quotes around them in create function (Rajeshbabu)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b2fb04b0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b2fb04b0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b2fb04b0
Branch: refs/heads/calcite
Commit: b2fb04b0c2234c5b573642d39589ab9d36469723
Parents: 54da7d1
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Thu Jul 2 15:51:18 2015 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Thu Jul 2 15:51:18 2015 +0530
----------------------------------------------------------------------
.../apache/phoenix/end2end/UserDefinedFunctionsIT.java | 12 ++++++------
phoenix-core/src/main/antlr3/PhoenixSQL.g | 3 ++-
.../phoenix/coprocessor/MetaDataEndpointImpl.java | 9 +++++----
.../org/apache/phoenix/parse/FunctionParseNode.java | 9 +++------
.../main/java/org/apache/phoenix/parse/PFunction.java | 12 +++++++++---
.../java/org/apache/phoenix/schema/MetaDataClient.java | 6 +++---
6 files changed, 28 insertions(+), 23 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b2fb04b0/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
index cee1c85..613231d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
@@ -348,7 +348,7 @@ public class UserDefinedFunctionsIT extends BaseOwnClusterIT{
}
- tenant2Conn.createStatement().execute("create function myfunction(INTEGER, INTEGER CONSTANT defaultValue='10' minvalue='1' maxvalue='15' ) returns INTEGER as 'org.apache.phoenix.end2end."+MY_SUM_CLASS_NAME+"' using jar "
+ tenant2Conn.createStatement().execute("create function myfunction(INTEGER, INTEGER CONSTANT defaultValue=10 minvalue=1 maxvalue=15 ) returns INTEGER as 'org.apache.phoenix.end2end."+MY_SUM_CLASS_NAME+"' using jar "
+ "'"+util.getConfiguration().get(DYNAMIC_JARS_DIR_KEY) + "/myjar2.jar"+"'");
try {
tenant2Conn.createStatement().execute("create function myfunction(VARCHAR) returns VARCHAR as 'org.apache.phoenix.end2end.UnknownClass' using jar "
@@ -424,7 +424,7 @@ public class UserDefinedFunctionsIT extends BaseOwnClusterIT{
conn.commit();
conn.createStatement().execute("create table t2(k integer primary key, k1 integer, lastname_reverse varchar)");
conn.commit();
- stmt.execute("create function mysum3(INTEGER, INTEGER CONSTANT defaultValue='10' minvalue='1' maxvalue='15' ) returns INTEGER as 'org.apache.phoenix.end2end."+MY_SUM_CLASS_NAME+"' using jar "
+ stmt.execute("create function mysum3(INTEGER, INTEGER CONSTANT defaultValue=10 minvalue=1 maxvalue=15 ) returns INTEGER as 'org.apache.phoenix.end2end."+MY_SUM_CLASS_NAME+"' using jar "
+ "'"+util.getConfiguration().get(DYNAMIC_JARS_DIR_KEY) + "/myjar2.jar"+"'");
stmt.execute("create function myreverse3(VARCHAR) returns VARCHAR as 'org.apache.phoenix.end2end."+MY_REVERSE_CLASS_NAME+"' using jar "
+ "'"+util.getConfiguration().get(DYNAMIC_JARS_DIR_KEY) + "/myjar1.jar"+"'");
@@ -458,7 +458,7 @@ public class UserDefinedFunctionsIT extends BaseOwnClusterIT{
conn.createStatement().execute("create table t4(k integer primary key, k1 integer, lastname varchar)");
stmt.execute("upsert into t4 values(1,1,'jock')");
conn.commit();
- stmt.execute("create function mysum(INTEGER, INTEGER CONSTANT defaultValue='10' minvalue='1' maxvalue='15' ) returns INTEGER as 'org.apache.phoenix.end2end."+MY_SUM_CLASS_NAME+"' using jar "
+ stmt.execute("create function mysum(INTEGER, INTEGER CONSTANT defaultValue=10 minvalue=1 maxvalue=15 ) returns INTEGER as 'org.apache.phoenix.end2end."+MY_SUM_CLASS_NAME+"' using jar "
+ "'"+util.getConfiguration().get(DYNAMIC_JARS_DIR_KEY) + "/myjar2.jar"+"'");
ResultSet rs = stmt.executeQuery("select mysum(k,12) from t4");
assertTrue(rs.next());
@@ -481,7 +481,7 @@ public class UserDefinedFunctionsIT extends BaseOwnClusterIT{
conn.createStatement().execute("create table t9(k integer primary key, k1 integer, lastname varchar)");
stmt.execute("upsert into t9 values(1,1,'jock')");
conn.commit();
- stmt.execute("create temporary function mysum9(INTEGER, INTEGER CONSTANT defaultValue='10' minvalue='1' maxvalue='15' ) returns INTEGER as 'org.apache.phoenix.end2end."+MY_SUM_CLASS_NAME+"' using jar "
+ stmt.execute("create temporary function mysum9(INTEGER, INTEGER CONSTANT defaultValue=10 minvalue=1 maxvalue=15 ) returns INTEGER as 'org.apache.phoenix.end2end."+MY_SUM_CLASS_NAME+"' using jar "
+ "'"+util.getConfiguration().get(DYNAMIC_JARS_DIR_KEY) + "/myjar2.jar"+"'");
ResultSet rs = stmt.executeQuery("select mysum9(k,12) from t9");
assertTrue(rs.next());
@@ -523,7 +523,7 @@ public class UserDefinedFunctionsIT extends BaseOwnClusterIT{
ResultSet rs = stmt.executeQuery(query);
rs.next();
int numRowsBefore = rs.getInt(1);
- stmt.execute("create function mysum6(INTEGER, INTEGER CONSTANT defaultValue='10' minvalue='1' maxvalue='15' ) returns INTEGER as 'org.apache.phoenix.end2end."+MY_SUM_CLASS_NAME+"' using jar "
+ stmt.execute("create function mysum6(INTEGER, INTEGER CONSTANT defaultValue=10 minvalue=1 maxvalue=15 ) returns INTEGER as 'org.apache.phoenix.end2end."+MY_SUM_CLASS_NAME+"' using jar "
+ "'"+util.getConfiguration().get(DYNAMIC_JARS_DIR_KEY) + "/myjar2.jar"+"'");
rs = stmt.executeQuery(query);
rs.next();
@@ -551,7 +551,7 @@ public class UserDefinedFunctionsIT extends BaseOwnClusterIT{
} catch(FunctionNotFoundException e) {
fail("FunctionNotFoundException should not be thrown");
}
- stmt.execute("create function mysum6(INTEGER, INTEGER CONSTANT defaultValue='10' minvalue='1' maxvalue='15' ) returns INTEGER as 'org.apache.phoenix.end2end."+MY_SUM_CLASS_NAME+"' using jar "
+ stmt.execute("create function mysum6(INTEGER, INTEGER CONSTANT defaultValue=10 minvalue=1 maxvalue=15 ) returns INTEGER as 'org.apache.phoenix.end2end."+MY_SUM_CLASS_NAME+"' using jar "
+ "'"+util.getConfiguration().get(DYNAMIC_JARS_DIR_KEY) + "/myjar2.jar"+"'");
try {
rs = stmt.executeQuery("select mysum6(k1) from t6");
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b2fb04b0/phoenix-core/src/main/antlr3/PhoenixSQL.g
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g b/phoenix-core/src/main/antlr3/PhoenixSQL.g
index 4f7cb34..2a8d11b 100644
--- a/phoenix-core/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core/src/main/antlr3/PhoenixSQL.g
@@ -921,12 +921,13 @@ zero_or_more_data_types returns [List<FunctionArgument> ret]
;
function_argument returns [FunctionArgument ret]
- : (dt = identifier (LPAREN l=NUMBER (COMMA s=NUMBER)? RPAREN)? ar=ARRAY? (lsq=LSQUARE (a=NUMBER)? RSQUARE)? (c = CONSTANT)? (DEFAULTVALUE EQ dv = value_expression)? (MINVALUE EQ minv = value_expression)? (MAXVALUE EQ maxv = value_expression)?
+ : (dt = identifier (LPAREN l=NUMBER (COMMA s=NUMBER)? RPAREN)? ar=ARRAY? (lsq=LSQUARE (a=NUMBER)? RSQUARE)? (c = CONSTANT)? (DEFAULTVALUE EQ dv = expression)? (MINVALUE EQ minv = expression)? (MAXVALUE EQ maxv = expression)?
{ $ret = new FunctionArgument(dt, ar != null || lsq != null, c!=null,
dv == null ? null : LiteralExpression.newConstant(((LiteralParseNode)dv).getValue()),
minv == null ? null : LiteralExpression.newConstant(((LiteralParseNode)minv).getValue()),
maxv == null ? null : LiteralExpression.newConstant(((LiteralParseNode)maxv).getValue()));})
;
+
value_expression_list returns [List<ParseNode> ret]
@init{ret = new ArrayList<ParseNode>(); }
: LPAREN e = value_expression {$ret.add(e);} (COMMA e = value_expression {$ret.add(e);} )* RPAREN
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b2fb04b0/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index dc1a3b4..1fc1d7f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -138,6 +138,7 @@ import org.apache.phoenix.index.IndexMaintainer;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.metrics.Metrics;
+import org.apache.phoenix.parse.LiteralParseNode;
import org.apache.phoenix.parse.PFunction;
import org.apache.phoenix.parse.PFunction.FunctionArgument;
import org.apache.phoenix.protobuf.ProtobufUtil;
@@ -598,7 +599,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
private void addArgumentToFunction(List<Cell> results, PName functionName, PName type,
- Cell[] functionKeyValues, List<FunctionArgument> arguments, short argPosition) {
+ Cell[] functionKeyValues, List<FunctionArgument> arguments, short argPosition) throws SQLException {
int i = 0;
int j = 0;
while (i < results.size() && j < FUNCTION_ARG_KV_COLUMNS.size()) {
@@ -645,9 +646,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
maxValueKv.getValueLength());
FunctionArgument arg =
new FunctionArgument(type.getString(), isArrayType, isConstant,
- defaultValue == null ? null : LiteralExpression.newConstant(defaultValue),
- minValue == null ? null : LiteralExpression.newConstant(minValue),
- maxValue == null ? null : LiteralExpression.newConstant(maxValue),
+ defaultValue == null ? null : LiteralExpression.newConstant((new LiteralParseNode(defaultValue)).getValue()),
+ minValue == null ? null : LiteralExpression.newConstant((new LiteralParseNode(minValue)).getValue()),
+ maxValue == null ? null : LiteralExpression.newConstant((new LiteralParseNode(maxValue)).getValue()),
argPosition);
arguments.add(arg);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b2fb04b0/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java
index be52d89..0dd021b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java
@@ -419,14 +419,11 @@ public class FunctionParseNode extends CompoundParseNode {
this.allowedTypes = new Class[] { dataType.getClass() };
this.isConstant = arg.isConstant();
this.defaultValue =
- arg.getDefaultValue() == null ? null : getExpFromConstant((String) arg
- .getDefaultValue().getValue());
+ arg.getDefaultValue() == null ? null : arg.getDefaultValue();
this.minValue =
- arg.getMinValue() == null ? null : getExpFromConstant((String) arg
- .getMinValue().getValue());
+ arg.getMinValue() == null ? null : arg.getMinValue();
this.maxValue =
- arg.getMaxValue() == null ? null : getExpFromConstant((String) arg
- .getMaxValue().getValue());
+ arg.getMaxValue() == null ? null : arg.getMaxValue();
}
private LiteralExpression getExpFromConstant(String strValue) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b2fb04b0/phoenix-core/src/main/java/org/apache/phoenix/parse/PFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/PFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/PFunction.java
index 8a95ae7..a1413de 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/PFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/PFunction.java
@@ -30,6 +30,8 @@ import org.apache.phoenix.schema.PMetaDataEntity;
import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PNameFactory;
import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.util.SchemaUtil;
import org.apache.phoenix.util.SizedUtil;
public class PFunction implements PMetaDataEntity {
@@ -233,14 +235,18 @@ public class PFunction implements PMetaDataEntity {
for(PFunctionArg arg: function.getArgumentsList()) {
String argType = arg.getArgumentType();
boolean isArrayType = arg.hasIsArrayType()?arg.getIsArrayType():false;
+ PDataType dataType = isArrayType ? PDataType.fromTypeId(PDataType
+ .sqlArrayType(SchemaUtil.normalizeIdentifier(SchemaUtil
+ .normalizeIdentifier(argType)))) : PDataType
+ .fromSqlTypeName(SchemaUtil.normalizeIdentifier(argType));
boolean isConstant = arg.hasIsConstant()?arg.getIsConstant():false;
String defaultValue = arg.hasDefaultValue()?arg.getDefaultValue():null;
String minValue = arg.hasMinValue()?arg.getMinValue():null;
String maxValue = arg.hasMaxValue()?arg.getMaxValue():null;
args.add(new FunctionArgument(argType, isArrayType, isConstant,
- defaultValue == null ? null : LiteralExpression.newConstant(defaultValue),
- minValue == null ? null : LiteralExpression.newConstant(minValue),
- maxValue == null ? null : LiteralExpression.newConstant(maxValue)));
+ defaultValue == null ? null : LiteralExpression.newConstant((new LiteralParseNode(dataType.toObject(defaultValue))).getValue()),
+ minValue == null ? null : LiteralExpression.newConstant((new LiteralParseNode(dataType.toObject(minValue))).getValue()),
+ maxValue == null ? null : LiteralExpression.newConstant((new LiteralParseNode(dataType.toObject(maxValue))).getValue())));
}
return new PFunction(tenantId,functionName, args, returnType, className, jarPath, timeStamp);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b2fb04b0/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 0ad9b56..f24da44 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -680,9 +680,9 @@ public class MetaDataClient {
argUpsert.setBytes(4, bytes);
argUpsert.setBoolean(5, arg.isArrayType());
argUpsert.setBoolean(6, arg.isConstant());
- argUpsert.setString(7, arg.getDefaultValue() == null? null: (String)arg.getDefaultValue().getValue());
- argUpsert.setString(8, arg.getMinValue() == null? null: (String)arg.getMinValue().getValue());
- argUpsert.setString(9, arg.getMaxValue() == null? null: (String)arg.getMaxValue().getValue());
+ argUpsert.setString(7, arg.getDefaultValue() == null? null: arg.getDefaultValue().toString());
+ argUpsert.setString(8, arg.getMinValue() == null? null: arg.getMinValue().toString());
+ argUpsert.setString(9, arg.getMaxValue() == null? null: arg.getMaxValue().toString());
argUpsert.execute();
}
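Taken together, the grammar and metadata changes in this commit let numeric defaultValue/minvalue/maxvalue parse without quotes. A usage sketch assuming java.sql.Connection, DriverManager, and Statement imports, with the URL, class name, and jar path as placeholders (the statement mirrors those exercised in UserDefinedFunctionsIT):

    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
         Statement stmt = conn.createStatement()) {
        stmt.execute("create function mysum(INTEGER, "
            + "INTEGER CONSTANT defaultValue=10 minvalue=1 maxvalue=15) " // unquoted values
            + "returns INTEGER as 'org.example.MySum' using jar '/tmp/myjar.jar'");
    }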
[04/47] phoenix git commit: PHOENIX-1981 : PhoenixHBase Load and
Store Funcs should handle all Pig data types
Posted by ma...@apache.org.
PHOENIX-1981 : PhoenixHBase Load and Store Funcs should handle all Pig data types
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8076126a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8076126a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8076126a
Branch: refs/heads/calcite
Commit: 8076126a741a0cf2a5839b88904fa08bfdfb6cdb
Parents: b61ef77
Author: Prashant Kommireddi <pk...@pkommireddi-ltm.internal.salesforce.com>
Authored: Mon May 18 19:41:08 2015 -0700
Committer: Eli Levine <el...@apache.org>
Committed: Mon Jun 15 18:17:44 2015 -0700
----------------------------------------------------------------------
.../org/apache/phoenix/pig/util/TypeUtil.java | 415 +++++++++----------
.../apache/phoenix/pig/util/TypeUtilTest.java | 52 +++
2 files changed, 251 insertions(+), 216 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8076126a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java b/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
index bdee3a4..6549445 100644
--- a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
+++ b/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
@@ -1,19 +1,11 @@
/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
+ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
+ * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
*/
package org.apache.phoenix.pig.util;
@@ -29,11 +21,11 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.phoenix.pig.writable.PhoenixPigDBWritable;
import org.apache.phoenix.schema.types.PBinary;
-import org.apache.phoenix.schema.types.PChar;
-import org.apache.phoenix.schema.types.PDecimal;
import org.apache.phoenix.schema.types.PBoolean;
+import org.apache.phoenix.schema.types.PChar;
import org.apache.phoenix.schema.types.PDataType;
import org.apache.phoenix.schema.types.PDate;
+import org.apache.phoenix.schema.types.PDecimal;
import org.apache.phoenix.schema.types.PDouble;
import org.apache.phoenix.schema.types.PFloat;
import org.apache.phoenix.schema.types.PInteger;
@@ -56,7 +48,6 @@ import org.apache.phoenix.schema.types.PVarchar;
import org.apache.pig.PigException;
import org.apache.pig.ResourceSchema.ResourceFieldSchema;
import org.apache.pig.backend.hadoop.hbase.HBaseBinaryConverter;
-import org.apache.pig.builtin.Utf8StorageConverter;
import org.apache.pig.data.DataByteArray;
import org.apache.pig.data.DataType;
import org.apache.pig.data.Tuple;
@@ -68,258 +59,250 @@ import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableMap.Builder;
public final class TypeUtil {
-
+
private static final Log LOG = LogFactory.getLog(TypeUtil.class);
- private static final HBaseBinaryConverter binaryConverter = new HBaseBinaryConverter ();
- private static final ImmutableMap<PDataType,Byte> phoenixTypeToPigDataType = init();
-
- private TypeUtil(){
- }
-
- /**
- * A map of Phoenix to Pig data types.
- * @return
- */
- private static ImmutableMap<PDataType, Byte> init() {
- final ImmutableMap.Builder<PDataType,Byte> builder = new Builder<PDataType,Byte> ();
- builder.put(PLong.INSTANCE,DataType.LONG);
- builder.put(PVarbinary.INSTANCE,DataType.BYTEARRAY);
- builder.put(PChar.INSTANCE,DataType.CHARARRAY);
- builder.put(PVarchar.INSTANCE,DataType.CHARARRAY);
- builder.put(PDouble.INSTANCE,DataType.DOUBLE);
- builder.put(PFloat.INSTANCE,DataType.FLOAT);
- builder.put(PInteger.INSTANCE,DataType.INTEGER);
- builder.put(PTinyint.INSTANCE,DataType.INTEGER);
- builder.put(PSmallint.INSTANCE,DataType.INTEGER);
- builder.put(PDecimal.INSTANCE,DataType.BIGDECIMAL);
- builder.put(PTime.INSTANCE,DataType.DATETIME);
- builder.put(PTimestamp.INSTANCE,DataType.DATETIME);
- builder.put(PBoolean.INSTANCE,DataType.BOOLEAN);
- builder.put(PDate.INSTANCE,DataType.DATETIME);
- builder.put(PUnsignedDate.INSTANCE,DataType.DATETIME);
- builder.put(PUnsignedDouble.INSTANCE,DataType.DOUBLE);
- builder.put(PUnsignedFloat.INSTANCE,DataType.FLOAT);
- builder.put(PUnsignedInt.INSTANCE,DataType.INTEGER);
- builder.put(PUnsignedLong.INSTANCE,DataType.LONG);
- builder.put(PUnsignedSmallint.INSTANCE,DataType.INTEGER);
- builder.put(PUnsignedTime.INSTANCE,DataType.DATETIME);
- builder.put(PUnsignedTimestamp.INSTANCE,DataType.DATETIME);
- builder.put(PUnsignedTinyint.INSTANCE,DataType.INTEGER);
+ private static final HBaseBinaryConverter BINARY_CONVERTER = new HBaseBinaryConverter();
+ private static final ImmutableMap<PDataType, Byte> PHOENIX_TO_PIG_TYPE = init();
+ private static final TupleFactory TUPLE_FACTORY = TupleFactory.getInstance();
+
+ private TypeUtil() {}
+
+ /**
+ * A map of Phoenix to Pig data types.
+ *
+ * @return
+ */
+ private static ImmutableMap<PDataType, Byte> init() {
+ final ImmutableMap.Builder<PDataType, Byte> builder = new Builder<PDataType, Byte>();
+ builder.put(PLong.INSTANCE, DataType.LONG);
+ builder.put(PVarbinary.INSTANCE, DataType.BYTEARRAY);
+ builder.put(PChar.INSTANCE, DataType.CHARARRAY);
+ builder.put(PVarchar.INSTANCE, DataType.CHARARRAY);
+ builder.put(PDouble.INSTANCE, DataType.DOUBLE);
+ builder.put(PFloat.INSTANCE, DataType.FLOAT);
+ builder.put(PInteger.INSTANCE, DataType.INTEGER);
+ builder.put(PTinyint.INSTANCE, DataType.INTEGER);
+ builder.put(PSmallint.INSTANCE, DataType.INTEGER);
+ builder.put(PDecimal.INSTANCE, DataType.BIGDECIMAL);
+ builder.put(PTime.INSTANCE, DataType.DATETIME);
+ builder.put(PTimestamp.INSTANCE, DataType.DATETIME);
+ builder.put(PBoolean.INSTANCE, DataType.BOOLEAN);
+ builder.put(PDate.INSTANCE, DataType.DATETIME);
+ builder.put(PUnsignedDate.INSTANCE, DataType.DATETIME);
+ builder.put(PUnsignedDouble.INSTANCE, DataType.DOUBLE);
+ builder.put(PUnsignedFloat.INSTANCE, DataType.FLOAT);
+ builder.put(PUnsignedInt.INSTANCE, DataType.INTEGER);
+ builder.put(PUnsignedLong.INSTANCE, DataType.LONG);
+ builder.put(PUnsignedSmallint.INSTANCE, DataType.INTEGER);
+ builder.put(PUnsignedTime.INSTANCE, DataType.DATETIME);
+ builder.put(PUnsignedTimestamp.INSTANCE, DataType.DATETIME);
+ builder.put(PUnsignedTinyint.INSTANCE, DataType.INTEGER);
return builder.build();
}
+
+ /**
+ * This method returns the most appropriate PDataType associated with the incoming Pig type. Note that for the Pig
+ * DataType DATETIME, it returns DATE as the inferred type. This is later used to cast to targetPhoenixType accordingly.
+ * See {@link #castPigTypeToPhoenix(Object, byte, PDataType)}
+ *
+ * @param obj
+ * @return PDataType
+ */
+ public static PDataType getType(Object obj, byte type) {
+ if (obj == null) { return null; }
+ PDataType sqlType;
+
+ switch (type) {
+ case DataType.BYTEARRAY:
+ sqlType = PVarbinary.INSTANCE;
+ break;
+ case DataType.CHARARRAY:
+ sqlType = PVarchar.INSTANCE;
+ break;
+ case DataType.DOUBLE:
+ case DataType.BIGDECIMAL:
+ sqlType = PDouble.INSTANCE;
+ break;
+ case DataType.FLOAT:
+ sqlType = PFloat.INSTANCE;
+ break;
+ case DataType.INTEGER:
+ sqlType = PInteger.INSTANCE;
+ break;
+ case DataType.LONG:
+ case DataType.BIGINTEGER:
+ sqlType = PLong.INSTANCE;
+ break;
+ case DataType.BOOLEAN:
+ sqlType = PBoolean.INSTANCE;
+ break;
+ case DataType.DATETIME:
+ sqlType = PDate.INSTANCE;
+ break;
+ case DataType.BYTE:
+ sqlType = PTinyint.INSTANCE;
+ break;
+ default:
+ throw new RuntimeException("Unknown type " + obj.getClass().getName() + " passed to PhoenixHBaseStorage");
+ }
+
+ return sqlType;
+
+ }
+
/**
- * This method returns the most appropriate PDataType associated with
- * the incoming Pig type. Note for Pig DataType DATETIME, returns DATE as
- * inferredSqlType.
- *
- * This is later used to make a cast to targetPhoenixType accordingly. See
- * {@link #castPigTypeToPhoenix(Object, byte, PDataType)}
- *
- * @param obj
- * @return PDataType
- */
- public static PDataType getType(Object obj, byte type) {
- if (obj == null) {
- return null;
- }
- PDataType sqlType;
+ * This method encodes a value with a Phoenix data type. It begins by checking whether the object is BINARY and, if
+ * so, makes a call to {@link #castBytes(Object, PDataType)} to convert the bytes to targetPhoenixType
+ *
+ * @param o
+ * @param targetPhoenixType
+ * @return Object
+ */
+ public static Object castPigTypeToPhoenix(Object o, byte objectType, PDataType targetPhoenixType) {
+ PDataType inferredPType = getType(o, objectType);
- switch (type) {
- case DataType.BYTEARRAY:
- sqlType = PVarbinary.INSTANCE;
- break;
- case DataType.CHARARRAY:
- sqlType = PVarchar.INSTANCE;
- break;
- case DataType.DOUBLE:
- case DataType.BIGDECIMAL:
- sqlType = PDouble.INSTANCE;
- break;
- case DataType.FLOAT:
- sqlType = PFloat.INSTANCE;
- break;
- case DataType.INTEGER:
- sqlType = PInteger.INSTANCE;
- break;
- case DataType.LONG:
- case DataType.BIGINTEGER:
- sqlType = PLong.INSTANCE;
- break;
- case DataType.BOOLEAN:
- sqlType = PBoolean.INSTANCE;
- break;
- case DataType.DATETIME:
- sqlType = PDate.INSTANCE;
- break;
- case DataType.BYTE:
- sqlType = PTinyint.INSTANCE;
- break;
- default:
- throw new RuntimeException("Unknown type " + obj.getClass().getName()
- + " passed to PhoenixHBaseStorage");
- }
+ if (inferredPType == null) { return null; }
- return sqlType;
+ if (inferredPType == PVarbinary.INSTANCE) {
+ try {
+ o = castBytes(o, targetPhoenixType);
+ if (targetPhoenixType != PVarbinary.INSTANCE && targetPhoenixType != PBinary.INSTANCE) {
+ inferredPType = getType(o, DataType.findType(o));
+ }
+ } catch (IOException e) {
+ throw new RuntimeException("Error while casting bytes for object " + o);
+ }
+ }
+ if (inferredPType == PDate.INSTANCE) {
+ int inferredSqlType = targetPhoenixType.getSqlType();
- }
+ if (inferredSqlType == Types.DATE) { return new Date(((DateTime)o).getMillis()); }
+ if (inferredSqlType == Types.TIME) { return new Time(((DateTime)o).getMillis()); }
+ if (inferredSqlType == Types.TIMESTAMP) { return new Timestamp(((DateTime)o).getMillis()); }
+ }
- /**
- * This method encodes a value with Phoenix data type. It begins
- * with checking whether an object is BINARY and makes a call to
- * {@link #castBytes(Object, PDataType)} to convery bytes to
- * targetPhoenixType
- *
- * @param o
- * @param targetPhoenixType
- * @return Object
- */
- public static Object castPigTypeToPhoenix(Object o, byte objectType, PDataType targetPhoenixType) {
- PDataType inferredPType = getType(o, objectType);
-
- if(inferredPType == null) {
- return null;
- }
+ if (targetPhoenixType == inferredPType || inferredPType.isCoercibleTo(targetPhoenixType)) {
+ return inferredPType.toObject(o, targetPhoenixType);
+ }
- if(inferredPType == PVarbinary.INSTANCE) {
- try {
- o = castBytes(o, targetPhoenixType);
- if(targetPhoenixType != PVarbinary.INSTANCE && targetPhoenixType != PBinary.INSTANCE) {
- inferredPType = getType(o, DataType.findType(o));
- }
- } catch (IOException e) {
- throw new RuntimeException("Error while casting bytes for object " +o);
- }
- }
- if(inferredPType == PDate.INSTANCE) {
- int inferredSqlType = targetPhoenixType.getSqlType();
+ throw new RuntimeException(o.getClass().getName() + " cannot be coerced to " + targetPhoenixType.toString());
+ }
- if(inferredSqlType == Types.DATE) {
- return new Date(((DateTime)o).getMillis());
- }
- if(inferredSqlType == Types.TIME) {
- return new Time(((DateTime)o).getMillis());
- }
- if(inferredSqlType == Types.TIMESTAMP) {
- return new Timestamp(((DateTime)o).getMillis());
- }
- }
-
- if (targetPhoenixType == inferredPType || inferredPType.isCoercibleTo(targetPhoenixType)) {
- return inferredPType.toObject(o, targetPhoenixType);
- }
-
- throw new RuntimeException(o.getClass().getName()
- + " cannot be coerced to "+targetPhoenixType.toString());
- }
-
- /**
- * This method converts bytes to the target type required
- * for Phoenix. It uses {@link Utf8StorageConverter} for
- * the conversion.
- *
- * @param o
- * @param targetPhoenixType
- * @return Object
- * @throws IOException
- */
- private static Object castBytes(Object o, PDataType targetPhoenixType) throws IOException {
+ /**
+ * This method converts bytes to the target type required for Phoenix. It uses {@link HBaseBinaryConverter} for the
+ * conversion.
+ *
+ * @param o
+ * @param targetPhoenixType
+ * @return Object
+ * @throws IOException
+ */
+ private static Object castBytes(Object o, PDataType targetPhoenixType) throws IOException {
byte[] bytes = ((DataByteArray)o).get();
if (PDataType.equalsAny(targetPhoenixType, PChar.INSTANCE, PVarchar.INSTANCE)) {
- return binaryConverter.bytesToCharArray(bytes);
+ return BINARY_CONVERTER.bytesToCharArray(bytes);
} else if (PDataType.equalsAny(targetPhoenixType, PUnsignedSmallint.INSTANCE, PSmallint.INSTANCE)) {
- return binaryConverter.bytesToInteger(bytes).shortValue();
+ return BINARY_CONVERTER.bytesToInteger(bytes).shortValue();
} else if (PDataType.equalsAny(targetPhoenixType, PUnsignedTinyint.INSTANCE, PTinyint.INSTANCE)) {
- return binaryConverter.bytesToInteger(bytes).byteValue();
+ return BINARY_CONVERTER.bytesToInteger(bytes).byteValue();
} else if (PDataType.equalsAny(targetPhoenixType, PUnsignedInt.INSTANCE, PInteger.INSTANCE)) {
- return binaryConverter.bytesToInteger(bytes);
+ return BINARY_CONVERTER.bytesToInteger(bytes);
} else if (targetPhoenixType.equals(PBoolean.INSTANCE)) {
- return binaryConverter.bytesToBoolean(bytes);
+ return BINARY_CONVERTER.bytesToBoolean(bytes);
} else if (PDataType.equalsAny(targetPhoenixType, PFloat.INSTANCE, PUnsignedFloat.INSTANCE)) {
- return binaryConverter.bytesToFloat(bytes);
+ return BINARY_CONVERTER.bytesToFloat(bytes);
} else if (PDataType.equalsAny(targetPhoenixType, PDouble.INSTANCE, PUnsignedDouble.INSTANCE)) {
- return binaryConverter.bytesToDouble(bytes);
+ return BINARY_CONVERTER.bytesToDouble(bytes);
} else if (PDataType.equalsAny(targetPhoenixType, PUnsignedLong.INSTANCE, PLong.INSTANCE)) {
- return binaryConverter.bytesToLong(bytes);
+ return BINARY_CONVERTER.bytesToLong(bytes);
} else if (PDataType.equalsAny(targetPhoenixType, PVarbinary.INSTANCE, PBinary.INSTANCE)) {
return bytes;
} else {
return o;
- }
+ }
}
-
+
/**
* Transforms the PhoenixRecord to Pig {@link Tuple}.
+ *
* @param record
* @param projectedColumns
* @return
* @throws IOException
*/
- public static Tuple transformToTuple(final PhoenixPigDBWritable record, final ResourceFieldSchema[] projectedColumns) throws IOException {
-
+ public static Tuple transformToTuple(final PhoenixPigDBWritable record, final ResourceFieldSchema[] projectedColumns)
+ throws IOException {
+
List<Object> columnValues = record.getValues();
- if(columnValues == null || columnValues.size() == 0 || projectedColumns == null || projectedColumns.length != columnValues.size()) {
- return null;
- }
- int columns = columnValues.size();
- Tuple tuple = TupleFactory.getInstance().newTuple(columns);
+ if (columnValues == null || columnValues.size() == 0 || projectedColumns == null
+ || projectedColumns.length != columnValues.size()) { return null; }
+ int numColumns = columnValues.size();
+ Tuple tuple = TUPLE_FACTORY.newTuple(numColumns);
try {
- for(int i = 0 ; i < columns ; i++) {
+ for (int i = 0; i < numColumns; i++) {
final ResourceFieldSchema fieldSchema = projectedColumns[i];
Object object = columnValues.get(i);
if (object == null) {
tuple.set(i, null);
continue;
}
-
- switch(fieldSchema.getType()) {
- case DataType.BYTEARRAY:
- byte[] bytes = PDataType.fromTypeId(PBinary.INSTANCE.getSqlType()).toBytes(object);
- tuple.set(i,new DataByteArray(bytes,0,bytes.length));
- break;
- case DataType.CHARARRAY:
- tuple.set(i,DataType.toString(object));
- break;
- case DataType.DOUBLE:
- tuple.set(i,DataType.toDouble(object));
- break;
- case DataType.FLOAT:
- tuple.set(i,DataType.toFloat(object));
- break;
- case DataType.INTEGER:
- tuple.set(i,DataType.toInteger(object));
- break;
- case DataType.LONG:
- tuple.set(i,DataType.toLong(object));
- break;
- case DataType.BOOLEAN:
- tuple.set(i,DataType.toBoolean(object));
- break;
- case DataType.DATETIME:
- tuple.set(i,DataType.toDateTime(object));
- break;
- default:
- throw new RuntimeException(String.format(" Not supported [%s] pig type" , fieldSchema));
+
+ switch (fieldSchema.getType()) {
+ case DataType.BYTEARRAY:
+ byte[] bytes = PDataType.fromTypeId(PBinary.INSTANCE.getSqlType()).toBytes(object);
+ tuple.set(i, new DataByteArray(bytes, 0, bytes.length));
+ break;
+ case DataType.CHARARRAY:
+ tuple.set(i, DataType.toString(object));
+ break;
+ case DataType.DOUBLE:
+ tuple.set(i, DataType.toDouble(object));
+ break;
+ case DataType.FLOAT:
+ tuple.set(i, DataType.toFloat(object));
+ break;
+ case DataType.INTEGER:
+ tuple.set(i, DataType.toInteger(object));
+ break;
+ case DataType.LONG:
+ tuple.set(i, DataType.toLong(object));
+ break;
+ case DataType.BOOLEAN:
+ tuple.set(i, DataType.toBoolean(object));
+ break;
+ case DataType.DATETIME:
+ tuple.set(i, DataType.toDateTime(object));
+ break;
+ case DataType.BIGDECIMAL:
+ tuple.set(i, DataType.toBigDecimal(object));
+ break;
+ case DataType.BIGINTEGER:
+ tuple.set(i, DataType.toBigInteger(object));
+ break;
+ default:
+ throw new RuntimeException(String.format(" Not supported [%s] pig type", fieldSchema));
}
}
- } catch( Exception ex) {
+ } catch (Exception ex) {
final String errorMsg = String.format(" Error transforming PhoenixRecord to Tuple [%s] ", ex.getMessage());
LOG.error(errorMsg);
throw new PigException(errorMsg);
}
- return tuple;
+ return tuple;
}
-
+
/**
* Returns the mapping pig data type for a given phoenix data type.
+ *
* @param phoenixDataType
* @return
*/
public static Byte getPigDataTypeForPhoenixType(final PDataType phoenixDataType) {
Preconditions.checkNotNull(phoenixDataType);
- final Byte pigDataType = phoenixTypeToPigDataType.get(phoenixDataType);
- if(LOG.isDebugEnabled()) {
- LOG.debug(String.format(" For PhoenixDataType [%s] , pigDataType is [%s] " , phoenixDataType.getSqlTypeName() , DataType.findTypeName(pigDataType)));
+ final Byte pigDataType = PHOENIX_TO_PIG_TYPE.get(phoenixDataType);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(String.format(" For PhoenixDataType [%s] , pigDataType is [%s] ",
+ phoenixDataType.getSqlTypeName(), DataType.findTypeName(pigDataType)));
}
return pigDataType;
}
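
The sketch below shows how the reworked TypeUtil entry points fit together when a Pig value is stored into a
Phoenix column. It is a minimal illustration added for this digest, not part of the commit; the sample value
and the chosen target type are assumptions.

    import org.apache.phoenix.pig.util.TypeUtil;
    import org.apache.phoenix.schema.types.PDataType;
    import org.apache.phoenix.schema.types.PInteger;
    import org.apache.pig.data.DataType;

    public class TypeUtilExample {
        public static void main(String[] args) {
            Object pigValue = 42;                        // sample Pig INTEGER field value (assumed)
            byte pigType = DataType.findType(pigValue);  // resolves to DataType.INTEGER

            // Infer the Phoenix type for the incoming Pig value (PInteger here).
            PDataType inferred = TypeUtil.getType(pigValue, pigType);

            // Encode the value for the target column; the target type matches the inferred
            // type here, so the value passes through unchanged. Coercible widenings
            // (e.g. INTEGER to BIGINT) follow the isCoercibleTo branch instead.
            Object phoenixValue = TypeUtil.castPigTypeToPhoenix(pigValue, pigType, PInteger.INSTANCE);

            // Map back from a Phoenix type to the Pig DataType byte used in schemas.
            Byte pigDataType = TypeUtil.getPigDataTypeForPhoenixType(PInteger.INSTANCE);

            System.out.println(inferred + " -> " + phoenixValue + " (pig type byte " + pigDataType + ")");
        }
    }
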
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8076126a/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
----------------------------------------------------------------------
diff --git a/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java b/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
new file mode 100644
index 0000000..25d9f48
--- /dev/null
+++ b/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
@@ -0,0 +1,52 @@
+package org.apache.phoenix.pig.util;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.List;
+
+import org.apache.phoenix.pig.writable.PhoenixPigDBWritable;
+import org.apache.pig.ResourceSchema.ResourceFieldSchema;
+import org.apache.pig.data.DataType;
+import org.apache.pig.data.Tuple;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public class TypeUtilTest {
+
+ @Test
+ public void testTransformToTuple() throws Exception {
+ PhoenixPigDBWritable record = mock(PhoenixPigDBWritable.class);
+ List<Object> values = Lists.newArrayList();
+ values.add("213123");
+ values.add(1231123);
+ values.add(31231231232131L);
+ values.add("bytearray".getBytes());
+ when(record.getValues()).thenReturn(values);
+
+ ResourceFieldSchema field = new ResourceFieldSchema().setType(DataType.CHARARRAY);
+ ResourceFieldSchema field1 = new ResourceFieldSchema().setType(DataType.INTEGER);
+ ResourceFieldSchema field2 = new ResourceFieldSchema().setType(DataType.LONG);
+ ResourceFieldSchema field3 = new ResourceFieldSchema().setType(DataType.BYTEARRAY);
+ ResourceFieldSchema[] projectedColumns = { field, field1, field2, field3 };
+
+ Tuple t = TypeUtil.transformToTuple(record, projectedColumns);
+
+ assertEquals(DataType.LONG, DataType.findType(t.get(2)));
+
+ field = new ResourceFieldSchema().setType(DataType.BIGDECIMAL);
+ field1 = new ResourceFieldSchema().setType(DataType.BIGINTEGER);
+ values.clear();
+ values.add(new BigDecimal(123123123.123213));
+ values.add(new BigInteger("1312313231312"));
+ ResourceFieldSchema[] columns = { field, field1 };
+ t = TypeUtil.transformToTuple(record, columns);
+
+ assertEquals(DataType.BIGDECIMAL, DataType.findType(t.get(0)));
+ assertEquals(DataType.BIGINTEGER, DataType.findType(t.get(1)));
+ }
+}
[20/47] phoenix git commit: PHOENIX-2021 - Implement ARRAY_CAT built-in
function (Dumindu Buddhika)
Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7385899d/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayConcatFunctionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayConcatFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayConcatFunctionTest.java
new file mode 100644
index 0000000..75d0827
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayConcatFunctionTest.java
@@ -0,0 +1,584 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.expression;
+
+import static org.junit.Assert.assertEquals;
+
+import java.math.BigDecimal;
+import java.sql.Date;
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.exception.DataExceedsCapacityException;
+import org.apache.phoenix.expression.function.ArrayConcatFunction;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.TypeMismatchException;
+import org.apache.phoenix.schema.types.*;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public class ArrayConcatFunctionTest {
+
+ private static void testExpression(LiteralExpression array1, LiteralExpression array2, PhoenixArray expected)
+ throws SQLException {
+ List<Expression> expressions = Lists.newArrayList((Expression) array1);
+ expressions.add(array2);
+
+ Expression arrayConcatFunction = new ArrayConcatFunction(expressions);
+ ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+ arrayConcatFunction.evaluate(null, ptr);
+ PhoenixArray result = (PhoenixArray) arrayConcatFunction.getDataType().toObject(ptr, expressions.get(0).getSortOrder(), array1.getMaxLength(), array1.getScale());
+ assertEquals(expected, result);
+ }
+
+ private static void test(PhoenixArray array1, PhoenixArray array2, PDataType array1DataType, Integer arr1MaxLen, Integer arr1Scale, PDataType array2DataType, Integer arr2MaxLen, Integer arr2Scale, PhoenixArray expected, SortOrder array1SortOrder, SortOrder array2SortOrder) throws SQLException {
+ LiteralExpression array1Literal, array2Literal;
+ array1Literal = LiteralExpression.newConstant(array1, array1DataType, arr1MaxLen, arr1Scale, array1SortOrder, Determinism.ALWAYS);
+ array2Literal = LiteralExpression.newConstant(array2, array2DataType, arr2MaxLen, arr2Scale, array2SortOrder, Determinism.ALWAYS);
+ testExpression(array1Literal, array2Literal, expected);
+ }
+
+ @Test
+ public void testChar1() throws SQLException {
+ Object[] o1 = new Object[]{"aa", "bb"};
+ Object[] o2 = new Object[]{"c", "d"};
+ Object[] e = new Object[]{"aa", "bb", "c", "d"};
+ PDataType type = PCharArray.INSTANCE;
+ PDataType base = PChar.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.DESC, SortOrder.ASC);
+
+ }
+
+ @Test
+ public void testChar2() throws SQLException {
+ Object[] o1 = new Object[]{"aa", "bb"};
+ Object[] o2 = new Object[]{"cc", "dc", "ee"};
+ Object[] e = new Object[]{"aa", "bb", "cc", "dc", "ee"};
+ PDataType type = PCharArray.INSTANCE;
+ PDataType base = PChar.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, 2, null, type, 2, null, expected, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, type, 2, null, type, 2, null, expected, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, type, 2, null, type, 2, null, expected, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, type, 2, null, type, 2, null, expected, SortOrder.DESC, SortOrder.ASC);
+
+ }
+
+ @Test(expected = DataExceedsCapacityException.class)
+ public void testChar3() throws SQLException {
+ Object[] o1 = new Object[]{"c", "d"};
+ Object[] o2 = new Object[]{"aa", "bb"};
+ Object[] e = new Object[]{"aa", "bb", "c", "d"};
+ PDataType type = PCharArray.INSTANCE;
+ PDataType base = PChar.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, type, 2, null, type, 1, null, expected, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testInt1() throws SQLException {
+ Object[] o1 = new Object[]{1, 2};
+ Object[] o2 = new Object[]{5, 6, 7};
+ Object[] e = new Object[]{1, 2, 5, 6, 7};
+ PDataType type = PIntegerArray.INSTANCE;
+ PDataType base = PInteger.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray.PrimitiveIntPhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray.PrimitiveIntPhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray.PrimitiveIntPhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testFloat1() throws SQLException {
+ Object[] o1 = new Object[]{(float) 1.2, (float) 2};
+ Object[] o2 = new Object[]{(float) 5, (float) 6, (float) 7};
+ Object[] e = new Object[]{(float) 1.2, (float) 2, (float) 5, (float) 6, (float) 7};
+ PDataType type = PFloatArray.INSTANCE;
+ PDataType base = PFloat.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray.PrimitiveFloatPhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray.PrimitiveFloatPhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray.PrimitiveFloatPhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testDouble1() throws SQLException {
+ Object[] o1 = new Object[]{(double) 1.2, (double) 2};
+ Object[] o2 = new Object[]{(double) 5.2, (double) 6, (double) 7};
+ Object[] e = new Object[]{(double) 1.2, (double) 2, (double) 5.2, (double) 6, (double) 7};
+ PDataType type = PDoubleArray.INSTANCE;
+ PDataType base = PDouble.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray.PrimitiveDoublePhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray.PrimitiveDoublePhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray.PrimitiveDoublePhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testLong1() throws SQLException {
+ Object[] o1 = new Object[]{(long) 1, (long) 2};
+ Object[] o2 = new Object[]{(long) 5, (long) 6, (long) 7};
+ Object[] e = new Object[]{(long) 1, (long) 2, (long) 5, (long) 6, (long) 7};
+ PDataType type = PLongArray.INSTANCE;
+ PDataType base = PLong.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray.PrimitiveLongPhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray.PrimitiveLongPhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray.PrimitiveLongPhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testShort1() throws SQLException {
+ Object[] o1 = new Object[]{(short) 1, (short) 2};
+ Object[] o2 = new Object[]{(short) 5, (short) 6, (short) 7};
+ Object[] e = new Object[]{(short) 1, (short) 2, (short) 5, (short) 6, (short) 7};
+ PDataType type = PSmallintArray.INSTANCE;
+ PDataType base = PSmallint.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray.PrimitiveShortPhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray.PrimitiveShortPhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray.PrimitiveShortPhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testBoolean1() throws SQLException {
+ Object[] o1 = new Object[]{true, true};
+ Object[] o2 = new Object[]{false, false, false};
+ Object[] e = new Object[]{true, true, false, false, false};
+ PDataType type = PBooleanArray.INSTANCE;
+ PDataType base = PBoolean.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray.PrimitiveBooleanPhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray.PrimitiveBooleanPhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray.PrimitiveBooleanPhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testTinyInt1() throws SQLException {
+ Object[] o1 = new Object[]{(byte) 2, (byte) 2};
+ Object[] o2 = new Object[]{(byte) 5, (byte) 6, (byte) 7};
+ Object[] e = new Object[]{(byte) 2, (byte) 2, (byte) 5, (byte) 6, (byte) 7};
+ PDataType type = PTinyintArray.INSTANCE;
+ PDataType base = PTinyint.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray.PrimitiveBytePhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray.PrimitiveBytePhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray.PrimitiveBytePhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testDate1() throws SQLException {
+ Object[] o1 = new Object[]{new Date(0l), new Date(0l)};
+ Object[] o2 = new Object[]{new Date(0l), new Date(0l), new Date(0l)};
+ Object[] e = new Object[]{new Date(0l), new Date(0l), new Date(0l), new Date(0l), new Date(0l)};
+ PDataType type = PDateArray.INSTANCE;
+ PDataType base = PDate.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testDecimal1() throws SQLException {
+ Object[] o1 = new Object[]{BigDecimal.valueOf(32.4), BigDecimal.valueOf(34)};
+ Object[] o2 = new Object[]{BigDecimal.valueOf(32.4), BigDecimal.valueOf(34)};
+ Object[] e = new Object[]{BigDecimal.valueOf(32.4), BigDecimal.valueOf(34), BigDecimal.valueOf(32.4), BigDecimal.valueOf(34)};
+ PDataType type = PDecimalArray.INSTANCE;
+ PDataType base = PDecimal.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testVarchar1() throws SQLException {
+ Object[] o1 = new Object[]{"a", "b"};
+ Object[] o2 = new Object[]{"c", "d"};
+ Object[] e = new Object[]{"a", "b", "c", "d"};
+ PDataType type = PVarcharArray.INSTANCE;
+ PDataType base = PVarchar.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testVarchar2() throws SQLException {
+ Object[] o1 = new Object[]{"a"};
+ Object[] o2 = new Object[]{"c", "d"};
+ Object[] e = new Object[]{"a", "c", "d"};
+ PDataType type = PVarcharArray.INSTANCE;
+ PDataType base = PVarchar.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testVarchar3() throws SQLException {
+ Object[] o1 = new Object[]{"a", "b"};
+ Object[] o2 = new Object[]{"c"};
+ Object[] e = new Object[]{"a", "b", "c"};
+ PDataType type = PVarcharArray.INSTANCE;
+ PDataType base = PVarchar.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testVarchar4() throws SQLException {
+ Object[] o1 = new Object[]{"a"};
+ Object[] o2 = new Object[]{null, "c"};
+ Object[] e = new Object[]{"a", null, "c"};
+ PDataType type = PVarcharArray.INSTANCE;
+ PDataType base = PVarchar.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testVarchar5() throws SQLException {
+ Object[] o1 = new Object[]{"a", null , null};
+ Object[] o2 = new Object[]{null, null, "c"};
+ Object[] e = new Object[]{"a", null, null, null, null, "c"};
+ PDataType type = PVarcharArray.INSTANCE;
+ PDataType base = PVarchar.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testVarchar6() throws SQLException {
+ Object[] o1 = new Object[]{"a", "b"};
+ Object[] e = new Object[]{"a", "b"};
+ PDataType type = PVarcharArray.INSTANCE;
+ PDataType base = PVarchar.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = null;
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testVarchar7() throws SQLException {
+ Object[] o2 = new Object[]{"a", "b"};
+ PDataType type = PVarcharArray.INSTANCE;
+ PDataType base = PVarchar.INSTANCE;
+
+ PhoenixArray arr1 = null;
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = null;
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testVarchar8() throws SQLException {
+ Object[] o1 = new Object[]{"a", null, null, "b"};
+ Object[] o2 = new Object[]{"c", null, "d", null, "e"};
+ Object[] e = new Object[]{"a", null, null, "b", "c", null, "d", null, "e"};
+ PDataType type = PVarcharArray.INSTANCE;
+ PDataType base = PVarchar.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test(expected = TypeMismatchException.class)
+ public void testVarchar9() throws SQLException {
+ Object[] o1 = new Object[]{"a", "b"};
+ Object[] o2 = new Object[]{1, 2};
+
+ PhoenixArray arr1 = new PhoenixArray(PVarchar.INSTANCE, o1);
+ PhoenixArray arr2 = new PhoenixArray.PrimitiveIntPhoenixArray(PInteger.INSTANCE, o2);
+ test(arr1, arr2, PVarcharArray.INSTANCE, null, null, PIntegerArray.INSTANCE, null, null, null, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, PVarcharArray.INSTANCE, null, null, PIntegerArray.INSTANCE, null, null, null, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, PVarcharArray.INSTANCE, null, null, PIntegerArray.INSTANCE, null, null, null, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, PVarcharArray.INSTANCE, null, null, PIntegerArray.INSTANCE, null, null, null, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testWithIntOffsetArray() throws SQLException {
+ Object[] o1 = new Object[Short.MAX_VALUE + 7];
+ Object[] o2 = new Object[]{"b", "b"};
+ Object[] e = new Object[Short.MAX_VALUE + 9];
+ for (int i = 0; i < o1.length; i++) {
+ o1[i] = "a";
+ e[i] = "a";
+ }
+ e[Short.MAX_VALUE + 7] = "b";
+ e[Short.MAX_VALUE + 8] = "b";
+ PDataType type = PVarcharArray.INSTANCE;
+ PDataType base = PVarchar.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testWithShortToIntOffsetArray() throws SQLException {
+ Object[] o1 = new Object[Short.MAX_VALUE + 1];
+ Object[] o2 = new Object[]{"b", "b"};
+ Object[] e = new Object[Short.MAX_VALUE + 3];
+ for (int i = 0; i < o1.length; i++) {
+ o1[i] = "a";
+ e[i] = "a";
+ }
+ e[Short.MAX_VALUE + 2] = "b";
+ e[Short.MAX_VALUE + 1] = "b";
+ PDataType type = PVarcharArray.INSTANCE;
+ PDataType base = PVarchar.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testWithShortToIntOffsetArray2() throws SQLException {
+ Object[] o1 = new Object[Short.MAX_VALUE + 1];
+ Object[] o2 = new Object[]{null, "b"};
+ Object[] e = new Object[Short.MAX_VALUE + 3];
+ for (int i = 0; i < o1.length; i++) {
+ o1[i] = "a";
+ e[i] = "a";
+ }
+ e[Short.MAX_VALUE + 1] = null;
+ e[Short.MAX_VALUE + 2] = "b";
+ PDataType type = PVarcharArray.INSTANCE;
+ PDataType base = PVarchar.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.DESC);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testWith10NullsAnd246Nulls() throws SQLException {
+ Object[] o1 = new Object[11];
+ Object[] o2 = new Object[247];
+ Object[] e = new Object[258];
+ o1[0] = "a";
+ o2[o2.length - 1] = "a";
+ e[e.length - 1] = "a";
+ e[0] = "a";
+
+ PDataType type = PVarcharArray.INSTANCE;
+ PDataType base = PVarchar.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testWith0NullsAnd256Nulls() throws SQLException {
+ Object[] o1 = new Object[1];
+ Object[] o2 = new Object[257];
+ Object[] e = new Object[258];
+ o1[0] = "a";
+ o2[o2.length - 1] = "a";
+ e[e.length - 1] = "a";
+ e[0] = "a";
+
+ PDataType type = PVarcharArray.INSTANCE;
+ PDataType base = PVarchar.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testWith256NullsAnd0Nulls() throws SQLException {
+ Object[] o1 = new Object[257];
+ Object[] o2 = new Object[1];
+ Object[] e = new Object[258];
+ o1[0] = "a";
+ o2[o2.length - 1] = "a";
+ e[e.length - 1] = "a";
+ e[0] = "a";
+
+ PDataType type = PVarcharArray.INSTANCE;
+ PDataType base = PVarchar.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testWith255NullsAnd0Nulls() throws SQLException {
+ Object[] o1 = new Object[256];
+ Object[] o2 = new Object[1];
+ Object[] e = new Object[257];
+ o1[0] = "a";
+ o2[o2.length - 1] = "a";
+ e[e.length - 1] = "a";
+ e[0] = "a";
+
+ PDataType type = PVarcharArray.INSTANCE;
+ PDataType base = PVarchar.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testWith0NullsAnd255Nulls() throws SQLException {
+ Object[] o1 = new Object[1];
+ Object[] o2 = new Object[256];
+ Object[] e = new Object[257];
+ o1[0] = "a";
+ o2[o2.length - 1] = "a";
+ e[e.length - 1] = "a";
+ e[0] = "a";
+
+ PDataType type = PVarcharArray.INSTANCE;
+ PDataType base = PVarchar.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testWith10NullsAnd245Nulls() throws SQLException {
+ Object[] o1 = new Object[11];
+ Object[] o2 = new Object[246];
+ Object[] e = new Object[257];
+ o1[0] = "a";
+ o2[o2.length - 1] = "a";
+ e[e.length - 1] = "a";
+ e[0] = "a";
+
+ PDataType type = PVarcharArray.INSTANCE;
+ PDataType base = PVarchar.INSTANCE;
+
+ PhoenixArray arr1 = new PhoenixArray(base, o1);
+ PhoenixArray arr2 = new PhoenixArray(base, o2);
+ PhoenixArray expected = new PhoenixArray(base, e);
+ test(arr1, arr2, type, null, null, type, null, null, expected, SortOrder.ASC, SortOrder.ASC);
+ }
+}
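
For context on what the tests above exercise, ARRAY_CAT concatenates two arrays of the same element type,
preserving element order and embedded nulls. A hedged JDBC sketch follows; the table name, column names, and
the localhost connection URL are invented for illustration and assume a running Phoenix instance with this
patch applied.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class ArrayCatExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                stmt.execute("CREATE TABLE IF NOT EXISTS t (id INTEGER PRIMARY KEY,"
                        + " a DOUBLE ARRAY, b DOUBLE ARRAY)");
                stmt.execute("UPSERT INTO t VALUES (1, ARRAY[1.2, 2.0], ARRAY[5.2, 6.0, 7.0])");
                conn.commit();
                // ARRAY_CAT appends the second array to the first.
                ResultSet rs = stmt.executeQuery("SELECT ARRAY_CAT(a, b) FROM t WHERE id = 1");
                while (rs.next()) {
                    System.out.println(rs.getArray(1)); // expected: [1.2, 2.0, 5.2, 6.0, 7.0]
                }
            }
        }
    }
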
[17/47] phoenix git commit: PHOENIX-1920 - Pherf - Add support for
mixed r/w workloads
Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java
index 78f18ca..c9333a0 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java
@@ -43,153 +43,160 @@ import difflib.DiffUtils;
import difflib.Patch;
public class QueryVerifier {
- private PhoenixUtil pUtil = new PhoenixUtil();
- private static final Logger logger = LoggerFactory
- .getLogger(QueryVerifier.class);
- private boolean useTemporaryOutput;
- private String directoryLocation;
-
- public QueryVerifier(boolean useTemporaryOutput) {
- this.useTemporaryOutput = useTemporaryOutput;
- this.directoryLocation = this.useTemporaryOutput ?
- PherfConstants.EXPORT_TMP : PherfConstants.EXPORT_DIR;
-
- ensureBaseDirExists();
- }
-
- /***
- * Export query resultSet to CSV file
- * @param query
- * @throws Exception
- */
- public String exportCSV(Query query) throws Exception {
- Connection conn = null;
- PreparedStatement statement = null;
- ResultSet rs = null;
- String fileName = getFileName(query);
- FileOutputStream fos = new FileOutputStream(fileName);
- try {
- conn = pUtil.getConnection(query.getTenantId());
- statement = conn.prepareStatement(query.getStatement());
- boolean isQuery = statement.execute();
- if (isQuery) {
- rs = statement.executeQuery();
- int columnCount = rs.getMetaData().getColumnCount();
- while (rs.next()) {
- for (int columnNum = 1; columnNum <= columnCount; columnNum++) {
- fos.write((rs.getString(columnNum) + PherfConstants.RESULT_FILE_DELIMETER).getBytes());
- }
- fos.write(PherfConstants.NEW_LINE.getBytes());
- }
- } else {
- conn.commit();
- }
- } catch (Exception e) {
- e.printStackTrace();
- } finally {
- if (rs != null) rs.close();
- if (statement != null) statement.close();
- if (conn != null) conn.close();
- fos.flush();
- fos.close();
- }
- return fileName;
- }
-
- /***
- * Do a diff between exported query results and temporary CSV file
- * @param query
- * @param newCSV
- * @return
- */
- public boolean doDiff(Query query, String newCSV) {
+ private PhoenixUtil pUtil = PhoenixUtil.create();
+ private static final Logger logger = LoggerFactory.getLogger(QueryVerifier.class);
+ private boolean useTemporaryOutput;
+ private String directoryLocation;
+
+ public QueryVerifier(boolean useTemporaryOutput) {
+ this.useTemporaryOutput = useTemporaryOutput;
+ this.directoryLocation =
+ this.useTemporaryOutput ? PherfConstants.EXPORT_TMP : PherfConstants.EXPORT_DIR;
+
+ ensureBaseDirExists();
+ }
+
+ /**
+ * Exports a query's ResultSet to a CSV file
+ *
+ * @param query
+ * @throws Exception
+ */
+ public String exportCSV(Query query) throws Exception {
+ Connection conn = null;
+ PreparedStatement statement = null;
+ ResultSet rs = null;
+ String fileName = getFileName(query);
+ FileOutputStream fos = new FileOutputStream(fileName);
+ try {
+ conn = pUtil.getConnection(query.getTenantId());
+ statement = conn.prepareStatement(query.getStatement());
+ boolean isQuery = statement.execute();
+ if (isQuery) {
+ rs = statement.getResultSet(); // reuse the ResultSet from execute() instead of running the query twice
+ int columnCount = rs.getMetaData().getColumnCount();
+ while (rs.next()) {
+ for (int columnNum = 1; columnNum <= columnCount; columnNum++) {
+ fos.write((rs.getString(columnNum) + PherfConstants.RESULT_FILE_DELIMETER)
+ .getBytes());
+ }
+ fos.write(PherfConstants.NEW_LINE.getBytes());
+ }
+ } else {
+ conn.commit();
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ } finally {
+ if (rs != null) rs.close();
+ if (statement != null) statement.close();
+ if (conn != null) conn.close();
+ fos.flush();
+ fos.close();
+ }
+ return fileName;
+ }
+
+ /**
+ * Do a diff between exported query results and temporary CSV file
+ *
+ * @param query
+ * @param newCSV
+ * @return
+ */
+ public boolean doDiff(Query query, String newCSV) {
List<String> original = fileToLines(getCSVName(query, PherfConstants.EXPORT_DIR, ""));
- List<String> newLines = fileToLines(newCSV);
-
+ List<String> newLines = fileToLines(newCSV);
+
Patch patch = DiffUtils.diff(original, newLines);
if (patch.getDeltas().isEmpty()) {
- logger.info("Match: " + query.getId() + " with " + newCSV);
- return true;
+ logger.info("Match: " + query.getId() + " with " + newCSV);
+ return true;
} else {
- logger.error("DIFF FAILED: " + query.getId() + " with " + newCSV);
- return false;
+ logger.error("DIFF FAILED: " + query.getId() + " with " + newCSV);
+ return false;
}
- }
-
- /***
- * Helper method to load file
- * @param filename
- * @return
- */
+ }
+
+ /**
+ * Helper method to load file
+ *
+ * @param filename
+ * @return
+ */
private static List<String> fileToLines(String filename) {
- List<String> lines = new LinkedList<String>();
- String line = "";
- try {
- BufferedReader in = new BufferedReader(new FileReader(filename));
- while ((line = in.readLine()) != null) {
- lines.add(line);
- }
- in.close();
- } catch (IOException e) {
- e.printStackTrace();
+ List<String> lines = new LinkedList<String>();
+ String line = "";
+ try {
+ BufferedReader in = new BufferedReader(new FileReader(filename));
+ while ((line = in.readLine()) != null) {
+ lines.add(line);
}
-
- return lines;
+ in.close();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+
+ return lines;
}
/**
* Get explain plan for a query
+ *
* @param query
* @return
* @throws SQLException
*/
- public String getExplainPlan(Query query) throws SQLException {
- Connection conn = null;
- ResultSet rs = null;
- PreparedStatement statement = null;
- StringBuilder buf = new StringBuilder();
- try {
- conn = pUtil.getConnection(query.getTenantId());
- statement = conn.prepareStatement("EXPLAIN " + query.getStatement());
- rs = statement.executeQuery();
- while (rs.next()) {
- buf.append(rs.getString(1).trim().replace(",", "-"));
- }
- statement.close();
- } catch (Exception e) {
- e.printStackTrace();
- } finally {
- if (rs != null) rs.close();
- if (statement != null) statement.close();
- if (conn != null) conn.close();
- }
- return buf.toString();
- }
-
- /***
+ public String getExplainPlan(Query query) throws SQLException {
+ Connection conn = null;
+ ResultSet rs = null;
+ PreparedStatement statement = null;
+ StringBuilder buf = new StringBuilder();
+ try {
+ conn = pUtil.getConnection(query.getTenantId());
+ statement = conn.prepareStatement("EXPLAIN " + query.getStatement());
+ rs = statement.executeQuery();
+ while (rs.next()) {
+ buf.append(rs.getString(1).trim().replace(",", "-"));
+ }
+ statement.close();
+ } catch (Exception e) {
+ e.printStackTrace();
+ } finally {
+ if (rs != null) rs.close();
+ if (statement != null) statement.close();
+ if (conn != null) conn.close();
+ }
+ return buf.toString();
+ }
+
+ /**
* Helper method to generate CSV file name
+ *
* @param query
* @return
* @throws FileNotFoundException
*/
- private String getFileName(Query query) throws FileNotFoundException {
- String tempExt = "";
- if (this.useTemporaryOutput) {
- tempExt = "_" + java.util.UUID.randomUUID().toString();
- }
- return getCSVName(query, this.directoryLocation, tempExt);
- }
-
- private String getCSVName(Query query, String directory, String tempExt) {
- String csvFile = directory + PherfConstants.PATH_SEPARATOR
- + query.getId() + tempExt + Extension.CSV.toString();
- return csvFile;
- }
-
- private void ensureBaseDirExists() {
- File baseDir = new File(this.directoryLocation);
- if (!baseDir.exists()) {
- baseDir.mkdir();
- }
- }
+ private String getFileName(Query query) throws FileNotFoundException {
+ String tempExt = "";
+ if (this.useTemporaryOutput) {
+ tempExt = "_" + java.util.UUID.randomUUID().toString();
+ }
+ return getCSVName(query, this.directoryLocation, tempExt);
+ }
+
+ private String getCSVName(Query query, String directory, String tempExt) {
+ String
+ csvFile =
+ directory + PherfConstants.PATH_SEPARATOR + query.getId() + tempExt + Extension.CSV
+ .toString();
+ return csvFile;
+ }
+
+ private void ensureBaseDirExists() {
+ File baseDir = new File(this.directoryLocation);
+ if (!baseDir.exists()) {
+ baseDir.mkdir();
+ }
+ }
}
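
QueryVerifier.doDiff builds on java-diff-utils: two lists of CSV lines are diffed, and an empty delta list
means the exported results match the baseline. A standalone sketch of that check, assuming the same difflib
API the class already imports; the sample lines are invented.

    import java.util.Arrays;
    import java.util.List;

    import difflib.DiffUtils;
    import difflib.Patch;

    public class DiffCheckExample {
        public static void main(String[] args) {
            List<String> baseline = Arrays.asList("1,foo", "2,bar");
            List<String> current = Arrays.asList("1,foo", "2,baz");

            // The same comparison doDiff performs on the exported CSV lines.
            Patch patch = DiffUtils.diff(baseline, current);
            boolean matches = patch.getDeltas().isEmpty();
            System.out.println(matches ? "Match" : "DIFF FAILED: " + patch.getDeltas());
        }
    }
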
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/Workload.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/Workload.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/Workload.java
new file mode 100644
index 0000000..16a493e
--- /dev/null
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/Workload.java
@@ -0,0 +1,10 @@
+package org.apache.phoenix.pherf.workload;
+
+public interface Workload {
+ public Runnable execute() throws Exception;
+
+ /**
+ * Use this method to perform any cleanup or forced shutdown of the thread.
+ */
+ public void complete();
+}
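
The new Workload contract is small: execute() hands the executor a Runnable to submit, and complete() is the
hook for cleanup or forced shutdown. Below is a minimal sketch of an implementation; the class name and its
stop-flag behavior are invented for illustration.

    package org.apache.phoenix.pherf.workload;

    public class NoOpWorkload implements Workload {
        private volatile boolean stopped = false;

        @Override
        public Runnable execute() {
            return new Runnable() {
                @Override
                public void run() {
                    // Perform units of work until done, polling the stop flag so that
                    // complete() can end the run early.
                    while (!stopped) {
                        break; // placeholder for one batch of real work
                    }
                }
            };
        }

        @Override
        public void complete() {
            stopped = true; // signal the running thread to wind down
        }
    }

A workload like this would be registered with the executor via add(Workload) and awaited with get(), as shown
in the WorkloadExecutor diff below.
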
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java
index cf2f038..a65b4aa 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java
@@ -19,95 +19,96 @@
package org.apache.phoenix.pherf.workload;
import org.apache.phoenix.pherf.PherfConstants;
-import org.apache.phoenix.pherf.PherfConstants.RunMode;
-import org.apache.phoenix.pherf.configuration.XMLConfigParser;
-import org.apache.phoenix.pherf.jmx.MonitorManager;
-import org.apache.phoenix.pherf.loaddata.DataLoader;
-
-import org.apache.phoenix.pherf.util.ResourceList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
import java.util.Properties;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
+import java.util.concurrent.*;
public class WorkloadExecutor {
private static final Logger logger = LoggerFactory.getLogger(WorkloadExecutor.class);
- private final XMLConfigParser parser;
- private MonitorManager monitor;
- private Future monitorThread;
private final int poolSize;
- private final ExecutorService pool;
+ // Jobs can be accessed by multiple threads
+ private final Map<Workload, Future> jobs = new ConcurrentHashMap<>();
+ private final ExecutorService pool;
public WorkloadExecutor() throws Exception {
this(PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES));
}
- public WorkloadExecutor(Properties properties) throws Exception{
- this(properties,PherfConstants.DEFAULT_FILE_PATTERN);
+ public WorkloadExecutor(Properties properties) throws Exception {
+ this(properties, new ArrayList<Workload>());
}
- public WorkloadExecutor(Properties properties, String filePattern) throws Exception {
- this(properties,
- new XMLConfigParser(filePattern),
- true);
+ public WorkloadExecutor(Properties properties, List<Workload> workloads) throws Exception {
+ this.poolSize = (properties.getProperty("pherf.default.threadpool") == null)
+ ? PherfConstants.DEFAULT_THREAD_POOL_SIZE
+ : Integer.parseInt(properties.getProperty("pherf.default.threadpool"));
+
+ this.pool = Executors.newFixedThreadPool(this.poolSize);
+ init(workloads);
}
- public WorkloadExecutor(Properties properties, XMLConfigParser parser, boolean monitor) throws Exception {
- this.parser = parser;
- this.poolSize = (properties.getProperty("pherf.default.threadpool") == null)
- ? PherfConstants.DEFAULT_THREAD_POOL_SIZE
- : Integer.parseInt(properties.getProperty("pherf.default.threadpool"));
+ public void add(Workload workload) throws Exception {
+ this.jobs.put(workload, pool.submit(workload.execute()));
+ }
- this.pool = Executors.newFixedThreadPool(this.poolSize);
- if (monitor) {
- initMonitor(Integer.parseInt(properties.getProperty("pherf.default.monitorFrequency")));
+ /**
+ * Blocks while waiting for all workloads to finish. If a
+ * {@link org.apache.phoenix.pherf.workload.Workload} requires complete() to be called, it must
+ * be called prior to using this method; otherwise this method will block indefinitely.
+ */
+ public void get() {
+ for (Workload workload : jobs.keySet()) {
+ get(workload);
}
}
/**
- * Executes all scenarios dataload
+ * Calls the {@link java.util.concurrent.Future#get()} method pertaining to this workload.
+ * Once the Future completes, the workload is removed from the job map.
*
- * @throws Exception
+ * @param workload key of the entry in the job map
*/
- public void executeDataLoad() throws Exception {
- logger.info("\n\nStarting Data Loader...");
- DataLoader dataLoader = new DataLoader(parser);
- dataLoader.execute();
+ public void get(Workload workload) {
+ try {
+ Future future = jobs.get(workload);
+ future.get();
+ jobs.remove(workload);
+ } catch (InterruptedException | ExecutionException e) {
+ logger.error("", e);
+ }
}
/**
- * Executes all scenario multi-threaded query sets
- *
- * @param queryHint
- * @throws Exception
+ * Completes all workloads in the list.
+ * Entries in the job map persist until {@link WorkloadExecutor#get()} is called.
*/
- public void executeMultithreadedQueryExecutor(String queryHint, boolean export, RunMode runMode) throws Exception {
- logger.info("\n\nStarting Query Executor...");
- QueryExecutor queryExecutor = new QueryExecutor(parser);
- queryExecutor.execute(queryHint, export, runMode);
+ public void complete() {
+ for (Workload workload : jobs.keySet()) {
+ workload.complete();
+ }
}
- public void shutdown() throws Exception {
- if (null != monitor && monitor.isRunning()) {
- this.monitor.stop();
- this.monitorThread.get(60, TimeUnit.SECONDS);
- this.pool.shutdown();
- }
+ public void shutdown() {
+ // Make sure any Workloads still on the pool have been properly shut down
+ complete();
+ pool.shutdownNow();
}
- // Just used for testing
- public XMLConfigParser getParser() {
- return parser;
+ public ExecutorService getPool() {
+ return pool;
}
- private void initMonitor(int monitorFrequency) throws Exception {
- this.monitor = new MonitorManager(monitorFrequency);
- monitorThread = pool.submit(this.monitor);
+ private void init(List<Workload> workloads) throws Exception {
+ for (Workload workload : workloads) {
+ this.jobs.put(workload, pool.submit(workload.execute()));
+ }
}
}
\ No newline at end of file
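
Putting the pieces together, the reworked executor is driven roughly like this (a sketch only; error handling omitted, and the scenario pattern is assumed to match a file on the classpath):

    WorkloadExecutor executor = new WorkloadExecutor();
    Workload writes = new WriteWorkload(new XMLConfigParser(".*test_scenario.xml"));
    executor.add(writes);   // submits writes.execute() to the pool
    executor.get(writes);   // blocks until the write Future completes
    executor.shutdown();    // complete()s remaining workloads, then stops the pool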
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
new file mode 100644
index 0000000..305521b
--- /dev/null
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
@@ -0,0 +1,403 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.pherf.workload;
+
+import org.apache.phoenix.pherf.PherfConstants;
+import org.apache.phoenix.pherf.configuration.Column;
+import org.apache.phoenix.pherf.configuration.Scenario;
+import org.apache.phoenix.pherf.configuration.WriteParams;
+import org.apache.phoenix.pherf.configuration.XMLConfigParser;
+import org.apache.phoenix.pherf.exception.PherfException;
+import org.apache.phoenix.pherf.result.DataLoadThreadTime;
+import org.apache.phoenix.pherf.result.DataLoadTimeSummary;
+import org.apache.phoenix.pherf.result.ResultUtil;
+import org.apache.phoenix.pherf.rules.DataValue;
+import org.apache.phoenix.pherf.rules.RulesApplier;
+import org.apache.phoenix.pherf.util.PhoenixUtil;
+import org.apache.phoenix.pherf.util.RowCalculator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.math.BigDecimal;
+import java.sql.*;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+public class WriteWorkload implements Workload {
+ private static final Logger logger = LoggerFactory.getLogger(WriteWorkload.class);
+ private final PhoenixUtil pUtil;
+ private final XMLConfigParser parser;
+ private final RulesApplier rulesApplier;
+ private final ResultUtil resultUtil;
+ private final ExecutorService pool;
+ private final WriteParams writeParams;
+ private final Scenario scenario;
+ private final long threadSleepDuration;
+
+ private final int threadPoolSize;
+ private final int batchSize;
+
+ public WriteWorkload(XMLConfigParser parser) throws Exception {
+ this(PhoenixUtil.create(), parser);
+ }
+
+ public WriteWorkload(PhoenixUtil util, XMLConfigParser parser) throws Exception {
+ this(util, parser, null);
+ }
+
+ public WriteWorkload(PhoenixUtil phoenixUtil, XMLConfigParser parser, Scenario scenario)
+ throws Exception {
+ this(phoenixUtil, PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES),
+ parser, scenario);
+ }
+
+ /**
+ * Default the writers to use up all available cores for threads. If writeParams are used in
+ * the config files, they will override the defaults. writeParams are used for read/write mixed
+ * workloads.
+ * TODO extract notion of the scenario list and have 1 write workload per scenario
+ *
+ * @param phoenixUtil {@link org.apache.phoenix.pherf.util.PhoenixUtil} Query helper
+ * @param properties {@link java.util.Properties} default properties to use
+ * @param parser {@link org.apache.phoenix.pherf.configuration.XMLConfigParser}
+ * @param scenario {@link org.apache.phoenix.pherf.configuration.Scenario} If null is passed
+ * it will run against all scenarios in the parsers list.
+ * @throws Exception
+ */
+ public WriteWorkload(PhoenixUtil phoenixUtil, Properties properties, XMLConfigParser parser,
+ Scenario scenario) throws Exception {
+ this.pUtil = phoenixUtil;
+ this.parser = parser;
+ this.rulesApplier = new RulesApplier(parser);
+ this.resultUtil = new ResultUtil();
+
+ // Overwrite default properties with those given in the configuration. This indicates the
+ // scenario is a R/W mixed workload.
+ if (scenario != null) {
+ this.scenario = scenario;
+ writeParams = scenario.getWriteParams();
+ threadSleepDuration = writeParams.getThreadSleepDuration();
+ } else {
+ writeParams = null;
+ this.scenario = null;
+ threadSleepDuration = 0;
+ }
+
+ int size = Integer.parseInt(properties.getProperty("pherf.default.dataloader.threadpool"));
+
+ this.threadPoolSize = (size == 0) ? Runtime.getRuntime().availableProcessors() : size;
+
+ // TODO Move pool management up to WorkloadExecutor
+ this.pool = Executors.newFixedThreadPool(this.threadPoolSize);
+
+ String bSize = (writeParams == null) || (writeParams.getBatchSize() == Long.MIN_VALUE)
+ ? properties.getProperty("pherf.default.dataloader.batchsize")
+ : String.valueOf(writeParams.getBatchSize());
+ this.batchSize = (bSize == null) ? PherfConstants.DEFAULT_BATCH_SIZE : Integer.parseInt(bSize);
+ }
+
+ @Override public void complete() {
+ }
+
+ public Runnable execute() throws Exception {
+ return new Runnable() {
+ @Override public void run() {
+ try {
+ DataLoadTimeSummary dataLoadTimeSummary = new DataLoadTimeSummary();
+ DataLoadThreadTime dataLoadThreadTime = new DataLoadThreadTime();
+
+ if (WriteWorkload.this.scenario == null) {
+ for (Scenario scenario : getParser().getScenarios()) {
+ exec(dataLoadTimeSummary, dataLoadThreadTime, scenario);
+ }
+ } else {
+ exec(dataLoadTimeSummary, dataLoadThreadTime, WriteWorkload.this.scenario);
+ }
+ resultUtil.write(dataLoadTimeSummary);
+ resultUtil.write(dataLoadThreadTime);
+
+ } catch (Exception e) {
+ logger.warn("", e);
+ }
+ }
+ };
+ }
+
+ private synchronized void exec(DataLoadTimeSummary dataLoadTimeSummary,
+ DataLoadThreadTime dataLoadThreadTime, Scenario scenario) throws Exception {
+ logger.info("\nLoading " + scenario.getRowCount() + " rows for " + scenario.getTableName());
+ long start = System.currentTimeMillis();
+
+ List<Future> writeBatches = getBatches(dataLoadThreadTime, scenario);
+
+ waitForBatches(dataLoadTimeSummary, scenario, start, writeBatches);
+
+ // always update stats for Phoenix base tables
+ updatePhoenixStats(scenario.getTableName());
+ }
+
+ private List<Future> getBatches(DataLoadThreadTime dataLoadThreadTime, Scenario scenario)
+ throws Exception {
+ RowCalculator rowCalculator = new RowCalculator(getThreadPoolSize(), scenario.getRowCount());
+ List<Future> writeBatches = new ArrayList<>();
+
+ for (int i = 0; i < getThreadPoolSize(); i++) {
+ List<Column> phxMetaCols = pUtil.getColumnsFromPhoenix(scenario.getSchemaName(),
+ scenario.getTableNameWithoutSchemaName(), pUtil.getConnection());
+ int threadRowCount = rowCalculator.getNext();
+ logger.info("Kicking off thread (#" + i + ") for upsert with (" + threadRowCount + ") rows.");
+ Future<Info> write = upsertData(scenario, phxMetaCols, scenario.getTableName(),
+ threadRowCount, dataLoadThreadTime);
+ writeBatches.add(write);
+ }
+ if (writeBatches.isEmpty()) {
+ throw new PherfException(
+ "Could not write data for an unknown reason. Giving up and exiting.");
+ }
+
+ return writeBatches;
+ }
+
+ private void waitForBatches(DataLoadTimeSummary dataLoadTimeSummary, Scenario scenario,
+ long start, List<Future> writeBatches)
+ throws InterruptedException, java.util.concurrent.ExecutionException {
+ int sumRows = 0, sumDuration = 0;
+ // Wait for all the batch threads to complete
+ for (Future<Info> write : writeBatches) {
+ Info writeInfo = write.get();
+ sumRows += writeInfo.getRowCount();
+ sumDuration += writeInfo.getDuration();
+ logger.info("Executor (" + this.hashCode() + ") writes complete with row count ("
+ + writeInfo.getRowCount() + ") in Ms (" + writeInfo.getDuration() + ")");
+ }
+ logger.info("Writes completed with total row count (" + sumRows + ") with total time of("
+ + sumDuration + ") Ms");
+ dataLoadTimeSummary
+ .add(scenario.getTableName(), sumRows, (int) (System.currentTimeMillis() - start));
+ }
+
+ /**
+ * TODO Move this method to PhoenixUtil
+ * Update Phoenix table stats
+ *
+ * @param tableName
+ * @throws Exception
+ */
+ public void updatePhoenixStats(String tableName) throws Exception {
+ logger.info("Updating stats for " + tableName);
+ pUtil.executeStatement("UPDATE STATISTICS " + tableName);
+ }
+
+ public Future<Info> upsertData(final Scenario scenario, final List<Column> columns,
+ final String tableName, final int rowCount,
+ final DataLoadThreadTime dataLoadThreadTime) {
+ Future<Info> future = pool.submit(new Callable<Info>() {
+ @Override public Info call() throws Exception {
+ int rowsCreated = 0;
+ long start = 0, duration, totalDuration;
+ SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+ Connection connection = null;
+ try {
+ connection = pUtil.getConnection();
+ long logStartTime = System.currentTimeMillis();
+ long maxDuration = (WriteWorkload.this.writeParams == null)
+ ? Long.MAX_VALUE
+ : WriteWorkload.this.writeParams.getExecutionDurationInMs();
+
+ for (long i = rowCount; (i > 0) && ((System.currentTimeMillis() - logStartTime)
+ < maxDuration); i--) {
+ String sql = buildSql(columns, tableName);
+ PreparedStatement stmt = connection.prepareStatement(sql);
+ stmt = buildStatement(scenario, columns, stmt, simpleDateFormat);
+ start = System.currentTimeMillis();
+ rowsCreated += stmt.executeUpdate();
+ stmt.close();
+ if ((i % getBatchSize()) == 0) {
+ connection.commit();
+ duration = System.currentTimeMillis() - start;
+ logger.info("Writer (" + Thread.currentThread().getName()
+ + ") committed Batch. Total " + getBatchSize()
+ + " rows for this thread (" + this.hashCode() + ") in ("
+ + duration + ") Ms");
+
+ if (i % PherfConstants.LOG_PER_NROWS == 0 && i != 0) {
+ dataLoadThreadTime
+ .add(tableName, Thread.currentThread().getName(), i,
+ System.currentTimeMillis() - logStartTime);
+ logStartTime = System.currentTimeMillis();
+ }
+
+ // Pause for throttling if configured to do so
+ Thread.sleep(threadSleepDuration);
+ }
+ }
+ } finally {
+ if (connection != null) {
+ try {
+ connection.commit();
+ duration = System.currentTimeMillis() - start;
+ logger.info("Writer ( " + Thread.currentThread().getName()
+ + ") committed Final Batch. Duration (" + duration + ") Ms");
+ connection.close();
+ } catch (SQLException e) {
+ // Swallow since we are closing anyway
+ e.printStackTrace();
+ }
+ }
+ }
+ totalDuration = System.currentTimeMillis() - start;
+ return new Info(totalDuration, rowsCreated);
+ }
+ });
+ return future;
+ }
+
+ private PreparedStatement buildStatement(Scenario scenario, List<Column> columns,
+ PreparedStatement statement, SimpleDateFormat simpleDateFormat) throws Exception {
+ int count = 1;
+ for (Column column : columns) {
+
+ DataValue dataValue = getRulesApplier().getDataForRule(scenario, column);
+ switch (column.getType()) {
+ case VARCHAR:
+ if (dataValue.getValue().equals("")) {
+ statement.setNull(count, Types.VARCHAR);
+ } else {
+ statement.setString(count, dataValue.getValue());
+ }
+ break;
+ case CHAR:
+ if (dataValue.getValue().equals("")) {
+ statement.setNull(count, Types.CHAR);
+ } else {
+ statement.setString(count, dataValue.getValue());
+ }
+ break;
+ case DECIMAL:
+ if (dataValue.getValue().equals("")) {
+ statement.setNull(count, Types.DECIMAL);
+ } else {
+ statement.setBigDecimal(count, new BigDecimal(dataValue.getValue()));
+ }
+ break;
+ case INTEGER:
+ if (dataValue.getValue().equals("")) {
+ statement.setNull(count, Types.INTEGER);
+ } else {
+ statement.setInt(count, Integer.parseInt(dataValue.getValue()));
+ }
+ break;
+ case DATE:
+ if (dataValue.getValue().equals("")) {
+ statement.setNull(count, Types.DATE);
+ } else {
+ Date date = new java.sql.Date(
+ simpleDateFormat.parse(dataValue.getValue()).getTime());
+ statement.setDate(count, date);
+ }
+ break;
+ default:
+ break;
+ }
+ count++;
+ }
+ return statement;
+ }
+
+ private String buildSql(final List<Column> columns, final String tableName) {
+ StringBuilder builder = new StringBuilder();
+ builder.append("upsert into ");
+ builder.append(tableName);
+ builder.append(" (");
+ int count = 1;
+ for (Column column : columns) {
+ builder.append(column.getName());
+ if (count < columns.size()) {
+ builder.append(",");
+ } else {
+ builder.append(")");
+ }
+ count++;
+ }
+ builder.append(" VALUES (");
+ for (int i = 0; i < columns.size(); i++) {
+ if (i < columns.size() - 1) {
+ builder.append("?,");
+ } else {
+ builder.append("?)");
+ }
+ }
+ return builder.toString();
+ }
+
+ public XMLConfigParser getParser() {
+ return parser;
+ }
+
+ public RulesApplier getRulesApplier() {
+ return rulesApplier;
+ }
+
+ public int getBatchSize() {
+ return batchSize;
+ }
+
+ public int getThreadPoolSize() {
+ return threadPoolSize;
+ }
+
+ private class Info {
+
+ private final int rowCount;
+ private final long duration;
+
+ public Info(long duration, int rows) {
+ this.duration = duration;
+ this.rowCount = rows;
+ }
+
+ public long getDuration() {
+ return duration;
+ }
+
+ public int getRowCount() {
+ return rowCount;
+ }
+ }
+}
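
For reference, buildSql() above emits one parameterized UPSERT per row. For a hypothetical table T with columns A and B it produces:

    upsert into T (A,B) VALUES (?,?)

buildStatement() then binds each placeholder from the rules engine, and the surrounding loop in upsertData() commits once every batchSize rows.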
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/main/resources/scenario/prod_test_unsalted_scenario.xml
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/resources/scenario/prod_test_unsalted_scenario.xml b/phoenix-pherf/src/main/resources/scenario/prod_test_unsalted_scenario.xml
index 9514089..8f93685 100644
--- a/phoenix-pherf/src/main/resources/scenario/prod_test_unsalted_scenario.xml
+++ b/phoenix-pherf/src/main/resources/scenario/prod_test_unsalted_scenario.xml
@@ -304,6 +304,41 @@
</column>
</datamapping>
<scenarios>
+ <scenario tableName="PHERF.PHERF_PROD_TEST_UNSALTED" rowCount="100" name="readWriteScenario">
+ <!-- Scenario level rule overrides will be unsupported in V1.
+ You can use the general datamappings in the meantime -->
+ <dataOverride>
+ <column>
+ <type>VARCHAR</type>
+ <userDefined>true</userDefined>
+ <dataSequence>LIST</dataSequence>
+ <name>TENANT_ID</name>
+ </column>
+ </dataOverride>
+ <writeParams executionDurationInMs="10000">
+ <!--
+ Number of writer threads to insert into the thread pool
+ -->
+ <writerThreadCount>5</writerThreadCount>
+
+ <!--
+ Time in Ms that each thread will sleep between batch writes. This helps to
+ throttle writers.
+ -->
+ <threadSleepDuration>10</threadSleepDuration>
+
+ <batchSize>100</batchSize>
+ </writeParams>
+ <!-- Minimum of executionDurationInMs or numberOfExecutions, whichever is reached first -->
+ <querySet concurrency="1" executionType="PARALLEL" executionDurationInMs="60000"
+ numberOfExecutions="100">
+ <!-- Aggregate queries on a per tenant basis -->
+ <query tenantId="00Dxx0000001gER"
+ ddl="CREATE VIEW IF NOT EXISTS PHERF.PHERF_TEST_VIEW_UNSALTED AS SELECT * FROM PHERF.PHERF_PROD_TEST_UNSALTED"
+ statement="select count(*) from PHERF.PHERF_TEST_VIEW_UNSALTED"/>
+ </querySet>
+
+ </scenario>
<scenario tableName="PHERF.PHERF_PROD_TEST_UNSALTED" rowCount="10">
<!-- Scenario level rule overrides will be unsupported in V1.
You can use the general datamappings in the meantime -->
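
Read together with the upsertData() loop above, the writeParams in this scenario mean: each of the 5 writer threads upserts for at most 10 s (executionDurationInMs), committing every 100 rows (batchSize) and sleeping 10 ms after each commit (threadSleepDuration), while the querySet runs its aggregate query in parallel for up to 60 s or 100 executions, whichever comes first.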
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
index f362842..6f25fbd 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
@@ -18,6 +18,7 @@
package org.apache.phoenix.pherf;
+import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Path;
import java.nio.file.Paths;
@@ -27,7 +28,6 @@ import java.util.List;
import org.apache.phoenix.pherf.configuration.*;
import org.apache.phoenix.pherf.rules.DataValue;
-import org.apache.phoenix.pherf.workload.WorkloadExecutor;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -38,53 +38,55 @@ import javax.xml.bind.Marshaller;
import static org.junit.Assert.*;
-public class ConfigurationParserTest extends ResultBaseTest{
+public class ConfigurationParserTest extends ResultBaseTest {
private static final Logger logger = LoggerFactory.getLogger(ConfigurationParserTest.class);
@Test
- public void testConfigFilesParsing() {
- try {
- WorkloadExecutor workloadExec = new WorkloadExecutor();
- List<Scenario> scenarioList = workloadExec.getParser().getScenarios();
- assertTrue("Could not load the scenarios from xml.", (scenarioList != null) && (scenarioList.size() > 0));
- logger.info("Number of scenarios loaded: " + scenarioList.size());
-
- } catch (Exception e) {
- e.printStackTrace();
- fail();
+ public void testReadWriteWorkloadReader() throws Exception {
+ String scenarioName = "testScenarioRW";
+ List<Scenario> scenarioList = getScenarios();
+ Scenario target = null;
+ for (Scenario scenario : scenarioList) {
+ if (scenarioName.equals(scenario.getName())) {
+ target = scenario;
+ }
}
+ assertNotNull("Could not find scenario: " + scenarioName, target);
+ WriteParams params = target.getWriteParams();
+
+ assertNotNull("Could not find writeParams in scenario: " + scenarioName, params);
+ assertNotNull("Could not find batch size: ", params.getBatchSize());
+ assertNotNull("Could not find execution duration: ", params.getExecutionDurationInMs());
+ assertNotNull("Could not find sleep duration: ", params.getThreadSleepDuration());
+ assertNotNull("Could not find writer count: ", params.getWriterThreadCount());
}
- @Test
+ @Test
// TODO Break this into multiple smaller tests.
- public void testConfigReader(){
- URL resourceUrl = getClass().getResource("/scenario/test_scenario.xml");
- assertNotNull("Test data XML file is missing", resourceUrl);
-
- try {
+ public void testConfigReader() {
+ try {
logger.debug("DataModel: " + writeXML());
- Path resourcePath = Paths.get(resourceUrl.toURI());
- DataModel data = XMLConfigParser.readDataModel(resourcePath);
- List<Scenario> scenarioList = data.getScenarios();
- assertTrue("Could not load the scenarios from xml.", (scenarioList != null) && (scenarioList.size() > 0));
- List<Column> dataMappingColumns = data.getDataMappingColumns();
- assertTrue("Could not load the data columns from xml.", (dataMappingColumns != null) && (dataMappingColumns.size() > 0));
+ List<Scenario> scenarioList = getScenarios();
+ List<Column> dataMappingColumns = getDataModel().getDataMappingColumns();
+ assertTrue("Could not load the data columns from xml.",
+ (dataMappingColumns != null) && (dataMappingColumns.size() > 0));
assertTrue("Could not load the data DataValue list from xml.",
(dataMappingColumns.get(6).getDataValues() != null)
- && (dataMappingColumns.get(6).getDataValues().size() > 0));
+ && (dataMappingColumns.get(6).getDataValues().size() > 0));
assertDateValue(dataMappingColumns);
// Validate column mappings
for (Column column : dataMappingColumns) {
- assertNotNull("Column ("+ column.getName() + ") is missing its type",column.getType());
+ assertNotNull("Column (" + column.getName() + ") is missing its type",
+ column.getType());
}
- Scenario scenario = scenarioList.get(0);
+ Scenario scenario = scenarioList.get(1);
assertNotNull(scenario);
assertEquals("PHERF.TEST_TABLE", scenario.getTableName());
- assertEquals(10, scenario.getRowCount());
+ assertEquals(30, scenario.getRowCount());
assertEquals(1, scenario.getDataOverride().getColumn().size());
QuerySet qs = scenario.getQuerySet().get(0);
assertEquals(ExecutionType.SERIAL, qs.getExecutionType());
@@ -99,27 +101,50 @@ public class ConfigurationParserTest extends ResultBaseTest{
assertEquals("select count(*) from PHERF.TEST_TABLE", firstQuery.getStatement());
assertEquals("123456789012345", firstQuery.getTenantId());
assertEquals(null, firstQuery.getDdl());
- assertEquals(0, (long)firstQuery.getExpectedAggregateRowCount());
+ assertEquals(0, (long) firstQuery.getExpectedAggregateRowCount());
Query secondQuery = qs.getQuery().get(1);
- assertEquals("Could not get statement.", "select sum(SOME_INT) from PHERF.TEST_TABLE", secondQuery.getStatement());
+ assertEquals("Could not get statement.", "select sum(SOME_INT) from PHERF.TEST_TABLE",
+ secondQuery.getStatement());
assertEquals("Could not get queryGroup.", "g1", secondQuery.getQueryGroup());
// Make sure anything in the overrides matches a real column in the data mappings
DataOverride override = scenario.getDataOverride();
for (Column column : override.getColumn()) {
- assertTrue("Could not lookup Column (" + column.getName() + ") in DataMapping columns: " + dataMappingColumns, dataMappingColumns.contains(column));
+ assertTrue("Could not lookup Column (" + column.getName()
+ + ") in DataMapping columns: " + dataMappingColumns,
+ dataMappingColumns.contains(column));
}
- } catch (Exception e) {
- e.printStackTrace();
- fail();
- }
- }
+ } catch (Exception e) {
+ e.printStackTrace();
+ fail();
+ }
+ }
+
+ private URL getResourceUrl() {
+ URL resourceUrl = getClass().getResource("/scenario/test_scenario.xml");
+ assertNotNull("Test data XML file is missing", resourceUrl);
+ return resourceUrl;
+ }
+
+ private List<Scenario> getScenarios() throws URISyntaxException, JAXBException {
+ DataModel data = getDataModel();
+ List<Scenario> scenarioList = data.getScenarios();
+ assertTrue("Could not load the scenarios from xml.",
+ (scenarioList != null) && (scenarioList.size() > 0));
+ return scenarioList;
+ }
+
+ private DataModel getDataModel() throws URISyntaxException, JAXBException {
+ Path resourcePath = Paths.get(getResourceUrl().toURI());
+ return XMLConfigParser.readDataModel(resourcePath);
+ }
private void assertDateValue(List<Column> dataMappingColumns) {
for (Column dataMapping : dataMappingColumns) {
- if ((dataMapping.getType() == DataTypeMapping.DATE) && (dataMapping.getName().equals("CREATED_DATE"))) {
+ if ((dataMapping.getType() == DataTypeMapping.DATE)
+ && (dataMapping.getName().equals("CREATED_DATE"))) {
// First rule should have min/max set
assertNotNull(dataMapping.getDataValues().get(0).getMinValue());
assertNotNull(dataMapping.getDataValues().get(0).getMaxValue());
@@ -139,7 +164,7 @@ public class ConfigurationParserTest extends ResultBaseTest{
/*
Used for debugging to dump out a simple XML file based on the bound objects.
*/
- private String writeXML() {
+ private String writeXML() {
DataModel data = new DataModel();
try {
DataValue dataValue = new DataValue();
@@ -156,7 +181,6 @@ public class ConfigurationParserTest extends ResultBaseTest{
List<Column> columnList = new ArrayList<>();
columnList.add(column);
- data.setRelease("192");
data.setDataMappingColumns(columnList);
Scenario scenario = new Scenario();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java
index a202437..4ccf95c 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ResultTest.java
@@ -33,7 +33,6 @@ import org.apache.phoenix.pherf.result.file.ResultFileDetails;
import org.apache.phoenix.pherf.result.impl.CSVResultHandler;
import org.apache.phoenix.pherf.result.impl.XMLResultHandler;
import org.apache.phoenix.pherf.result.*;
-import org.junit.BeforeClass;
import org.junit.Test;
import org.apache.phoenix.pherf.configuration.Query;
@@ -72,7 +71,7 @@ public class ResultTest extends ResultBaseTest {
public void testMonitorResult() throws Exception {
ExecutorService executorService = Executors.newFixedThreadPool(1);
MonitorManager monitor = new MonitorManager(100);
- Future future = executorService.submit(monitor);
+ Future future = executorService.submit(monitor.execute());
List<Result> records;
final int TIMEOUT = 30;
@@ -83,7 +82,7 @@ public class ResultTest extends ResultBaseTest {
Thread.sleep(100);
if (ct == max) {
int timer = 0;
- monitor.stop();
+ monitor.complete();
while (monitor.isRunning() && (timer < TIMEOUT)) {
System.out.println("Waiting for monitor to finish. Seconds Waited :" + timer);
Thread.sleep(1000);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
index 15d4608..92604d4 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
@@ -19,7 +19,7 @@
package org.apache.phoenix.pherf;
import org.apache.phoenix.pherf.configuration.*;
-import org.apache.phoenix.pherf.loaddata.DataLoader;
+import org.apache.phoenix.pherf.workload.WriteWorkload;
import org.apache.phoenix.pherf.rules.DataValue;
import org.apache.phoenix.pherf.rules.RulesApplier;
import org.apache.phoenix.pherf.util.PhoenixUtil;
@@ -28,20 +28,19 @@ import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.junit.Test;
-import java.sql.Types;
import java.util.*;
import static org.junit.Assert.*;
public class RuleGeneratorTest {
- static PhoenixUtil util = new PhoenixUtil(true);
- static final String matcherScenario = PherfConstants.SCENARIO_ROOT_PATTERN + ".xml";
+ private static PhoenixUtil util = PhoenixUtil.create(true);
+ private static final String matcherScenario = PherfConstants.SCENARIO_ROOT_PATTERN + ".xml";
@Test
public void testDateGenerator() throws Exception {
XMLConfigParser parser = new XMLConfigParser(matcherScenario);
DataModel model = parser.getDataModels().get(0);
- DataLoader loader = new DataLoader(parser);
+ WriteWorkload loader = new WriteWorkload(parser);
RulesApplier rulesApplier = loader.getRulesApplier();
for (Column dataMapping : model.getDataMappingColumns()) {
@@ -68,7 +67,7 @@ public class RuleGeneratorTest {
public void testNullChance() throws Exception {
XMLConfigParser parser = new XMLConfigParser(matcherScenario);
DataModel model = parser.getDataModels().get(0);
- DataLoader loader = new DataLoader(parser);
+ WriteWorkload loader = new WriteWorkload(parser);
RulesApplier rulesApplier = loader.getRulesApplier();
int sampleSize = 100;
List<String> values = new ArrayList<>(sampleSize);
@@ -96,7 +95,7 @@ public class RuleGeneratorTest {
public void testSequentialDataSequence() throws Exception {
XMLConfigParser parser = new XMLConfigParser(matcherScenario);
DataModel model = parser.getDataModels().get(0);
- DataLoader loader = new DataLoader(parser);
+ WriteWorkload loader = new WriteWorkload(parser);
RulesApplier rulesApplier = loader.getRulesApplier();
Column targetColumn = null;
@@ -181,7 +180,7 @@ public class RuleGeneratorTest {
expectedValues.add("cCCyYhnNbBs9kWr");
XMLConfigParser parser = new XMLConfigParser(".*test_scenario.xml");
- DataLoader loader = new DataLoader(parser);
+ WriteWorkload loader = new WriteWorkload(parser);
RulesApplier rulesApplier = loader.getRulesApplier();
Scenario scenario = parser.getScenarios().get(0);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7175dcbc/phoenix-pherf/src/test/resources/scenario/test_scenario.xml
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/test/resources/scenario/test_scenario.xml b/phoenix-pherf/src/test/resources/scenario/test_scenario.xml
index 45d36d2..fddf022 100644
--- a/phoenix-pherf/src/test/resources/scenario/test_scenario.xml
+++ b/phoenix-pherf/src/test/resources/scenario/test_scenario.xml
@@ -127,10 +127,50 @@
<name>NEWVAL_STRING</name>
<prefix>TSTPRFX</prefix>
</column>
-
</datamapping>
<scenarios>
- <scenario tableName="PHERF.TEST_TABLE" rowCount="10" name="testScenario">
+ <scenario tableName="PHERF.TEST_TABLE" rowCount="100" name="testScenarioRW">
+ <!-- Scenario level rule overrides will be unsupported in V1.
+ You can use the general datamappings in the meantime -->
+ <dataOverride>
+ <column>
+ <type>VARCHAR</type>
+ <userDefined>true</userDefined>
+ <dataSequence>RANDOM</dataSequence>
+ <length>10</length>
+ <name>FIELD</name>
+ </column>
+ </dataOverride>
+
+ <!--
+ This is used to add mixed R/W workloads.
+
+ If this tag exists, a writer pool will be created based on the below properties.
+ These props will override the default values in pherf.properties, but only for this
+ scenario. The write jobs will run in conjunction with the querySet below.
+ -->
+ <writeParams executionDurationInMs="10000">
+ <!--
+ Number of writer threads to insert into the thread pool
+ -->
+ <writerThreadCount>2</writerThreadCount>
+
+ <!--
+ Time in Ms that each thread will sleep between batch writes. This helps to
+ throttle writers.
+ -->
+ <threadSleepDuration>10</threadSleepDuration>
+
+ <batchSize>1000</batchSize>
+ </writeParams>
+ <querySet concurrency="1" executionType="PARALLEL" executionDurationInMs="10000">
+ <query id="q3" statement="select count(*) from PHERF.TEST_TABLE"/>
+ <query id="q4" statement="select sum(DIVISION) from PHERF.TEST_TABLE"/>
+ </querySet>
+
+ </scenario>
+
+ <scenario tableName="PHERF.TEST_TABLE" rowCount="30" name="testScenario">
<!-- Scenario level rule overrides will be unsupported in V1.
You can use the general datamappings in the meantime -->
<dataOverride>
@@ -145,16 +185,20 @@
<!--Note: 1. Minimum of executionDurationInMs or numberOfExecutions, whichever is reached first
2. DDL included in query are executed only once on start of querySet execution.
-->
- <querySet concurrency="1-3" executionType="SERIAL" executionDurationInMs="5000" numberOfExecutions="100">
- <query id="q1" tenantId="123456789012345" expectedAggregateRowCount="0" statement="select count(*) from PHERF.TEST_TABLE"/>
+ <querySet concurrency="1-3" executionType="SERIAL" executionDurationInMs="5000"
+ numberOfExecutions="100">
+ <query id="q1" tenantId="123456789012345" expectedAggregateRowCount="0"
+ statement="select count(*) from PHERF.TEST_TABLE"/>
<!-- queryGroup is a way to organize queries across tables or scenario files.
The value will be dumped to results. This gives a value to group by on reporting to compare queries -->
- <query id="q2" queryGroup="g1" statement="select sum(SOME_INT) from PHERF.TEST_TABLE"/>
+ <query id="q2" queryGroup="g1"
+ statement="select sum(SOME_INT) from PHERF.TEST_TABLE"/>
</querySet>
<!-- Minimum of executionDurationInMs or numberOfExecutions, whichever is reached first -->
- <querySet concurrency="2-3" executionType="PARALLEL" executionDurationInMs="10000" numberOfExecutions="10">
+ <querySet concurrency="2-3" executionType="PARALLEL" executionDurationInMs="10000"
+ numberOfExecutions="10">
<query id="q3" statement="select count(*) from PHERF.TEST_TABLE"/>
- <query id="q4" statement="select sum(DIVISION) from PHERF.TEST_TABLE"/>
+ <query id="q4" statement="select sum(SOME_INT) from PHERF.TEST_TABLE"/>
</querySet>
</scenario>
</scenarios>
[27/47] phoenix git commit: PHOENIX-2073 Two bytes character in LIKE
expression is not allowed (Yuhao Bi)
Posted by ma...@apache.org.
PHOENIX-2073 Two bytes character in LIKE expression is not allowed (Yuhao Bi)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/11577dd7
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/11577dd7
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/11577dd7
Branch: refs/heads/calcite
Commit: 11577dd7b2f722cff0ef410bed60ec1ef6b9c55c
Parents: c1e5723
Author: Yuhao Bi <by...@gmail.com>
Authored: Thu Jun 25 15:41:06 2015 +0800
Committer: Thomas D'Silva <td...@salesforce.com>
Committed: Fri Jun 26 11:02:00 2015 -0700
----------------------------------------------------------------------
.../apache/phoenix/compile/WhereOptimizer.java | 3 ++-
.../phoenix/compile/WhereOptimizerTest.java | 18 ++++++++++++++++++
2 files changed, 20 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11577dd7/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
index b7f04e0..0cbef11 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
@@ -65,6 +65,7 @@ import org.apache.phoenix.schema.tuple.Tuple;
import org.apache.phoenix.schema.types.PChar;
import org.apache.phoenix.schema.types.PDataType;
import org.apache.phoenix.schema.types.PVarbinary;
+import org.apache.phoenix.schema.types.PVarchar;
import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.MetaDataUtil;
import org.apache.phoenix.util.ScanUtil;
@@ -952,7 +953,7 @@ public class WhereOptimizer {
KeySlots childSlots = childParts.get(0);
KeySlot childSlot = childSlots.iterator().next();
final String startsWith = node.getLiteralPrefix();
- byte[] key = PChar.INSTANCE.toBytes(startsWith, node.getChildren().get(0).getSortOrder());
+ byte[] key = PVarchar.INSTANCE.toBytes(startsWith, node.getChildren().get(0).getSortOrder());
// If the expression is an equality expression against a fixed length column
// and the key length doesn't match the column length, the expression can
// never be true.
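
The one-character-type swap matters because the scan key is built from the prefix's bytes, and with multi-byte UTF-8 characters the byte length no longer matches the character length. A plain-Java illustration (not Phoenix code):

    String prefix = "中文";  // two characters
    int chars = prefix.length();                                                  // 2
    int bytes = prefix.getBytes(java.nio.charset.StandardCharsets.UTF_8).length;  // 6
    // A fixed-length (CHAR-style) conversion sized by character count can
    // mis-size the row key for such a prefix; the VARCHAR conversion simply
    // UTF-8-encodes the string, so multi-byte prefixes survive intact.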
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11577dd7/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
index adbd9a2..c1787ca 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
@@ -688,6 +688,24 @@ public class WhereOptimizerTest extends BaseConnectionlessQueryTest {
}
@Test
+ public void testLikeExtractAllKeyExpression2() throws SQLException {
+ String tenantId = "000000000000001";
+ String keyPrefix = "ä¸æ–‡";
+ String query = "select * from atable where organization_id = ? and entity_id LIKE '" + keyPrefix + "%'";
+ List<Object> binds = Arrays.<Object>asList(tenantId);
+ StatementContext context = compileStatement(query, binds);
+ Scan scan = context.getScan();
+
+ assertNull(scan.getFilter());
+ byte[] startRow = ByteUtil.concat(
+ PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix),15));
+ assertArrayEquals(startRow, scan.getStartRow());
+ byte[] stopRow = ByteUtil.concat(
+ PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(ByteUtil.nextKey(PVarchar.INSTANCE.toBytes(keyPrefix)),15));
+ assertArrayEquals(stopRow, scan.getStopRow());
+ }
+
+ @Test
public void testLikeExtractAllAsEqKeyExpression() throws SQLException {
String tenantId = "000000000000001";
String keyPrefix = "002";
[33/47] phoenix git commit: PHOENIX-1819 Build a framework to capture
and report phoenix client side request level metrics
Posted by ma...@apache.org.
PHOENIX-1819 Build a framework to capture and report phoenix client side request level metrics
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0f6595c0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0f6595c0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0f6595c0
Branch: refs/heads/calcite
Commit: 0f6595c0c511a3f07c51cf92d1ced665556b7d4c
Parents: 9c069bd
Author: Samarth <sa...@salesforce.com>
Authored: Fri Jun 26 16:44:43 2015 -0700
Committer: Samarth <sa...@salesforce.com>
Committed: Fri Jun 26 16:44:43 2015 -0700
----------------------------------------------------------------------
.../phoenix/end2end/PhoenixMetricsIT.java | 147 ----
.../apache/phoenix/execute/PartialCommitIT.java | 1 +
.../phoenix/monitoring/PhoenixMetricsIT.java | 815 +++++++++++++++++++
.../apache/phoenix/cache/ServerCacheClient.java | 7 +
.../apache/phoenix/compile/DeleteCompiler.java | 50 +-
.../MutatingParallelIteratorFactory.java | 51 +-
.../phoenix/compile/StatementContext.java | 49 +-
.../apache/phoenix/compile/UpsertCompiler.java | 80 +-
.../apache/phoenix/execute/AggregatePlan.java | 8 +-
.../apache/phoenix/execute/HashJoinPlan.java | 7 +
.../apache/phoenix/execute/MutationState.java | 290 ++++---
.../org/apache/phoenix/execute/UnionPlan.java | 8 +-
.../phoenix/iterate/BaseResultIterators.java | 15 +-
.../phoenix/iterate/ChunkedResultIterator.java | 21 +-
.../iterate/ParallelIteratorFactory.java | 4 +-
.../phoenix/iterate/ParallelIterators.java | 25 +-
.../iterate/RoundRobinResultIterator.java | 4 +-
.../phoenix/iterate/ScanningResultIterator.java | 38 +-
.../apache/phoenix/iterate/SerialIterators.java | 23 +-
.../phoenix/iterate/SpoolingResultIterator.java | 49 +-
.../phoenix/iterate/TableResultIterator.java | 17 +-
.../phoenix/iterate/UnionResultIterators.java | 70 +-
.../apache/phoenix/jdbc/PhoenixConnection.java | 27 +-
.../phoenix/jdbc/PhoenixDatabaseMetaData.java | 21 +-
.../apache/phoenix/jdbc/PhoenixResultSet.java | 48 +-
.../apache/phoenix/jdbc/PhoenixStatement.java | 20 +-
.../java/org/apache/phoenix/job/JobManager.java | 60 +-
.../phoenix/mapreduce/CsvBulkLoadTool.java | 10 +-
.../phoenix/mapreduce/PhoenixRecordReader.java | 12 +-
.../phoenix/memory/GlobalMemoryManager.java | 5 -
.../apache/phoenix/monitoring/AtomicMetric.java | 70 ++
.../phoenix/monitoring/CombinableMetric.java | 77 ++
.../monitoring/CombinableMetricImpl.java | 77 ++
.../org/apache/phoenix/monitoring/Counter.java | 85 --
.../phoenix/monitoring/GlobalClientMetrics.java | 117 +++
.../apache/phoenix/monitoring/GlobalMetric.java | 37 +
.../phoenix/monitoring/GlobalMetricImpl.java | 74 ++
.../phoenix/monitoring/MemoryMetricsHolder.java | 43 +
.../org/apache/phoenix/monitoring/Metric.java | 45 +-
.../apache/phoenix/monitoring/MetricType.java | 55 ++
.../phoenix/monitoring/MetricsStopWatch.java | 59 ++
.../phoenix/monitoring/MutationMetricQueue.java | 131 +++
.../phoenix/monitoring/NonAtomicMetric.java | 71 ++
.../phoenix/monitoring/OverAllQueryMetrics.java | 121 +++
.../phoenix/monitoring/PhoenixMetrics.java | 118 ---
.../phoenix/monitoring/ReadMetricQueue.java | 180 ++++
.../phoenix/monitoring/SizeStatistic.java | 78 --
.../monitoring/SpoolingMetricsHolder.java | 43 +
.../monitoring/TaskExecutionMetricsHolder.java | 68 ++
.../phoenix/query/BaseQueryServicesImpl.java | 2 +-
.../org/apache/phoenix/query/QueryServices.java | 3 +-
.../phoenix/query/QueryServicesOptions.java | 25 +-
.../phoenix/trace/PhoenixMetricsSink.java | 36 +-
.../java/org/apache/phoenix/util/JDBCUtil.java | 6 +-
.../org/apache/phoenix/util/PhoenixRuntime.java | 175 +++-
.../iterate/SpoolingResultIteratorTest.java | 4 +-
56 files changed, 2930 insertions(+), 852 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixMetricsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixMetricsIT.java
deleted file mode 100644
index edb4042..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixMetricsIT.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.apache.phoenix.monitoring.PhoenixMetrics.CountMetric.FAILED_QUERY;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.CountMetric.MUTATION_COUNT;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.CountMetric.NUM_SPOOL_FILE;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.CountMetric.QUERY_COUNT;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.CountMetric.QUERY_TIMEOUT;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.CountMetric.REJECTED_TASK_COUNT;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.SizeMetric.MUTATION_BATCH_SIZE;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.SizeMetric.MUTATION_BYTES;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.SizeMetric.MUTATION_COMMIT_TIME;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.SizeMetric.PARALLEL_SCANS;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.SizeMetric.QUERY_TIME;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.SizeMetric.SCAN_BYTES;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-
-import org.apache.phoenix.monitoring.Metric;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.junit.Test;
-
-public class PhoenixMetricsIT extends BaseHBaseManagedTimeIT {
-
- @Test
- public void testResetPhoenixMetrics() {
- resetMetrics();
- for (Metric m : PhoenixRuntime.getInternalPhoenixMetrics()) {
- assertEquals(0, m.getTotalSum());
- assertEquals(0, m.getNumberOfSamples());
- }
- }
-
- @Test
- public void testPhoenixMetricsForQueries() throws Exception {
- createTableAndInsertValues("T", true);
- resetMetrics(); // we want to count metrics related only to the below query
- Connection conn = DriverManager.getConnection(getUrl());
- String query = "SELECT * FROM T";
- ResultSet rs = conn.createStatement().executeQuery(query);
- while (rs.next()) {
- rs.getString(1);
- rs.getString(2);
- }
- assertEquals(1, PARALLEL_SCANS.getMetric().getTotalSum());
- assertEquals(1, QUERY_COUNT.getMetric().getTotalSum());
- assertEquals(0, REJECTED_TASK_COUNT.getMetric().getTotalSum());
- assertEquals(0, QUERY_TIMEOUT.getMetric().getTotalSum());
- assertEquals(0, FAILED_QUERY.getMetric().getTotalSum());
- assertEquals(0, NUM_SPOOL_FILE.getMetric().getTotalSum());
- assertEquals(0, MUTATION_BATCH_SIZE.getMetric().getTotalSum());
- assertEquals(0, MUTATION_BYTES.getMetric().getTotalSum());
- assertEquals(0, MUTATION_COMMIT_TIME.getMetric().getTotalSum());
-
- assertTrue(SCAN_BYTES.getMetric().getTotalSum() > 0);
- assertTrue(QUERY_TIME.getMetric().getTotalSum() > 0);
- }
-
- @Test
- public void testPhoenixMetricsForMutations() throws Exception {
- createTableAndInsertValues("T", true);
- assertEquals(10, MUTATION_BATCH_SIZE.getMetric().getTotalSum());
- assertEquals(10, MUTATION_COUNT.getMetric().getTotalSum());
- assertTrue(MUTATION_BYTES.getMetric().getTotalSum() > 0);
- assertTrue(MUTATION_COMMIT_TIME.getMetric().getTotalSum() > 0);
- assertEquals(0, PARALLEL_SCANS.getMetric().getTotalSum());
- assertEquals(0, QUERY_COUNT.getMetric().getTotalSum());
- assertEquals(0, REJECTED_TASK_COUNT.getMetric().getTotalSum());
- assertEquals(0, QUERY_TIMEOUT.getMetric().getTotalSum());
- assertEquals(0, FAILED_QUERY.getMetric().getTotalSum());
- assertEquals(0, NUM_SPOOL_FILE.getMetric().getTotalSum());
- }
-
-
- @Test
- public void testPhoenixMetricsForUpsertSelect() throws Exception {
- createTableAndInsertValues("T", true);
- resetMetrics();
- String ddl = "CREATE TABLE T2 (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)";
- Connection conn = DriverManager.getConnection(getUrl());
- conn.createStatement().execute(ddl);
- resetMetrics();
- String dml = "UPSERT INTO T2 (K, V) SELECT K, V FROM T";
- conn.createStatement().executeUpdate(dml);
- conn.commit();
- assertEquals(10, MUTATION_BATCH_SIZE.getMetric().getTotalSum());
- assertEquals(1, MUTATION_COUNT.getMetric().getTotalSum());
- assertEquals(1, PARALLEL_SCANS.getMetric().getTotalSum());
- assertEquals(0, QUERY_TIME.getMetric().getTotalSum());
- assertTrue(SCAN_BYTES.getMetric().getTotalSum() > 0);
- assertTrue(MUTATION_BYTES.getMetric().getTotalSum() > 0);
- assertTrue(MUTATION_COMMIT_TIME.getMetric().getTotalSum() > 0);
- assertEquals(0, QUERY_COUNT.getMetric().getTotalSum());
- assertEquals(0, REJECTED_TASK_COUNT.getMetric().getTotalSum());
- assertEquals(0, QUERY_TIMEOUT.getMetric().getTotalSum());
- assertEquals(0, FAILED_QUERY.getMetric().getTotalSum());
- assertEquals(0, NUM_SPOOL_FILE.getMetric().getTotalSum());
- }
-
- private static void resetMetrics() {
- for (Metric m : PhoenixRuntime.getInternalPhoenixMetrics()) {
- m.reset();
- }
- }
-
- private static void createTableAndInsertValues(String tableName, boolean resetMetricsAfterTableCreate) throws Exception {
- String ddl = "CREATE TABLE " + tableName + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)";
- Connection conn = DriverManager.getConnection(getUrl());
- conn.createStatement().execute(ddl);
- if (resetMetricsAfterTableCreate) {
- resetMetrics();
- }
- // executing 10 upserts/mutations.
- String dml = "UPSERT INTO " + tableName + " VALUES (?, ?)";
- PreparedStatement stmt = conn.prepareStatement(dml);
- for (int i = 1; i <= 10; i++) {
- stmt.setString(1, "key" + i);
- stmt.setString(2, "value" + i);
- stmt.executeUpdate();
- }
- conn.commit();
- }
-
-
-
-}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
index c8696e2..e0f0a3c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
@@ -260,6 +260,7 @@ public class PartialCommitIT {
PhoenixConnection phxCon = new PhoenixConnection(con.unwrap(PhoenixConnection.class));
final Map<TableRef,Map<ImmutableBytesPtr,MutationState.RowMutationState>> mutations = Maps.newTreeMap(new TableRefComparator());
return new PhoenixConnection(phxCon) {
+ @Override
protected MutationState newMutationState(int maxSize) {
return new MutationState(maxSize, this, mutations);
};
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
new file mode 100644
index 0000000..d9ca8e8
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -0,0 +1,815 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
+ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
+ * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_FAILED_QUERY_COUNTER;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MUTATION_BATCH_SIZE;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MUTATION_BYTES;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MUTATION_COMMIT_TIME;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MUTATION_SQL_COUNTER;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_NUM_PARALLEL_SCANS;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_QUERY_TIME;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_QUERY_TIMEOUT_COUNTER;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_REJECTED_TASK_COUNTER;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_SCAN_BYTES;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_SELECT_SQL_COUNTER;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_SPOOL_FILE_COUNTER;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_TASK_END_TO_END_TIME;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_TASK_EXECUTION_TIME;
+import static org.apache.phoenix.monitoring.MetricType.MEMORY_CHUNK_BYTES;
+import static org.apache.phoenix.monitoring.MetricType.SCAN_BYTES;
+import static org.apache.phoenix.monitoring.MetricType.TASK_EXECUTED_COUNTER;
+import static org.apache.phoenix.monitoring.MetricType.TASK_EXECUTION_TIME;
+import static org.apache.phoenix.util.PhoenixRuntime.UPSERT_BATCH_SIZE_ATTRIB;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Properties;
+import java.util.Set;
+
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixResultSet;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+public class PhoenixMetricsIT extends BaseOwnClusterHBaseManagedTimeIT {
+
+ private static final List<String> mutationMetricsToSkip = Lists
+ .newArrayList(MetricType.MUTATION_COMMIT_TIME.name());
+ private static final List<String> readMetricsToSkip = Lists.newArrayList(MetricType.TASK_QUEUE_WAIT_TIME.name(),
+ MetricType.TASK_EXECUTION_TIME.name(), MetricType.TASK_END_TO_END_TIME.name());
+
+ @BeforeClass
+ public static void doSetup() throws Exception {
+ Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
+ // Enable request metric collection at the driver level
+ props.put(QueryServices.COLLECT_REQUEST_LEVEL_METRICS, String.valueOf(true));
+ setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ }
+
+ @Test
+ public void testResetGlobalPhoenixMetrics() {
+ resetGlobalMetrics();
+ for (GlobalMetric m : PhoenixRuntime.getGlobalPhoenixClientMetrics()) {
+ assertEquals(0, m.getTotalSum());
+ assertEquals(0, m.getNumberOfSamples());
+ }
+ }
+
+ @Test
+ public void testGlobalPhoenixMetricsForQueries() throws Exception {
+ createTableAndInsertValues("T", true);
+ resetGlobalMetrics(); // we want to count metrics related only to the below query
+ Connection conn = DriverManager.getConnection(getUrl());
+ String query = "SELECT * FROM T";
+ ResultSet rs = conn.createStatement().executeQuery(query);
+ while (rs.next()) {
+ rs.getString(1);
+ rs.getString(2);
+ }
+ assertEquals(1, GLOBAL_NUM_PARALLEL_SCANS.getMetric().getTotalSum());
+ assertEquals(1, GLOBAL_SELECT_SQL_COUNTER.getMetric().getTotalSum());
+ assertEquals(0, GLOBAL_REJECTED_TASK_COUNTER.getMetric().getTotalSum());
+ assertEquals(0, GLOBAL_QUERY_TIMEOUT_COUNTER.getMetric().getTotalSum());
+ assertEquals(0, GLOBAL_FAILED_QUERY_COUNTER.getMetric().getTotalSum());
+ assertEquals(0, GLOBAL_SPOOL_FILE_COUNTER.getMetric().getTotalSum());
+ assertEquals(0, GLOBAL_MUTATION_BATCH_SIZE.getMetric().getTotalSum());
+ assertEquals(0, GLOBAL_MUTATION_BYTES.getMetric().getTotalSum());
+ assertEquals(0, GLOBAL_MUTATION_COMMIT_TIME.getMetric().getTotalSum());
+
+ assertTrue(GLOBAL_SCAN_BYTES.getMetric().getTotalSum() > 0);
+ assertTrue(GLOBAL_QUERY_TIME.getMetric().getTotalSum() > 0);
+ assertTrue(GLOBAL_TASK_END_TO_END_TIME.getMetric().getTotalSum() > 0);
+ assertTrue(GLOBAL_TASK_EXECUTION_TIME.getMetric().getTotalSum() > 0);
+ }
+
+ @Test
+ public void testGlobalPhoenixMetricsForMutations() throws Exception {
+ createTableAndInsertValues("T", true);
+ assertEquals(10, GLOBAL_MUTATION_BATCH_SIZE.getMetric().getTotalSum());
+ assertEquals(10, GLOBAL_MUTATION_SQL_COUNTER.getMetric().getTotalSum());
+ assertTrue(GLOBAL_MUTATION_BYTES.getMetric().getTotalSum() > 0);
+ assertTrue(GLOBAL_MUTATION_COMMIT_TIME.getMetric().getTotalSum() > 0);
+ assertEquals(0, GLOBAL_NUM_PARALLEL_SCANS.getMetric().getTotalSum());
+ assertEquals(0, GLOBAL_SELECT_SQL_COUNTER.getMetric().getTotalSum());
+ assertEquals(0, GLOBAL_REJECTED_TASK_COUNTER.getMetric().getTotalSum());
+ assertEquals(0, GLOBAL_QUERY_TIMEOUT_COUNTER.getMetric().getTotalSum());
+ assertEquals(0, GLOBAL_FAILED_QUERY_COUNTER.getMetric().getTotalSum());
+ assertEquals(0, GLOBAL_SPOOL_FILE_COUNTER.getMetric().getTotalSum());
+ }
+
+ @Test
+ public void testGlobalPhoenixMetricsForUpsertSelect() throws Exception {
+ createTableAndInsertValues("T", true);
+ resetGlobalMetrics();
+ String ddl = "CREATE TABLE T2 (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)";
+ Connection conn = DriverManager.getConnection(getUrl());
+ conn.createStatement().execute(ddl);
+ resetGlobalMetrics();
+ String dml = "UPSERT INTO T2 (K, V) SELECT K, V FROM T";
+ conn.createStatement().executeUpdate(dml);
+ conn.commit();
+ assertEquals(10, GLOBAL_MUTATION_BATCH_SIZE.getMetric().getTotalSum());
+ assertEquals(1, GLOBAL_MUTATION_SQL_COUNTER.getMetric().getTotalSum());
+ assertEquals(1, GLOBAL_NUM_PARALLEL_SCANS.getMetric().getTotalSum());
+ assertEquals(0, GLOBAL_QUERY_TIME.getMetric().getTotalSum());
+ assertTrue(GLOBAL_SCAN_BYTES.getMetric().getTotalSum() > 0);
+ assertTrue(GLOBAL_MUTATION_BYTES.getMetric().getTotalSum() > 0);
+ assertTrue(GLOBAL_MUTATION_COMMIT_TIME.getMetric().getTotalSum() > 0);
+ assertEquals(0, GLOBAL_SELECT_SQL_COUNTER.getMetric().getTotalSum());
+ assertEquals(0, GLOBAL_REJECTED_TASK_COUNTER.getMetric().getTotalSum());
+ assertEquals(0, GLOBAL_QUERY_TIMEOUT_COUNTER.getMetric().getTotalSum());
+ assertEquals(0, GLOBAL_FAILED_QUERY_COUNTER.getMetric().getTotalSum());
+ assertEquals(0, GLOBAL_SPOOL_FILE_COUNTER.getMetric().getTotalSum());
+ }
+
+ private static void resetGlobalMetrics() {
+ for (GlobalMetric m : PhoenixRuntime.getGlobalPhoenixClientMetrics()) {
+ m.reset();
+ }
+ }
+
+ private static void createTableAndInsertValues(String tableName, boolean resetGlobalMetricsAfterTableCreate)
+ throws Exception {
+ String ddl = "CREATE TABLE " + tableName + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)";
+ Connection conn = DriverManager.getConnection(getUrl());
+ conn.createStatement().execute(ddl);
+ if (resetGlobalMetricsAfterTableCreate) {
+ resetGlobalMetrics();
+ }
+ // executing 10 upserts/mutations.
+ String dml = "UPSERT INTO " + tableName + " VALUES (?, ?)";
+ PreparedStatement stmt = conn.prepareStatement(dml);
+ for (int i = 1; i <= 10; i++) {
+ stmt.setString(1, "key" + i);
+ stmt.setString(2, "value" + i);
+ stmt.executeUpdate();
+ }
+ conn.commit();
+ }
+
+ @Test
+ public void testOverallQueryMetricsForSelect() throws Exception {
+ String tableName = "SCANMETRICS";
+ String ddl = "CREATE TABLE " + tableName + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)" + " SALT_BUCKETS = 6";
+ Connection conn = DriverManager.getConnection(getUrl());
+ conn.createStatement().execute(ddl);
+ }
+
+ @Test
+ public void testReadMetricsForSelect() throws Exception {
+ String tableName = "READMETRICSFORSELECT";
+ long numSaltBuckets = 6;
+ String ddl = "CREATE TABLE " + tableName + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)" + " SALT_BUCKETS = "
+ + numSaltBuckets;
+ Connection conn = DriverManager.getConnection(getUrl());
+ conn.createStatement().execute(ddl);
+
+ long numRows = 1000;
+ long numExpectedTasks = numSaltBuckets;
+ insertRowsInTable(tableName, numRows);
+
+ String query = "SELECT * FROM " + tableName;
+ Statement stmt = conn.createStatement();
+ ResultSet rs = stmt.executeQuery(query);
+ PhoenixResultSet resultSetBeingTested = rs.unwrap(PhoenixResultSet.class);
+ changeInternalStateForTesting(resultSetBeingTested);
+ while (resultSetBeingTested.next()) {}
+ resultSetBeingTested.close();
+ Set<String> expectedTableNames = Sets.newHashSet(tableName);
+ assertReadMetricValuesForSelectSql(Lists.newArrayList(numRows), Lists.newArrayList(numExpectedTasks),
+ resultSetBeingTested, expectedTableNames);
+ }
+
+ @Test
+ public void testMetricsForUpsert() throws Exception {
+ String tableName = "UPSERTMETRICS";
+ String ddl = "CREATE TABLE " + tableName + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)" + " SALT_BUCKETS = 6";
+ Connection ddlConn = DriverManager.getConnection(getUrl());
+ ddlConn.createStatement().execute(ddl);
+ ddlConn.close();
+
+ int numRows = 10;
+ Connection conn = insertRowsInTable(tableName, numRows);
+ PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
+ Map<String, Map<String, Long>> mutationMetrics = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(pConn);
+ for (Entry<String, Map<String, Long>> entry : mutationMetrics.entrySet()) {
+ String t = entry.getKey();
+ assertEquals("Table names didn't match!", tableName, t);
+ Map<String, Long> p = entry.getValue();
+ assertEquals("There should have been three metrics", 3, p.size());
+ boolean mutationBatchSizePresent = false;
+ boolean mutationCommitTimePresent = false;
+ boolean mutationBytesPresent = false;
+ for (Entry<String, Long> metric : p.entrySet()) {
+ String metricName = metric.getKey();
+ long metricValue = metric.getValue();
+ if (metricName.equals(MetricType.MUTATION_BATCH_SIZE.name())) {
+ assertEquals("Mutation batch sizes didn't match!", numRows, metricValue);
+ mutationBatchSizePresent = true;
+ } else if (metricName.equals(MetricType.MUTATION_COMMIT_TIME.name())) {
+ assertTrue("Mutation commit time should be greater than zero", metricValue > 0);
+ mutationCommitTimePresent = true;
+ } else if (metricName.equals(MetricType.MUTATION_BYTES.name())) {
+ assertTrue("Mutation bytes size should be greater than zero", metricValue > 0);
+ mutationBytesPresent = true;
+ }
+ }
+ assertTrue(mutationBatchSizePresent);
+ assertTrue(mutationCommitTimePresent);
+ assertTrue(mutationBytesPresent);
+ }
+ Map<String, Map<String, Long>> readMetrics = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(pConn);
+ assertEquals("Read metrics should be empty", 0, readMetrics.size());
+ }
+
+ @Test
+ public void testMetricsForUpsertSelect() throws Exception {
+ String tableName1 = "UPSERTFROM";
+ long table1SaltBuckets = 6;
+ String ddl = "CREATE TABLE " + tableName1 + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)" + " SALT_BUCKETS = "
+ + table1SaltBuckets;
+ Connection ddlConn = DriverManager.getConnection(getUrl());
+ ddlConn.createStatement().execute(ddl);
+ ddlConn.close();
+ int numRows = 10;
+ insertRowsInTable(tableName1, numRows);
+
+ String tableName2 = "UPSERTTO";
+ ddl = "CREATE TABLE " + tableName2 + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)" + " SALT_BUCKETS = 10";
+ ddlConn = DriverManager.getConnection(getUrl());
+ ddlConn.createStatement().execute(ddl);
+ ddlConn.close();
+
+ Connection conn = DriverManager.getConnection(getUrl());
+ String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " + tableName1;
+ conn.createStatement().executeUpdate(upsertSelect);
+ conn.commit();
+ PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
+
+ Map<String, Map<String, Long>> mutationMetrics = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(pConn);
+ assertMutationMetrics(tableName2, numRows, mutationMetrics);
+ Map<String, Map<String, Long>> readMetrics = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(pConn);
+ assertReadMetricsForMutatingSql(tableName1, table1SaltBuckets, readMetrics);
+ }
+
+ @Test
+ public void testMetricsForDelete() throws Exception {
+ String tableName = "DELETEMETRICS";
+ long tableSaltBuckets = 6;
+ String ddl = "CREATE TABLE " + tableName + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)" + " SALT_BUCKETS = "
+ + tableSaltBuckets;
+ Connection ddlConn = DriverManager.getConnection(getUrl());
+ ddlConn.createStatement().execute(ddl);
+ ddlConn.close();
+ int numRows = 10;
+ insertRowsInTable(tableName, numRows);
+ Connection conn = DriverManager.getConnection(getUrl());
+ String delete = "DELETE FROM " + tableName;
+ conn.createStatement().execute(delete);
+ conn.commit();
+ PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
+ Map<String, Map<String, Long>> mutationMetrics = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(pConn);
+ assertMutationMetrics(tableName, numRows, mutationMetrics);
+
+ Map<String, Map<String, Long>> readMetrics = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(pConn);
+ assertReadMetricsForMutatingSql(tableName, tableSaltBuckets, readMetrics);
+ }
+
+ @Test
+ public void testNoMetricsCollectedForConnection() throws Exception {
+ String tableName = "NOMETRICS";
+ long tableSaltBuckets = 6;
+ String ddl = "CREATE TABLE " + tableName + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)" + " SALT_BUCKETS = "
+ + tableSaltBuckets;
+ Connection ddlConn = DriverManager.getConnection(getUrl());
+ ddlConn.createStatement().execute(ddl);
+ ddlConn.close();
+ int numRows = 10;
+ insertRowsInTable(tableName, numRows);
+ Properties props = new Properties();
+ props.setProperty(QueryServices.COLLECT_REQUEST_LEVEL_METRICS, "false");
+ Connection conn = DriverManager.getConnection(getUrl(), props);
+ ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM " + tableName);
+ while (rs.next()) {}
+ rs.close();
+ Map<String, Map<String, Long>> readMetrics = PhoenixRuntime.getRequestReadMetrics(rs);
+ assertTrue("No read metrics should have been generated", readMetrics.size() == 0);
+ conn.createStatement().executeUpdate("UPSERT INTO " + tableName + " VALUES ('KEY', 'VALUE')");
+ conn.commit();
+ Map<String, Map<String, Long>> writeMetrics = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(conn);
+ assertTrue("No write metrics should have been generated", writeMetrics.size() == 0);
+ }
+
+ @Test
+ public void testMetricsForUpsertWithAutoCommit() throws Exception {
+ String tableName = "VERIFYUPSERTAUTOCOMMIT";
+ long tableSaltBuckets = 6;
+ String ddl = "CREATE TABLE " + tableName + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)" + " SALT_BUCKETS = "
+ + tableSaltBuckets;
+ try (Connection ddlConn = DriverManager.getConnection(getUrl())) {
+ ddlConn.createStatement().execute(ddl);
+ }
+
+ String upsert = "UPSERT INTO " + tableName + " VALUES (?, ?)";
+ int numRows = 10;
+ Map<String, Map<String, Long>> mutationMetricsForAutoCommitOff = null;
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ conn.setAutoCommit(false);
+ upsertRows(upsert, numRows, conn);
+ conn.commit();
+ mutationMetricsForAutoCommitOff = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(conn);
+ }
+
+ // Insert rows now with auto-commit on
+ Map<String, Map<String, Long>> mutationMetricsAutoCommitOn = null;
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ conn.setAutoCommit(true);
+ upsertRows(upsert, numRows, conn);
+ mutationMetricsAutoCommitOn = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(conn);
+ }
+ // Verify that the mutation metrics are same for both cases
+ assertMetricsAreSame(mutationMetricsForAutoCommitOff, mutationMetricsAutoCommitOn, mutationMetricsToSkip);
+ }
+
+ private void upsertRows(String upsert, int numRows, Connection conn) throws SQLException {
+ PreparedStatement stmt = conn.prepareStatement(upsert);
+ for (int i = 1; i <= numRows; i++) {
+ stmt.setString(1, "key" + i);
+ stmt.setString(2, "value" + i);
+ stmt.executeUpdate();
+ }
+ }
+
+ @Test
+ public void testMetricsForDeleteWithAutoCommit() throws Exception {
+ String tableName = "VERIFYDELETEAUTOCOMMIT";
+ long tableSaltBuckets = 6;
+ String ddl = "CREATE TABLE " + tableName + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)" + " SALT_BUCKETS = "
+ + tableSaltBuckets;
+ try (Connection ddlConn = DriverManager.getConnection(getUrl())) {
+ ddlConn.createStatement().execute(ddl);
+ }
+
+ String upsert = "UPSERT INTO " + tableName + " VALUES (?, ?)";
+ int numRows = 10;
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ conn.setAutoCommit(false);
+ upsertRows(upsert, numRows, conn);
+ conn.commit();
+ }
+
+ String delete = "DELETE FROM " + tableName;
+ // Delete rows now with auto-commit off
+ Map<String, Map<String, Long>> deleteMetricsWithAutoCommitOff = null;
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ conn.setAutoCommit(false);
+ conn.createStatement().executeUpdate(delete);
+ deleteMetricsWithAutoCommitOff = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(conn);
+ }
+
+ // Upsert the rows back
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ conn.setAutoCommit(false);
+ upsertRows(upsert, numRows, conn);
+ conn.commit();
+ }
+
+ // Now delete rows with auto-commit on
+ Map<String, Map<String, Long>> deleteMetricsWithAutoCommitOn = null;
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ conn.setAutoCommit(true);
+ conn.createStatement().executeUpdate(delete);
+ deleteMetricsWithAutoCommitOn = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(conn);
+ }
+
+ // Verify that the mutation metrics are same for both cases.
+ assertMetricsAreSame(deleteMetricsWithAutoCommitOff, deleteMetricsWithAutoCommitOn, mutationMetricsToSkip);
+ }
+
+ @Test
+ public void testMetricsForUpsertSelectWithAutoCommit() throws Exception {
+ String tableName1 = "UPSERTFROMAUTOCOMMIT";
+ long table1SaltBuckets = 6;
+ String ddl = "CREATE TABLE " + tableName1 + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)" + " SALT_BUCKETS = "
+ + table1SaltBuckets;
+ Connection ddlConn = DriverManager.getConnection(getUrl());
+ ddlConn.createStatement().execute(ddl);
+ ddlConn.close();
+ int numRows = 10;
+ insertRowsInTable(tableName1, numRows);
+
+ String tableName2 = "UPSERTTOAUTCOMMIT";
+ ddl = "CREATE TABLE " + tableName2 + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)" + " SALT_BUCKETS = 10";
+ ddlConn = DriverManager.getConnection(getUrl());
+ ddlConn.createStatement().execute(ddl);
+ ddlConn.close();
+
+ String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " + tableName1;
+
+ Map<String, Map<String, Long>> mutationMetricsAutoCommitOff = null;
+ Map<String, Map<String, Long>> readMetricsAutoCommitOff = null;
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ conn.setAutoCommit(false);
+ conn.createStatement().executeUpdate(upsertSelect);
+ conn.commit();
+ PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
+ mutationMetricsAutoCommitOff = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(pConn);
+ readMetricsAutoCommitOff = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(pConn);
+ }
+
+ Map<String, Map<String, Long>> mutationMetricsAutoCommitOn = null;
+ Map<String, Map<String, Long>> readMetricsAutoCommitOn = null;
+
+ int autoCommitBatchSize = numRows + 1; // batch size of 11 is greater than numRows, so all rows are committed in a single batch
+ Properties props = new Properties();
+ props.setProperty(UPSERT_BATCH_SIZE_ATTRIB, Integer.toString(autoCommitBatchSize));
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ conn.setAutoCommit(true);
+ conn.createStatement().executeUpdate(upsertSelect);
+ PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
+ mutationMetricsAutoCommitOn = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(pConn);
+ readMetricsAutoCommitOn = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(pConn);
+ }
+ assertMetricsAreSame(mutationMetricsAutoCommitOff, mutationMetricsAutoCommitOn, mutationMetricsToSkip);
+ assertMetricsAreSame(readMetricsAutoCommitOff, readMetricsAutoCommitOn, readMetricsToSkip);
+
+ autoCommitBatchSize = numRows - 1; // batch size of 9 is less than numRows and is not a divisor of numRows
+ props = new Properties();
+ props.setProperty(UPSERT_BATCH_SIZE_ATTRIB, Integer.toString(autoCommitBatchSize));
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ conn.setAutoCommit(true);
+ conn.createStatement().executeUpdate(upsertSelect);
+ PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
+ mutationMetricsAutoCommitOn = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(pConn);
+ readMetricsAutoCommitOn = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(pConn);
+ }
+ assertMetricsAreSame(mutationMetricsAutoCommitOff, mutationMetricsAutoCommitOn, mutationMetricsToSkip);
+ assertMetricsAreSame(readMetricsAutoCommitOff, readMetricsAutoCommitOn, readMetricsToSkip);
+
+ autoCommitBatchSize = numRows;
+ props = new Properties();
+ props.setProperty(UPSERT_BATCH_SIZE_ATTRIB, Integer.toString(autoCommitBatchSize));
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ conn.setAutoCommit(true);
+ conn.createStatement().executeUpdate(upsertSelect);
+ PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
+ mutationMetricsAutoCommitOn = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(pConn);
+ readMetricsAutoCommitOn = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(pConn);
+ }
+ assertMetricsAreSame(mutationMetricsAutoCommitOff, mutationMetricsAutoCommitOn, mutationMetricsToSkip);
+ assertMetricsAreSame(readMetricsAutoCommitOff, readMetricsAutoCommitOn, readMetricsToSkip);
+
+ autoCommitBatchSize = 2; // multiple batches of equal size
+ props = new Properties();
+ props.setProperty(UPSERT_BATCH_SIZE_ATTRIB, Integer.toString(autoCommitBatchSize));
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ conn.setAutoCommit(true);
+ conn.createStatement().executeUpdate(upsertSelect);
+ PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
+ mutationMetricsAutoCommitOn = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(pConn);
+ readMetricsAutoCommitOn = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(pConn);
+ }
+ assertMetricsAreSame(mutationMetricsAutoCommitOff, mutationMetricsAutoCommitOn, mutationMetricsToSkip);
+ assertMetricsAreSame(readMetricsAutoCommitOff, readMetricsAutoCommitOn, readMetricsToSkip);
+ }
+
+ @Test
+ public void testMutationMetricsWhenUpsertingToMultipleTables() throws Exception {
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ String table1 = "TABLE1";
+ createTableAndInsertValues(true, 10, conn, table1);
+ String table2 = "TABLE2";
+ createTableAndInsertValues(true, 10, conn, table2);
+ String table3 = "TABLE3";
+ createTableAndInsertValues(true, 10, conn, table3);
+ Map<String, Map<String, Long>> mutationMetrics = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(conn);
+ assertTrue("Mutation metrics not present for " + table1, mutationMetrics.get(table1) != null);
+ assertTrue("Mutation metrics not present for " + table2, mutationMetrics.get(table2) != null);
+ assertTrue("Mutation metrics not present for " + table3, mutationMetrics.get(table3) != null);
+ assertMetricsHaveSameValues(mutationMetrics.get(table1), mutationMetrics.get(table2), mutationMetricsToSkip);
+ assertMetricsHaveSameValues(mutationMetrics.get(table1), mutationMetrics.get(table3), mutationMetricsToSkip);
+ }
+ }
+
+ @Test
+ public void testClosingConnectionClearsMetrics() throws Exception {
+ Connection conn = null;
+ try {
+ conn = DriverManager.getConnection(getUrl());
+ createTableAndInsertValues(true, 10, conn, "clearmetrics");
+ assertTrue("Mutation metrics not present", PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(conn).size() > 0);
+ } finally {
+ if (conn != null) {
+ conn.close();
+ assertTrue("Closing connection didn't clear metrics",
+ PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(conn).size() == 0);
+ }
+ }
+ }
+
+ @Test
+ public void testMetricsForUpsertingIntoImmutableTableWithIndices() throws Exception {
+ String dataTable = "IMMTABLEWITHINDICES";
+ String tableDdl = "CREATE TABLE "
+ + dataTable
+ + " (K1 VARCHAR NOT NULL, K2 VARCHAR NOT NULL, V1 INTEGER, V2 INTEGER, V3 INTEGER CONSTRAINT NAME_PK PRIMARY KEY(K1, K2)) IMMUTABLE_ROWS = true";
+ String index1 = "I1";
+ String index1Ddl = "CREATE INDEX " + index1 + " ON " + dataTable + " (V1) include (V2)";
+ String index2 = "I2";
+ String index2Ddl = "CREATE INDEX " + index2 + " ON " + dataTable + " (V2) include (V3)";
+ String index3 = "I3";
+ String index3Ddl = "CREATE INDEX " + index3 + " ON " + dataTable + " (V3) include (V1)";
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ conn.createStatement().execute(tableDdl);
+ conn.createStatement().execute(index1Ddl);
+ conn.createStatement().execute(index2Ddl);
+ conn.createStatement().execute(index3Ddl);
+ }
+ String upsert = "UPSERT INTO " + dataTable + " VALUES (?, ?, ?, ?, ?)";
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ /*
+ * Upsert data into table. Because the table is immutable, mutations for updating the indices on it are
+ * handled by the client itself. So mutation metrics should include mutations for the indices as well as the
+ * data table.
+ */
+ PreparedStatement stmt = conn.prepareStatement(upsert);
+ for (int i = 1; i < 10; i++) {
+ stmt.setString(1, "key1" + i);
+ stmt.setString(2, "key2" + i);
+ stmt.setInt(3, i);
+ stmt.setInt(4, i);
+ stmt.setInt(5, i);
+ stmt.executeUpdate();
+ }
+ conn.commit();
+ Map<String, Map<String, Long>> metrics = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(conn);
+ assertTrue(metrics.get(dataTable).size() > 0);
+ assertTrue(metrics.get(index1).size() > 0);
+ assertTrue(metrics.get(index2).size() > 0);
+ assertMetricsHaveSameValues(metrics.get(index1), metrics.get(index2), mutationMetricsToSkip);
+ assertTrue(metrics.get(index3).size() > 0);
+ assertMetricsHaveSameValues(metrics.get(index1), metrics.get(index3), mutationMetricsToSkip);
+ }
+ }
+
+ @Test
+ public void testMetricsForUpsertSelectSameTable() throws Exception {
+ String tableName = "UPSERTSAME";
+ long table1SaltBuckets = 6;
+ String ddl = "CREATE TABLE " + tableName + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)" + " SALT_BUCKETS = "
+ + table1SaltBuckets;
+ Connection ddlConn = DriverManager.getConnection(getUrl());
+ ddlConn.createStatement().execute(ddl);
+ ddlConn.close();
+ int numRows = 10;
+ insertRowsInTable(tableName, numRows);
+
+ Connection conn = DriverManager.getConnection(getUrl());
+ conn.setAutoCommit(false);
+ String upsertSelect = "UPSERT INTO " + tableName + " SELECT * FROM " + tableName;
+ conn.createStatement().executeUpdate(upsertSelect);
+ conn.commit();
+ PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
+
+ Map<String, Map<String, Long>> mutationMetrics = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(pConn);
+ // Because auto-commit is off, upsert select into the same table will run on the client.
+ // So we should have client side read and write metrics available.
+ assertMutationMetrics(tableName, numRows, mutationMetrics);
+ Map<String, Map<String, Long>> readMetrics = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(pConn);
+ assertReadMetricsForMutatingSql(tableName, table1SaltBuckets, readMetrics);
+ PhoenixRuntime.resetMetrics(pConn);
+ // Even with auto-commit on, this upsert select still runs on the client side.
+ conn.setAutoCommit(true);
+ conn.createStatement().executeUpdate(upsertSelect);
+ Map<String, Map<String, Long>> autoCommitMutationMetrics = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(pConn);
+ Map<String, Map<String, Long>> autoCommitReadMetrics = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(pConn);
+ assertMetricsAreSame(mutationMetrics, autoCommitMutationMetrics, mutationMetricsToSkip);
+ assertMetricsAreSame(readMetrics, autoCommitReadMetrics, readMetricsToSkip);
+ }
+
+ private void createTableAndInsertValues(boolean commit, int numRows, Connection conn, String tableName)
+ throws SQLException {
+ String ddl = "CREATE TABLE " + tableName + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)";
+ conn.createStatement().execute(ddl);
+ // executing 10 upserts/mutations.
+ String dml = "UPSERT INTO " + tableName + " VALUES (?, ?)";
+ PreparedStatement stmt = conn.prepareStatement(dml);
+ for (int i = 1; i <= numRows; i++) {
+ stmt.setString(1, "key" + i);
+ stmt.setString(2, "value" + i);
+ stmt.executeUpdate();
+ }
+ if (commit) {
+ conn.commit();
+ }
+ }
+
+ private void assertMetricsAreSame(Map<String, Map<String, Long>> metric1, Map<String, Map<String, Long>> metric2,
+ List<String> metricsToSkip) {
+ assertTrue("The two metrics have different or unequal number of table names ",
+ metric1.keySet().equals(metric2.keySet()));
+ for (Entry<String, Map<String, Long>> entry : metric1.entrySet()) {
+ Map<String, Long> metricNameValueMap1 = entry.getValue();
+ Map<String, Long> metricNameValueMap2 = metric2.get(entry.getKey());
+ assertMetricsHaveSameValues(metricNameValueMap1, metricNameValueMap2, metricsToSkip);
+ }
+ }
+
+ private void assertMetricsHaveSameValues(Map<String, Long> metricNameValueMap1,
+ Map<String, Long> metricNameValueMap2, List<String> metricsToSkip) {
+ assertTrue("The two metrics have different or unequal number of metric names ", metricNameValueMap1.keySet()
+ .equals(metricNameValueMap2.keySet()));
+ for (Entry<String, Long> entry : metricNameValueMap1.entrySet()) {
+ String metricName = entry.getKey();
+ if (!metricsToSkip.contains(metricName)) {
+ assertEquals("Unequal values for metric " + metricName, entry.getValue(),
+ metricNameValueMap2.get(metricName));
+ }
+ }
+ }
+
+ private void changeInternalStateForTesting(PhoenixResultSet rs) {
+ // get and set the internal state for testing purposes.
+ ReadMetricQueue testMetricsQueue = new TestReadMetricsQueue(true);
+ StatementContext ctx = (StatementContext)Whitebox.getInternalState(rs, "context");
+ Whitebox.setInternalState(ctx, "readMetricsQueue", testMetricsQueue);
+ Whitebox.setInternalState(rs, "readMetricsQueue", testMetricsQueue);
+ }
+
+ private void assertReadMetricValuesForSelectSql(ArrayList<Long> numRows, ArrayList<Long> numExpectedTasks,
+ PhoenixResultSet resultSetBeingTested, Set<String> expectedTableNames) throws SQLException {
+ Map<String, Map<String, Long>> metrics = PhoenixRuntime.getRequestReadMetrics(resultSetBeingTested);
+ int counter = 0;
+ for (Entry<String, Map<String, Long>> entry : metrics.entrySet()) {
+ String tableName = entry.getKey();
+ expectedTableNames.remove(tableName);
+ Map<String, Long> metricValues = entry.getValue();
+ boolean scanMetricsPresent = false;
+ boolean taskCounterMetricsPresent = false;
+ boolean taskExecutionTimeMetricsPresent = false;
+ boolean memoryMetricsPresent = false;
+ for (Entry<String, Long> pair : metricValues.entrySet()) {
+ String metricName = pair.getKey();
+ long metricValue = pair.getValue();
+ long n = numRows.get(counter);
+ long numTask = numExpectedTasks.get(counter);
+ if (metricName.equals(SCAN_BYTES.name())) {
+ // we are using a SCAN_BYTES_DELTA of 1. So number of scan bytes read should be number of rows read
+ assertEquals(n, metricValue);
+ scanMetricsPresent = true;
+ } else if (metricName.equals(TASK_EXECUTED_COUNTER.name())) {
+ assertEquals(numTask, metricValue);
+ taskCounterMetricsPresent = true;
+ } else if (metricName.equals(TASK_EXECUTION_TIME.name())) {
+ assertEquals(numTask * TASK_EXECUTION_TIME_DELTA, metricValue);
+ taskExecutionTimeMetricsPresent = true;
+ } else if (metricName.equals(MEMORY_CHUNK_BYTES.name())) {
+ assertEquals(numTask * MEMORY_CHUNK_BYTES_DELTA, metricValue);
+ memoryMetricsPresent = true;
+ }
+ }
+ counter++;
+ assertTrue(scanMetricsPresent);
+ assertTrue(taskCounterMetricsPresent);
+ assertTrue(taskExecutionTimeMetricsPresent);
+ assertTrue(memoryMetricsPresent);
+ }
+ PhoenixRuntime.resetMetrics(resultSetBeingTested);
+ assertTrue("Metrics not found tables " + Joiner.on(",").join(expectedTableNames),
+ expectedTableNames.size() == 0);
+ }
+
+ private Connection insertRowsInTable(String tableName, long numRows) throws SQLException {
+ String dml = "UPSERT INTO " + tableName + " VALUES (?, ?)";
+ Connection conn = DriverManager.getConnection(getUrl());
+ PreparedStatement stmt = conn.prepareStatement(dml);
+ for (int i = 1; i <= numRows; i++) {
+ stmt.setString(1, "key" + i);
+ stmt.setString(2, "value" + i);
+ stmt.executeUpdate();
+ }
+ conn.commit();
+ return conn;
+ }
+
+ // a SCAN_BYTES_DELTA of 1 means total scan bytes should equal the number of records read
+ public static final long SCAN_BYTES_DELTA = 1;
+
+ // total task execution time should be numTasks * TASK_EXECUTION_TIME_DELTA
+ public static final long TASK_EXECUTION_TIME_DELTA = 10;
+
+ // total memory chunk bytes should be numTasks * MEMORY_CHUNK_BYTES_DELTA
+ public static final long MEMORY_CHUNK_BYTES_DELTA = 100;
+
+ private class TestReadMetricsQueue extends ReadMetricQueue {
+
+ public TestReadMetricsQueue(boolean isRequestMetricsEnabled) {
+ super(isRequestMetricsEnabled);
+ }
+
+ @Override
+ public CombinableMetric getMetric(MetricType type) {
+ switch (type) {
+ case SCAN_BYTES:
+ return new CombinableMetricImpl(type) {
+
+ @Override
+ public void change(long delta) {
+ super.change(SCAN_BYTES_DELTA);
+ }
+ };
+ case TASK_EXECUTION_TIME:
+ return new CombinableMetricImpl(type) {
+
+ @Override
+ public void change(long delta) {
+ super.change(TASK_EXECUTION_TIME_DELTA);
+ }
+ };
+ case MEMORY_CHUNK_BYTES:
+ return new CombinableMetricImpl(type) {
+
+ @Override
+ public void change(long delta) {
+ super.change(MEMORY_CHUNK_BYTES_DELTA);
+ }
+ };
+ }
+ return super.getMetric(type);
+ }
+ }
+
+ private void assertReadMetricsForMutatingSql(String tableName, long tableSaltBuckets,
+ Map<String, Map<String, Long>> readMetrics) {
+ assertTrue("No read metrics present when there should have been!", readMetrics.size() > 0);
+ int numTables = 0;
+ for (Entry<String, Map<String, Long>> entry : readMetrics.entrySet()) {
+ String t = entry.getKey();
+ assertEquals("Table name didn't match for read metrics", tableName, t);
+ numTables++;
+ Map<String, Long> p = entry.getValue();
+ assertTrue("No read metrics present when there should have been", p.size() > 0);
+ for (Entry<String, Long> metric : p.entrySet()) {
+ String metricName = metric.getKey();
+ long metricValue = metric.getValue();
+ if (metricName.equals(TASK_EXECUTED_COUNTER.name())) {
+ assertEquals(tableSaltBuckets, metricValue);
+ } else if (metricName.equals(SCAN_BYTES.name())) {
+ assertTrue("Scan bytes read should be greater than zero", metricValue > 0);
+ }
+ }
+ }
+ assertEquals("There should have been read metrics only for one table: " + tableName, 1, numTables);
+ }
+
+ private void assertMutationMetrics(String tableName, int numRows, Map<String, Map<String, Long>> mutationMetrics) {
+ assertTrue("No mutation metrics present when there should have been", mutationMetrics.size() > 0);
+ for (Entry<String, Map<String, Long>> entry : mutationMetrics.entrySet()) {
+ String t = entry.getKey();
+ assertEquals("Table name didn't match for mutation metrics", tableName, t);
+ Map<String, Long> p = entry.getValue();
+ assertEquals("There should have been three metrics", 3, p.size());
+ for (Entry<String, Long> metric : p.entrySet()) {
+ String metricName = metric.getKey();
+ long metricValue = metric.getValue();
+ if (metricName.equals(MetricType.MUTATION_BATCH_SIZE.name())) {
+ assertEquals("Mutation batch sizes didn't match!", numRows, metricValue);
+ } else if (metricName.equals(MetricType.MUTATION_COMMIT_TIME.name())) {
+ assertTrue("Mutation commit time should be greater than zero", metricValue > 0);
+ } else if (metricName.equals(MetricType.MUTATION_BYTES.name())) {
+ assertTrue("Mutation bytes size should be greater than zero", metricValue > 0);
+ }
+ }
+ }
+ }
+
+}
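
For reviewers trying out the API, the request-level metrics exercised above boil down to a short client-side recipe. A minimal sketch (imports as in the test above), assuming a cluster reachable at "jdbc:phoenix:localhost" and an existing two-column table T, both placeholders:

    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
        ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM T");
        while (rs.next()) {} // drain the result set, as the tests above do
        // Read metrics for this request, keyed by table name and then by metric name.
        Map<String, Map<String, Long>> readMetrics = PhoenixRuntime.getRequestReadMetrics(rs);
        PhoenixRuntime.resetMetrics(rs);
        conn.createStatement().executeUpdate("UPSERT INTO T VALUES ('k', 'v')");
        conn.commit();
        // Write metrics accumulated on this connection since the last reset.
        Map<String, Map<String, Long>> writeMetrics = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(conn);
    }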
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
index 9718709..9ad9ef5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
@@ -17,6 +17,7 @@
*/
package org.apache.phoenix.cache;
+import static org.apache.phoenix.monitoring.TaskExecutionMetricsHolder.NO_OP_INSTANCE;
import static org.apache.phoenix.util.LogUtil.addCustomAnnotations;
import java.io.Closeable;
@@ -57,6 +58,7 @@ import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ServerCachin
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.job.JobManager.JobCallable;
import org.apache.phoenix.memory.MemoryManager.MemoryChunk;
+import org.apache.phoenix.monitoring.TaskExecutionMetricsHolder;
import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
@@ -226,6 +228,11 @@ public class ServerCacheClient {
public Object getJobId() {
return ServerCacheClient.this;
}
+
+ @Override
+ public TaskExecutionMetricsHolder getTaskExecutionMetric() {
+ return NO_OP_INSTANCE;
+ }
}));
} else {
if (LOG.isDebugEnabled()) {LOG.debug(addCustomAnnotations("NOT adding cache entry to be sent for " + entry + " since one already exists for that entry", connection));}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 575f0f3..a28f614 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -94,8 +94,9 @@ public class DeleteCompiler {
this.statement = statement;
}
- private static MutationState deleteRows(PhoenixStatement statement, TableRef targetTableRef, TableRef indexTableRef, ResultIterator iterator, RowProjector projector, TableRef sourceTableRef) throws SQLException {
+ private static MutationState deleteRows(StatementContext childContext, TableRef targetTableRef, TableRef indexTableRef, ResultIterator iterator, RowProjector projector, TableRef sourceTableRef) throws SQLException {
PTable table = targetTableRef.getTable();
+ PhoenixStatement statement = childContext.getStatement();
PhoenixConnection connection = statement.getConnection();
PName tenantId = connection.getTenantId();
byte[] tenantIdBytes = null;
@@ -114,19 +115,18 @@ public class DeleteCompiler {
if (indexTableRef != null) {
indexMutations = Maps.newHashMapWithExpectedSize(batchSize);
}
- try {
- List<PColumn> pkColumns = table.getPKColumns();
- boolean isMultiTenant = table.isMultiTenant() && tenantIdBytes != null;
- boolean isSharedViewIndex = table.getViewIndexId() != null;
- int offset = (table.getBucketNum() == null ? 0 : 1);
- byte[][] values = new byte[pkColumns.size()][];
- if (isMultiTenant) {
- values[offset++] = tenantIdBytes;
- }
- if (isSharedViewIndex) {
- values[offset++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
- }
- PhoenixResultSet rs = new PhoenixResultSet(iterator, projector, statement);
+ List<PColumn> pkColumns = table.getPKColumns();
+ boolean isMultiTenant = table.isMultiTenant() && tenantIdBytes != null;
+ boolean isSharedViewIndex = table.getViewIndexId() != null;
+ int offset = (table.getBucketNum() == null ? 0 : 1);
+ byte[][] values = new byte[pkColumns.size()][];
+ if (isMultiTenant) {
+ values[offset++] = tenantIdBytes;
+ }
+ if (isSharedViewIndex) {
+ values[offset++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
+ }
+ try (PhoenixResultSet rs = new PhoenixResultSet(iterator, projector, childContext)) {
int rowCount = 0;
while (rs.next()) {
ImmutableBytesPtr ptr = new ImmutableBytesPtr(); // allocate new as this is a key in a Map
@@ -183,8 +183,6 @@ public class DeleteCompiler {
state.join(indexState);
}
return state;
- } finally {
- iterator.close();
}
}
@@ -199,9 +197,16 @@ public class DeleteCompiler {
}
@Override
- protected MutationState mutate(StatementContext context, ResultIterator iterator, PhoenixConnection connection) throws SQLException {
+ protected MutationState mutate(StatementContext parentContext, ResultIterator iterator, PhoenixConnection connection) throws SQLException {
PhoenixStatement statement = new PhoenixStatement(connection);
- return deleteRows(statement, targetTableRef, indexTableRef, iterator, projector, sourceTableRef);
+ /*
+ * We don't want to collect any read metrics within the child context. This is because any read metrics that
+ * need to be captured are already getting collected in the parent statement context enclosed in the result
+ * iterator being used for reading rows out.
+ */
+ StatementContext ctx = new StatementContext(statement, false);
+ MutationState state = deleteRows(ctx, targetTableRef, indexTableRef, iterator, projector, sourceTableRef);
+ return state;
}
public void setTargetTableRef(TableRef tableRef) {
@@ -559,9 +564,14 @@ public class DeleteCompiler {
}
// Return total number of rows that have been deleted. In the case of auto commit being off
// the mutations will all be in the mutation state of the current connection.
- return new MutationState(maxSize, connection, totalRowCount);
+ MutationState state = new MutationState(maxSize, connection, totalRowCount);
+
+ // set the read metrics accumulated in the parent context so that it can be published when the mutations are committed.
+ state.setReadMetricQueue(plan.getContext().getReadMetricsQueue());
+
+ return state;
} else {
- return deleteRows(statement, tableRef, deleteFromImmutableIndexToo ? plan.getTableRef() : null, iterator, plan.getProjector(), plan.getTableRef());
+ return deleteRows(plan.getContext(), tableRef, deleteFromImmutableIndexToo ? plan.getTableRef() : null, iterator, plan.getProjector(), plan.getTableRef());
}
}
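
The same pattern recurs in UpsertCompiler further down, so it is worth stating once in isolation. A condensed sketch using the names from this hunk (plan, statement, iterator and the table refs are whatever the caller already holds):

    // Run the client-side mutation under a child context that does not collect read
    // metrics; the iterator already collects them in the parent statement context.
    StatementContext childContext = new StatementContext(statement, false);
    MutationState state = deleteRows(childContext, targetTableRef, indexTableRef, iterator, projector, sourceTableRef);
    // Hand the parent's read-metric queue to the mutation state so the reads done on
    // behalf of this DELETE are published when the mutations are committed.
    state.setReadMetricQueue(plan.getContext().getReadMetricsQueue());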
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java
index bcac17d..630760c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java
@@ -35,9 +35,9 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.schema.types.PLong;
import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PLong;
import org.apache.phoenix.util.KeyValueUtil;
/**
@@ -53,21 +53,34 @@ public abstract class MutatingParallelIteratorFactory implements ParallelIterato
/**
* Method that does the actual mutation work
*/
- abstract protected MutationState mutate(StatementContext context, ResultIterator iterator, PhoenixConnection connection) throws SQLException;
+ abstract protected MutationState mutate(StatementContext parentContext, ResultIterator iterator, PhoenixConnection connection) throws SQLException;
@Override
- public PeekingResultIterator newIterator(StatementContext context, ResultIterator iterator, Scan scan) throws SQLException {
- final PhoenixConnection connection = new PhoenixConnection(this.connection);
- MutationState state = mutate(context, iterator, connection);
+ public PeekingResultIterator newIterator(final StatementContext parentContext, ResultIterator iterator, Scan scan, String tableName) throws SQLException {
+ final PhoenixConnection clonedConnection = new PhoenixConnection(this.connection);
+
+ MutationState state = mutate(parentContext, iterator, clonedConnection);
+
long totalRowCount = state.getUpdateCount();
- if (connection.getAutoCommit()) {
- connection.getMutationState().join(state);
- connection.commit();
- ConnectionQueryServices services = connection.getQueryServices();
- int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
- state = new MutationState(maxSize, connection, totalRowCount);
+ if (clonedConnection.getAutoCommit()) {
+ clonedConnection.getMutationState().join(state);
+ clonedConnection.commit();
+ ConnectionQueryServices services = clonedConnection.getQueryServices();
+ int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
+ /*
+ * Everything that was mutated as part of the clonedConnection has been committed. However, we want to
+ * report the mutation work done using this clonedConnection as part of the overall mutation work of the
+ * parent connection. So we need to set those metrics in the empty mutation state so that they could be
+ * combined with the parent connection's mutation metrics (as part of combining mutation state) in the
+ * close() method of the iterator being returned. Don't combine the read metrics in parent context yet
+ * though because they are possibly being concurrently modified by other threads at this stage. Instead we
+ * will get hold of the read metrics when all the mutating iterators are done.
+ */
+ state = MutationState.emptyMutationState(maxSize, clonedConnection);
+ state.getMutationMetricQueue().combineMetricQueues(clonedConnection.getMutationState().getMutationMetricQueue());
}
final MutationState finalState = state;
+
byte[] value = PLong.INSTANCE.toBytes(totalRowCount);
KeyValue keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
final Tuple tuple = new SingleKeyValueTuple(keyValue);
@@ -90,13 +103,17 @@ public abstract class MutatingParallelIteratorFactory implements ParallelIterato
@Override
public void close() throws SQLException {
try {
- // Join the child mutation states in close, since this is called in a single threaded manner
- // after the parallel results have been processed.
- if (!connection.getAutoCommit()) {
- MutatingParallelIteratorFactory.this.connection.getMutationState().join(finalState);
- }
+ /*
+ * Join the child mutation states in close, since this is called in a single threaded manner
+ * after the parallel results have been processed.
+ * If auto-commit is on for the cloned child connection, then the finalState here is an empty mutation
+ * state (with no mutations). However, it still has the metrics for mutation work done by the
+ * mutating-iterator. Joining the mutation state makes sure those metrics are passed over
+ * to the parent connection.
+ */
+ MutatingParallelIteratorFactory.this.connection.getMutationState().join(finalState);
} finally {
- connection.close();
+ clonedConnection.close();
}
}
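
The auto-commit branch above is easy to misread, so here is the metric handoff in isolation (a sketch only; clonedConnection, state and maxSize are the locals of newIterator()):

    // Committing through the cloned connection flushes the mutations, but their
    // metrics remain on the cloned connection's mutation state.
    clonedConnection.getMutationState().join(state);
    clonedConnection.commit();
    // Start over from an empty state and copy just the metrics into it; close()
    // later joins this state into the parent connection, where the metrics belong.
    state = MutationState.emptyMutationState(maxSize, clonedConnection);
    state.getMutationMetricQueue().combineMetricQueues(clonedConnection.getMutationState().getMutationMetricQueue());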
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
index d726488..52bb7f2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
@@ -32,6 +32,8 @@ import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.monitoring.OverAllQueryMetrics;
+import org.apache.phoenix.monitoring.ReadMetricQueue;
import org.apache.phoenix.parse.SelectStatement;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.query.QueryServices;
@@ -41,6 +43,7 @@ import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.TableRef;
import org.apache.phoenix.util.DateUtil;
import org.apache.phoenix.util.NumberUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
import com.google.common.collect.Maps;
@@ -80,10 +83,19 @@ public class StatementContext {
private TimeRange scanTimeRange = null;
private Map<SelectStatement, Object> subqueryResults;
-
+ private final ReadMetricQueue readMetricsQueue;
+ private final OverAllQueryMetrics overAllQueryMetrics;
+
public StatementContext(PhoenixStatement statement) {
this(statement, new Scan());
}
+
+ /**
+ * Constructor that lets you override whether or not to collect request level metrics.
+ */
+ public StatementContext(PhoenixStatement statement, boolean collectRequestLevelMetrics) {
+ this(statement, FromCompiler.EMPTY_TABLE_RESOLVER, new Scan(), new SequenceManager(statement), collectRequestLevelMetrics);
+ }
public StatementContext(PhoenixStatement statement, Scan scan) {
this(statement, FromCompiler.EMPTY_TABLE_RESOLVER, new Scan(), new SequenceManager(statement));
@@ -94,6 +106,10 @@ public class StatementContext {
}
public StatementContext(PhoenixStatement statement, ColumnResolver resolver, Scan scan, SequenceManager seqManager) {
+ this(statement, resolver, scan, seqManager, statement.getConnection().isRequestLevelMetricsEnabled());
+ }
+
+ public StatementContext(PhoenixStatement statement, ColumnResolver resolver, Scan scan, SequenceManager seqManager, boolean isRequestMetricsEnabled) {
this.statement = statement;
this.resolver = resolver;
this.scan = scan;
@@ -102,20 +118,24 @@ public class StatementContext {
this.aggregates = new AggregationManager();
this.expressions = new ExpressionManager();
PhoenixConnection connection = statement.getConnection();
- this.dateFormat = connection.getQueryServices().getProps().get(QueryServices.DATE_FORMAT_ATTRIB, DateUtil.DEFAULT_DATE_FORMAT);
+ ReadOnlyProps props = connection.getQueryServices().getProps();
+ this.dateFormat = props.get(QueryServices.DATE_FORMAT_ATTRIB, DateUtil.DEFAULT_DATE_FORMAT);
this.dateFormatter = DateUtil.getDateFormatter(dateFormat);
- this.timeFormat = connection.getQueryServices().getProps().get(QueryServices.TIME_FORMAT_ATTRIB, DateUtil.DEFAULT_TIME_FORMAT);
+ this.timeFormat = props.get(QueryServices.TIME_FORMAT_ATTRIB, DateUtil.DEFAULT_TIME_FORMAT);
this.timeFormatter = DateUtil.getTimeFormatter(timeFormat);
- this.timestampFormat = connection.getQueryServices().getProps().get(QueryServices.TIMESTAMP_FORMAT_ATTRIB, DateUtil.DEFAULT_TIMESTAMP_FORMAT);
+ this.timestampFormat = props.get(QueryServices.TIMESTAMP_FORMAT_ATTRIB, DateUtil.DEFAULT_TIMESTAMP_FORMAT);
this.timestampFormatter = DateUtil.getTimestampFormatter(timestampFormat);
- this.dateFormatTimeZone = TimeZone.getTimeZone(
- connection.getQueryServices().getProps().get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, DateUtil.DEFAULT_TIME_ZONE_ID));
- this.numberFormat = connection.getQueryServices().getProps().get(QueryServices.NUMBER_FORMAT_ATTRIB, NumberUtil.DEFAULT_NUMBER_FORMAT);
+ this.dateFormatTimeZone = TimeZone.getTimeZone(props.get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB,
+ DateUtil.DEFAULT_TIME_ZONE_ID));
+ this.numberFormat = props.get(QueryServices.NUMBER_FORMAT_ATTRIB, NumberUtil.DEFAULT_NUMBER_FORMAT);
this.tempPtr = new ImmutableBytesWritable();
this.currentTable = resolver != null && !resolver.getTables().isEmpty() ? resolver.getTables().get(0) : null;
- this.whereConditionColumns = new ArrayList<Pair<byte[],byte[]>>();
- this.dataColumns = this.currentTable == null ? Collections.<PColumn, Integer>emptyMap() : Maps.<PColumn, Integer>newLinkedHashMap();
- this.subqueryResults = Maps.<SelectStatement, Object>newHashMap();
+ this.whereConditionColumns = new ArrayList<Pair<byte[], byte[]>>();
+ this.dataColumns = this.currentTable == null ? Collections.<PColumn, Integer> emptyMap() : Maps
+ .<PColumn, Integer> newLinkedHashMap();
+ this.subqueryResults = Maps.<SelectStatement, Object> newHashMap();
+ this.readMetricsQueue = new ReadMetricQueue(isRequestMetricsEnabled);
+ this.overAllQueryMetrics = new OverAllQueryMetrics(isRequestMetricsEnabled);
}
/**
@@ -285,4 +305,13 @@ public class StatementContext {
public void setSubqueryResult(SelectStatement select, Object result) {
subqueryResults.put(select, result);
}
+
+ public ReadMetricQueue getReadMetricsQueue() {
+ return readMetricsQueue;
+ }
+
+ public OverAllQueryMetrics getOverallQueryMetrics() {
+ return overAllQueryMetrics;
+ }
+
}
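
A note on the new two-argument constructor: the metric collectors are still constructed when collection is disabled, just flagged off, so call sites can use the accessors without null checks. Sketch (statement is any PhoenixStatement):

    // Child statements opt out of request-level metrics explicitly.
    StatementContext ctx = new StatementContext(statement, false);
    ReadMetricQueue readQueue = ctx.getReadMetricsQueue();             // non-null, collection disabled
    OverAllQueryMetrics overallMetrics = ctx.getOverallQueryMetrics(); // likewise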
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
index 2b35d4f..7b39a28 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
@@ -118,43 +118,40 @@ public class UpsertCompiler {
mutation.put(ptr, new RowMutationState(columnValues, statement.getConnection().getStatementExecutionCounter()));
}
- private static MutationState upsertSelect(PhoenixStatement statement,
- TableRef tableRef, RowProjector projector, ResultIterator iterator, int[] columnIndexes,
- int[] pkSlotIndexes) throws SQLException {
- try {
- PhoenixConnection connection = statement.getConnection();
- ConnectionQueryServices services = connection.getQueryServices();
- int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
- int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
- boolean isAutoCommit = connection.getAutoCommit();
- byte[][] values = new byte[columnIndexes.length][];
- int rowCount = 0;
- Map<ImmutableBytesPtr,RowMutationState> mutation = Maps.newHashMapWithExpectedSize(batchSize);
- PTable table = tableRef.getTable();
- ResultSet rs = new PhoenixResultSet(iterator, projector, statement);
+ private static MutationState upsertSelect(StatementContext childContext, TableRef tableRef, RowProjector projector,
+ ResultIterator iterator, int[] columnIndexes, int[] pkSlotIndexes) throws SQLException {
+ PhoenixStatement statement = childContext.getStatement();
+ PhoenixConnection connection = statement.getConnection();
+ ConnectionQueryServices services = connection.getQueryServices();
+ int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,
+ QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
+ int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
+ boolean isAutoCommit = connection.getAutoCommit();
+ byte[][] values = new byte[columnIndexes.length][];
+ int rowCount = 0;
+ Map<ImmutableBytesPtr, RowMutationState> mutation = Maps.newHashMapWithExpectedSize(batchSize);
+ PTable table = tableRef.getTable();
+ try (ResultSet rs = new PhoenixResultSet(iterator, projector, childContext)) {
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
while (rs.next()) {
for (int i = 0; i < values.length; i++) {
PColumn column = table.getColumns().get(columnIndexes[i]);
- byte[] bytes = rs.getBytes(i+1);
+ byte[] bytes = rs.getBytes(i + 1);
ptr.set(bytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : bytes);
- Object value = rs.getObject(i+1);
- int rsPrecision = rs.getMetaData().getPrecision(i+1);
+ Object value = rs.getObject(i + 1);
+ int rsPrecision = rs.getMetaData().getPrecision(i + 1);
Integer precision = rsPrecision == 0 ? null : rsPrecision;
- int rsScale = rs.getMetaData().getScale(i+1);
+ int rsScale = rs.getMetaData().getScale(i + 1);
Integer scale = rsScale == 0 ? null : rsScale;
// We are guaranteed that the two column will have compatible types,
// as we checked that before.
- if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(),
- precision, scale,
- column.getMaxLength(),column.getScale())) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY)
- .setColumnName(column.getName().getString())
- .setMessage("value=" + column.getDataType().toStringLiteral(ptr, null)).build().buildException();
- }
- column.getDataType().coerceBytes(ptr, value, column.getDataType(),
- precision, scale, SortOrder.getDefault(),
- column.getMaxLength(), column.getScale(), column.getSortOrder());
+ if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(), precision, scale,
+ column.getMaxLength(), column.getScale())) { throw new SQLExceptionInfo.Builder(
+ SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY).setColumnName(column.getName().getString())
+ .setMessage("value=" + column.getDataType().toStringLiteral(ptr, null)).build()
+ .buildException(); }
+ column.getDataType().coerceBytes(ptr, value, column.getDataType(), precision, scale,
+ SortOrder.getDefault(), column.getMaxLength(), column.getScale(), column.getSortOrder());
values[i] = ByteUtil.copyKeyBytesIfNecessary(ptr);
}
setValues(values, pkSlotIndexes, columnIndexes, table, mutation, statement);
@@ -169,8 +166,6 @@ public class UpsertCompiler {
}
// If auto commit is true, this last batch will be committed upon return
return new MutationState(tableRef, mutation, rowCount / batchSize * batchSize, maxSize, connection);
- } finally {
- iterator.close();
}
}
@@ -186,14 +181,21 @@ public class UpsertCompiler {
}
@Override
- protected MutationState mutate(StatementContext context, ResultIterator iterator, PhoenixConnection connection) throws SQLException {
- if (context.getSequenceManager().getSequenceCount() > 0) {
+ protected MutationState mutate(StatementContext parentContext, ResultIterator iterator, PhoenixConnection connection) throws SQLException {
+ if (parentContext.getSequenceManager().getSequenceCount() > 0) {
throw new IllegalStateException("Cannot pipeline upsert when sequence is referenced");
}
PhoenixStatement statement = new PhoenixStatement(connection);
+ /*
+ * We don't want to collect any read metrics within the child context. This is because any read metrics that
+ * need to be captured are already getting collected in the parent statement context enclosed in the result
+ * iterator being used for reading rows out.
+ */
+ StatementContext childContext = new StatementContext(statement, false);
// Clone the row projector as it's not thread safe and would be used simultaneously by
// multiple threads otherwise.
- return upsertSelect(statement, tableRef, projector.cloneIfNecessary(), iterator, columnIndexes, pkSlotIndexes);
+ MutationState state = upsertSelect(childContext, tableRef, projector.cloneIfNecessary(), iterator, columnIndexes, pkSlotIndexes);
+ return state;
}
public void setRowProjector(RowProjector projector) {
@@ -669,7 +671,7 @@ public class UpsertCompiler {
public MutationState execute() throws SQLException {
ResultIterator iterator = queryPlan.iterator();
if (parallelIteratorFactory == null) {
- return upsertSelect(statement, tableRef, projector, iterator, columnIndexes, pkSlotIndexes);
+ return upsertSelect(new StatementContext(statement), tableRef, projector, iterator, columnIndexes, pkSlotIndexes);
}
try {
parallelIteratorFactory.setRowProjector(projector);
@@ -677,13 +679,21 @@ public class UpsertCompiler {
parallelIteratorFactory.setPkSlotIndexes(pkSlotIndexes);
Tuple tuple;
long totalRowCount = 0;
+ StatementContext context = queryPlan.getContext();
while ((tuple = iterator.next()) != null) { // Runs query
Cell kv = tuple.getValue(0);
totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
}
// Return total number of rows that have been updated. In the case of auto commit being off
// the mutations will all be in the mutation state of the current connection.
- return new MutationState(maxSize, statement.getConnection(), totalRowCount);
+ MutationState mutationState = new MutationState(maxSize, statement.getConnection(), totalRowCount);
+ /*
+ * All the metrics collected for measuring the reads done by the parallel mutating iterators
+ * are included in the ReadMetricHolder of the statement context. Include these metrics in the
+ * returned mutation state so they can be published on commit.
+ */
+ mutationState.setReadMetricQueue(context.getReadMetricsQueue());
+ return mutationState;
} finally {
iterator.close();
}
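
The upsert-select changes above hand the read-side metrics from the scanning statement context to the MutationState that is returned, so read and write metrics can be published together on commit. Below is a minimal plain-Java sketch of that hand-off pattern; ReadMetrics and MutationResult are hypothetical stand-ins for Phoenix's read metric queue and MutationState, not Phoenix API:

    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical stand-ins: the point is only that the object returned from
    // the write path carries the reader's metrics for publication on commit.
    class ReadMetrics {
        final Map<String, Long> counters = new HashMap<>();
        void increment(String name, long delta) { counters.merge(name, delta, Long::sum); }
    }

    class MutationResult {
        final long rowsUpserted;
        ReadMetrics readMetrics; // attached by the caller, published on commit
        MutationResult(long rowsUpserted) { this.rowsUpserted = rowsUpserted; }
    }

    public class MetricsHandOffSketch {
        static MutationResult upsertSelect(ReadMetrics readContext) {
            long rows = 0;
            for (int i = 0; i < 3; i++) { // pretend each iteration reads then writes a row
                readContext.increment("SCAN_BYTES", 128);
                rows++;
            }
            MutationResult result = new MutationResult(rows);
            result.readMetrics = readContext; // same hand-off as setReadMetricQueue(...)
            return result;
        }

        public static void main(String[] args) {
            MutationResult r = upsertSelect(new ReadMetrics());
            System.out.println(r.rowsUpserted + " rows; read metrics: " + r.readMetrics.counters);
        }
    }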
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
index ba137f8..00e843d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
@@ -102,7 +102,7 @@ public class AggregatePlan extends BaseQueryPlan {
this.services = services;
}
@Override
- public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan) throws SQLException {
+ public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan, String tableName) throws SQLException {
Expression expression = RowKeyExpression.INSTANCE;
OrderByExpression orderByExpression = new OrderByExpression(expression, false, true);
int threshold = services.getProps().getInt(QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES);
@@ -119,9 +119,9 @@ public class AggregatePlan extends BaseQueryPlan {
this.outerFactory = outerFactory;
}
@Override
- public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan) throws SQLException {
- PeekingResultIterator iterator = innerFactory.newIterator(context, scanner, scan);
- return outerFactory.newIterator(context, iterator, scan);
+ public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan, String tableName) throws SQLException {
+ PeekingResultIterator iterator = innerFactory.newIterator(context, scanner, scan, tableName);
+ return outerFactory.newIterator(context, iterator, scan, tableName);
}
}
[37/47] phoenix git commit: PHOENIX-1659
PhoenixDatabaseMetaData.getColumns does not return REMARKS column
Posted by ma...@apache.org.
PHOENIX-1659 PhoenixDatabaseMetaData.getColumns does not return REMARKS column
Followup commit to adjust the COLUMN_FAMILY_POSITION in QueryUtil as well.
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/83b8db4d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/83b8db4d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/83b8db4d
Branch: refs/heads/calcite
Commit: 83b8db4def81d9a7fc959de116edbaa1a265bf18
Parents: d604494
Author: Josh Mahonin <jm...@interset.com>
Authored: Mon Jun 29 15:27:01 2015 -0400
Committer: Josh Mahonin <jm...@interset.com>
Committed: Mon Jun 29 16:56:54 2015 -0400
----------------------------------------------------------------------
phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/83b8db4d/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
index a2d4a91..bc2141c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
@@ -59,7 +59,7 @@ public final class QueryUtil {
/**
* Column family name index within ResultSet resulting from {@link DatabaseMetaData#getColumns(String, String, String, String)}
*/
- public static final int COLUMN_FAMILY_POSITION = 24;
+ public static final int COLUMN_FAMILY_POSITION = 25;
/**
* Column name index within ResultSet resulting from {@link DatabaseMetaData#getColumns(String, String, String, String)}
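
The one-line fix above follows from how java.sql.DatabaseMetaData#getColumns is consumed positionally: once the REMARKS column is returned, every Phoenix-specific column appended after the JDBC-standard set shifts by one ordinal, so COLUMN_FAMILY_POSITION moves from 24 to 25. A sketch of that positional access, using only standard JDBC calls (the connection URL and table name are illustrative assumptions):

    import java.sql.Connection;
    import java.sql.DatabaseMetaData;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class GetColumnsPositionExample {
        // Mirrors QueryUtil.COLUMN_FAMILY_POSITION after the fix; positional reads
        // like this are exactly what break when a column such as REMARKS is added.
        private static final int COLUMN_FAMILY_POSITION = 25;

        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                DatabaseMetaData md = conn.getMetaData();
                try (ResultSet rs = md.getColumns(null, null, "MY_TABLE", null)) {
                    while (rs.next()) {
                        // COLUMN_NAME sits at the JDBC-standard ordinal 4; the column
                        // family is a Phoenix extension appended after the standard set.
                        System.out.println(rs.getString(4) + " -> "
                                + rs.getString(COLUMN_FAMILY_POSITION));
                    }
                }
            }
        }
    }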
[02/47] phoenix git commit: PHOENIX-1660 Implement missing math
built-in functions ABS, POWER, LN, LOG, SQRT, CBRT, EXP (Shuxiong Ye)
Posted by ma...@apache.org.
PHOENIX-1660 Implement missing math built-in functions ABS, POWER, LN, LOG, SQRT, CBRT, EXP (Shuxiong Ye)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c2927dde
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c2927dde
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c2927dde
Branch: refs/heads/calcite
Commit: c2927ddec5ab954dd779516ed29b4b7fa4b011d9
Parents: d1934af
Author: James Taylor <ja...@apache.org>
Authored: Mon Jun 15 15:53:44 2015 -0700
Committer: James Taylor <ja...@apache.org>
Committed: Mon Jun 15 15:53:44 2015 -0700
----------------------------------------------------------------------
.../phoenix/end2end/AbsFunctionEnd2EndIT.java | 108 +++++++++++
.../phoenix/end2end/CbrtFunctionEnd2EndIT.java | 143 +++++++++++++++
.../phoenix/end2end/ExpFunctionEnd2EndIT.java | 128 +++++++++++++
.../phoenix/end2end/LnLogFunctionEnd2EndIT.java | 143 +++++++++++++++
.../phoenix/end2end/PowerFunctionEnd2EndIT.java | 144 +++++++++++++++
.../phoenix/expression/ExpressionType.java | 14 +-
.../expression/function/AbsFunction.java | 66 +++++++
.../expression/function/CbrtFunction.java | 55 ++++++
.../expression/function/ExpFunction.java | 55 ++++++
.../function/JavaMathOneArgumentFunction.java | 43 ++---
.../function/JavaMathTwoArgumentFunction.java | 69 +++++++
.../phoenix/expression/function/LnFunction.java | 55 ++++++
.../expression/function/LogFunction.java | 56 ++++++
.../expression/function/PowerFunction.java | 51 ++++++
.../expression/function/ScalarFunction.java | 4 +-
.../expression/function/SqrtFunction.java | 8 +-
.../apache/phoenix/schema/types/PDecimal.java | 11 ++
.../phoenix/schema/types/PNumericType.java | 8 +
.../phoenix/schema/types/PRealNumber.java | 8 +
.../phoenix/schema/types/PWholeNumber.java | 8 +
.../phoenix/compile/QueryCompilerTest.java | 68 ++++++-
.../phoenix/expression/AbsFunctionTest.java | 180 ++++++++++++++++++
.../phoenix/expression/CbrtFunctionTest.java | 127 +++++++++++++
.../phoenix/expression/ExpFunctionTest.java | 150 +++++++++++++++
.../phoenix/expression/LnLogFunctionTest.java | 182 +++++++++++++++++++
.../phoenix/expression/PowerFunctionTest.java | 182 +++++++++++++++++++
26 files changed, 2036 insertions(+), 30 deletions(-)
----------------------------------------------------------------------
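Taken together, the commit adds seven scalar math built-ins plus their unit and end-to-end tests. A small illustrative JDBC client exercising the new functions is sketched below; the connection URL and table are assumptions, and the SQL mirrors the end-to-end tests that follow:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class MathBuiltInsExample {
        public static void main(String[] args) throws Exception {
            // Illustrative URL and table name.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                    Statement stmt = conn.createStatement()) {
                stmt.execute("CREATE TABLE IF NOT EXISTS math_demo (k VARCHAR PRIMARY KEY, d DOUBLE)");
                stmt.execute("UPSERT INTO math_demo VALUES ('a', 8.0)");
                conn.commit();
                ResultSet rs = stmt.executeQuery(
                    "SELECT ABS(d), SQRT(d), CBRT(d), EXP(d), LN(d), LOG(d), LOG(d, 2), POWER(d, 2)"
                    + " FROM math_demo");
                while (rs.next()) {
                    for (int i = 1; i <= 8; i++) {
                        System.out.print(rs.getDouble(i) + (i < 8 ? ", " : "\n"));
                    }
                }
            }
        }
    }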
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/it/java/org/apache/phoenix/end2end/AbsFunctionEnd2EndIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AbsFunctionEnd2EndIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AbsFunctionEnd2EndIT.java
new file mode 100644
index 0000000..0c6204c
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AbsFunctionEnd2EndIT.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.TestUtil.closeStmtAndConn;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.math.BigDecimal;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+
+import org.apache.phoenix.expression.function.AbsFunction;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * End to end tests for {@link AbsFunction}
+ */
+public class AbsFunctionEnd2EndIT extends BaseHBaseManagedTimeIT {
+
+ private static final String KEY = "key";
+
+ @Before
+ public void initTable() throws Exception {
+ Connection conn = null;
+ PreparedStatement stmt = null;
+ try {
+ conn = DriverManager.getConnection(getUrl());
+ String ddl;
+ ddl = "CREATE TABLE testSigned (k VARCHAR NOT NULL PRIMARY KEY, dec DECIMAL, doub DOUBLE, fl FLOAT, inte INTEGER, lon BIGINT, smalli SMALLINT, tinyi TINYINT)";
+ conn.createStatement().execute(ddl);
+ conn.commit();
+ } finally {
+ closeStmtAndConn(stmt, conn);
+ }
+ }
+
+ private void updateSignedTable(Connection conn, double data) throws Exception {
+ PreparedStatement stmt = conn.prepareStatement("UPSERT INTO testSigned VALUES (?, ?, ?, ?, ?, ?, ?, ?)");
+ stmt.setString(1, KEY);
+ Double d = Double.valueOf(data);
+ stmt.setBigDecimal(2, BigDecimal.valueOf(data));
+ stmt.setDouble(3, d.doubleValue());
+ stmt.setFloat(4, d.floatValue());
+ stmt.setInt(5, d.intValue());
+ stmt.setLong(6, d.longValue());
+ stmt.setShort(7, d.shortValue());
+ stmt.setByte(8, d.byteValue());
+ stmt.executeUpdate();
+ conn.commit();
+ }
+
+ private void testSignedNumberSpec(Connection conn, double data) throws Exception {
+ updateSignedTable(conn, data);
+ ResultSet rs = conn.createStatement().executeQuery("SELECT ABS(dec),ABS(doub),ABS(fl),ABS(inte),ABS(lon),ABS(smalli),ABS(tinyi) FROM testSigned");
+ assertTrue(rs.next());
+ Double d = Double.valueOf(data);
+ assertEquals(rs.getBigDecimal(1).compareTo(BigDecimal.valueOf(data).abs()), 0);
+ assertEquals(rs.getDouble(2), Math.abs(data), 1e-6);
+ assertEquals(rs.getFloat(3), Math.abs(d.floatValue()), 1e-6);
+ assertEquals(rs.getInt(4), Math.abs(d.intValue()));
+ assertEquals(rs.getLong(5), Math.abs(d.longValue()));
+ assertEquals(rs.getShort(6), Math.abs(d.shortValue()));
+ assertEquals(rs.getByte(7), Math.abs(d.byteValue()));
+ assertTrue(!rs.next());
+
+ PreparedStatement stmt = conn.prepareStatement("SELECT k FROM testSigned WHERE ABS(dec)=? AND ABS(doub)=? AND ABS(fl)=? AND ABS(inte)=? AND ABS(lon)=? AND ABS(smalli)=? AND ABS(tinyi)=?");
+ stmt.setBigDecimal(1, BigDecimal.valueOf(data).abs());
+ stmt.setDouble(2, Math.abs(d.doubleValue()));
+ stmt.setFloat(3, Math.abs(d.floatValue()));
+ stmt.setInt(4, Math.abs(d.intValue()));
+ stmt.setLong(5, Math.abs(d.longValue()));
+ stmt.setShort(6, (short) Math.abs(d.shortValue()));
+ stmt.setByte(7, (byte) Math.abs(d.byteValue()));
+ rs = stmt.executeQuery();
+ assertTrue(rs.next());
+ assertEquals(KEY, rs.getString(1));
+ assertTrue(!rs.next());
+ }
+
+ @Test
+ public void testSignedNumber() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ testSignedNumberSpec(conn, 0.0);
+ testSignedNumberSpec(conn, 1.0);
+ testSignedNumberSpec(conn, -1.0);
+ testSignedNumberSpec(conn, 123.1234);
+ testSignedNumberSpec(conn, -123.1234);
+ }
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/it/java/org/apache/phoenix/end2end/CbrtFunctionEnd2EndIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CbrtFunctionEnd2EndIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CbrtFunctionEnd2EndIT.java
new file mode 100644
index 0000000..a632104
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CbrtFunctionEnd2EndIT.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.TestUtil.closeStmtAndConn;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+
+import org.apache.phoenix.expression.function.CbrtFunction;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * End to end tests for {@link CbrtFunction}
+ */
+public class CbrtFunctionEnd2EndIT extends BaseHBaseManagedTimeIT {
+
+ private static final String KEY = "key";
+ private static final double ZERO = 1e-8;
+
+ @Before
+ public void initTable() throws Exception {
+ Connection conn = null;
+ PreparedStatement stmt = null;
+ try {
+ conn = DriverManager.getConnection(getUrl());
+ String ddl;
+ ddl = "CREATE TABLE testSigned (k VARCHAR NOT NULL PRIMARY KEY, doub DOUBLE, fl FLOAT, inte INTEGER, lon BIGINT, smalli SMALLINT, tinyi TINYINT)";
+ conn.createStatement().execute(ddl);
+ ddl = "CREATE TABLE testUnsigned (k VARCHAR NOT NULL PRIMARY KEY, doub UNSIGNED_DOUBLE, fl UNSIGNED_FLOAT, inte UNSIGNED_INT, lon UNSIGNED_LONG, smalli UNSIGNED_SMALLINT, tinyi UNSIGNED_TINYINT)";
+ conn.createStatement().execute(ddl);
+ conn.commit();
+ } finally {
+ closeStmtAndConn(stmt, conn);
+ }
+ }
+
+ private void updateSignedTable(Connection conn, double data) throws Exception {
+ PreparedStatement stmt = conn.prepareStatement("UPSERT INTO testSigned VALUES (?, ?, ?, ?, ?, ?, ?)");
+ stmt.setString(1, KEY);
+ Double d = Double.valueOf(data);
+ stmt.setDouble(2, d.doubleValue());
+ stmt.setFloat(3, d.floatValue());
+ stmt.setInt(4, d.intValue());
+ stmt.setLong(5, d.longValue());
+ stmt.setShort(6, d.shortValue());
+ stmt.setByte(7, d.byteValue());
+ stmt.executeUpdate();
+ conn.commit();
+ }
+
+ private void updateUnsignedTable(Connection conn, double data) throws Exception {
+ PreparedStatement stmt = conn.prepareStatement("UPSERT INTO testUnsigned VALUES (?, ?, ?, ?, ?, ?, ?)");
+ stmt.setString(1, KEY);
+ Double d = Double.valueOf(data);
+ stmt.setDouble(2, d.doubleValue());
+ stmt.setFloat(3, d.floatValue());
+ stmt.setInt(4, d.intValue());
+ stmt.setLong(5, d.longValue());
+ stmt.setShort(6, d.shortValue());
+ stmt.setByte(7, d.byteValue());
+ stmt.executeUpdate();
+ conn.commit();
+ }
+
+ private void testSignedNumberSpec(Connection conn, double data) throws Exception {
+ updateSignedTable(conn, data);
+ ResultSet rs = conn.createStatement().executeQuery("SELECT CBRT(doub),CBRT(fl),CBRT(inte),CBRT(lon),CBRT(smalli),CBRT(tinyi) FROM testSigned");
+ assertTrue(rs.next());
+ Double d = Double.valueOf(data);
+ assertTrue(Math.abs(rs.getDouble(1) - Math.cbrt(d.doubleValue())) < ZERO);
+ assertTrue(Math.abs(rs.getDouble(2) - Math.cbrt(d.floatValue())) < ZERO);
+ assertTrue(Math.abs(rs.getDouble(3) - Math.cbrt(d.intValue())) < ZERO);
+ assertTrue(Math.abs(rs.getDouble(4) - Math.cbrt(d.longValue())) < ZERO);
+ assertTrue(Math.abs(rs.getDouble(5) - Math.cbrt(d.shortValue())) < ZERO);
+ assertTrue(Math.abs(rs.getDouble(6) - Math.cbrt(d.byteValue())) < ZERO);
+ assertTrue(!rs.next());
+ PreparedStatement stmt = conn.prepareStatement("SELECT k FROM testSigned WHERE CBRT(doub)>0 AND CBRT(fl)>0 AND CBRT(inte)>0 AND CBRT(lon)>0 AND CBRT(smalli)>0 AND CBRT(tinyi)>0");
+ rs = stmt.executeQuery();
+ if (data > 0) {
+ assertTrue(rs.next());
+ assertEquals(KEY, rs.getString(1));
+ }
+ assertTrue(!rs.next());
+ }
+
+ private void testUnsignedNumberSpec(Connection conn, double data) throws Exception {
+ updateUnsignedTable(conn, data);
+ ResultSet rs = conn.createStatement().executeQuery("SELECT CBRT(doub),CBRT(fl),CBRT(inte),CBRT(lon),CBRT(smalli),CBRT(tinyi) FROM testUnsigned");
+ assertTrue(rs.next());
+ Double d = Double.valueOf(data);
+ assertTrue(Math.abs(rs.getDouble(1) - Math.cbrt(d.doubleValue())) < ZERO);
+ assertTrue(Math.abs(rs.getDouble(2) - Math.cbrt(d.floatValue())) < ZERO);
+ assertTrue(Math.abs(rs.getDouble(3) - Math.cbrt(d.intValue())) < ZERO);
+ assertTrue(Math.abs(rs.getDouble(4) - Math.cbrt(d.longValue())) < ZERO);
+ assertTrue(Math.abs(rs.getDouble(5) - Math.cbrt(d.shortValue())) < ZERO);
+ assertTrue(Math.abs(rs.getDouble(6) - Math.cbrt(d.byteValue())) < ZERO);
+ assertTrue(!rs.next());
+ PreparedStatement stmt = conn.prepareStatement("SELECT k FROM testUnsigned WHERE CBRT(doub)>0 AND CBRT(fl)>0 AND CBRT(inte)>0 AND CBRT(lon)>0 AND CBRT(smalli)>0 AND CBRT(tinyi)>0");
+ rs = stmt.executeQuery();
+ if (data > 0) {
+ assertTrue(rs.next());
+ assertEquals(KEY, rs.getString(1));
+ }
+ assertTrue(!rs.next());
+ }
+
+ @Test
+ public void testSignedNumber() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ for (double d : new double[] { 0.0, 1.0, -1.0, 123.1234, -123.1234 }) {
+ testSignedNumberSpec(conn, d);
+ }
+ }
+
+ @Test
+ public void testUnsignedNumber() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ for (double d : new double[] { 0.0, 1.0, 123.1234 }) {
+ testUnsignedNumberSpec(conn, d);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExpFunctionEnd2EndIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExpFunctionEnd2EndIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExpFunctionEnd2EndIT.java
new file mode 100644
index 0000000..8772400
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExpFunctionEnd2EndIT.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.TestUtil.closeStmtAndConn;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+
+import org.apache.phoenix.expression.function.ExpFunction;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * End to end tests for {@link ExpFunction}
+ */
+public class ExpFunctionEnd2EndIT extends BaseHBaseManagedTimeIT {
+
+ private static final String KEY = "key";
+ private static final double ZERO = 1e-8;
+
+ @Before
+ public void initTable() throws Exception {
+ Connection conn = null;
+ PreparedStatement stmt = null;
+ try {
+ conn = DriverManager.getConnection(getUrl());
+ String ddl;
+ ddl = "CREATE TABLE testSigned (k VARCHAR NOT NULL PRIMARY KEY, doub DOUBLE, fl FLOAT, inte INTEGER, lon BIGINT, smalli SMALLINT, tinyi TINYINT)";
+ conn.createStatement().execute(ddl);
+ ddl = "CREATE TABLE testUnsigned (k VARCHAR NOT NULL PRIMARY KEY, doub UNSIGNED_DOUBLE, fl UNSIGNED_FLOAT, inte UNSIGNED_INT, lon UNSIGNED_LONG, smalli UNSIGNED_SMALLINT, tinyi UNSIGNED_TINYINT)";
+ conn.createStatement().execute(ddl);
+ conn.commit();
+ } finally {
+ closeStmtAndConn(stmt, conn);
+ }
+ }
+
+ private void updateSignedTable(Connection conn, double data) throws Exception {
+ PreparedStatement stmt = conn.prepareStatement("UPSERT INTO testSigned VALUES (?, ?, ?, ?, ?, ?, ?)");
+ stmt.setString(1, KEY);
+ Double d = Double.valueOf(data);
+ stmt.setDouble(2, d.doubleValue());
+ stmt.setFloat(3, d.floatValue());
+ stmt.setInt(4, d.intValue());
+ stmt.setLong(5, d.longValue());
+ stmt.setShort(6, d.shortValue());
+ stmt.setByte(7, d.byteValue());
+ stmt.executeUpdate();
+ conn.commit();
+ }
+
+ private void updateUnsignedTable(Connection conn, double data) throws Exception {
+ PreparedStatement stmt = conn.prepareStatement("UPSERT INTO testUnsigned VALUES (?, ?, ?, ?, ?, ?, ?)");
+ stmt.setString(1, KEY);
+ Double d = Double.valueOf(data);
+ stmt.setDouble(2, d.doubleValue());
+ stmt.setFloat(3, d.floatValue());
+ stmt.setInt(4, d.intValue());
+ stmt.setLong(5, d.longValue());
+ stmt.setShort(6, d.shortValue());
+ stmt.setByte(7, d.byteValue());
+ stmt.executeUpdate();
+ conn.commit();
+ }
+
+ private void testSignedNumberSpec(Connection conn, double data) throws Exception {
+ updateSignedTable(conn, data);
+ ResultSet rs = conn.createStatement().executeQuery("SELECT EXP(doub),EXP(fl),EXP(inte),EXP(lon),EXP(smalli),EXP(tinyi) FROM testSigned");
+ assertTrue(rs.next());
+ Double d = Double.valueOf(data);
+ assertTrue(Math.abs(rs.getDouble(1) - Math.exp(d.doubleValue())) < ZERO);
+ assertTrue(Math.abs(rs.getDouble(2) - Math.exp(d.floatValue())) < ZERO);
+ assertTrue(Math.abs(rs.getDouble(3) - Math.exp(d.intValue())) < ZERO);
+ assertTrue(Math.abs(rs.getDouble(4) - Math.exp(d.longValue())) < ZERO);
+ assertTrue(Math.abs(rs.getDouble(5) - Math.exp(d.shortValue())) < ZERO);
+ assertTrue(Math.abs(rs.getDouble(6) - Math.exp(d.byteValue())) < ZERO);
+ assertTrue(!rs.next());
+ }
+
+ private void testUnsignedNumberSpec(Connection conn, double data) throws Exception {
+ updateUnsignedTable(conn, data);
+ ResultSet rs = conn.createStatement().executeQuery("SELECT EXP(doub),EXP(fl),EXP(inte),EXP(lon),EXP(smalli),EXP(tinyi) FROM testUnsigned");
+ assertTrue(rs.next());
+ Double d = Double.valueOf(data);
+ assertTrue(Math.abs(rs.getDouble(1) - Math.exp(d.doubleValue())) < ZERO);
+ assertTrue(Math.abs(rs.getDouble(2) - Math.exp(d.floatValue())) < ZERO);
+ assertTrue(Math.abs(rs.getDouble(3) - Math.exp(d.intValue())) < ZERO);
+ assertTrue(Math.abs(rs.getDouble(4) - Math.exp(d.longValue())) < ZERO);
+ assertTrue(Math.abs(rs.getDouble(5) - Math.exp(d.shortValue())) < ZERO);
+ assertTrue(Math.abs(rs.getDouble(6) - Math.exp(d.byteValue())) < ZERO);
+ assertTrue(!rs.next());
+ }
+
+ @Test
+ public void testSignedNumber() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ for (double d : new double[] { 0.0, 1.0, 123.1234 }) {
+ testSignedNumberSpec(conn, d);
+ }
+ }
+
+ @Test
+ public void testUnsignedNumber() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ for (double d : new double[] { 0.0, 1.0, 123.1234 }) {
+ testUnsignedNumberSpec(conn, d);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/it/java/org/apache/phoenix/end2end/LnLogFunctionEnd2EndIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LnLogFunctionEnd2EndIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LnLogFunctionEnd2EndIT.java
new file mode 100644
index 0000000..e2c72ca
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LnLogFunctionEnd2EndIT.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.TestUtil.closeStmtAndConn;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+
+import org.apache.phoenix.expression.function.LnFunction;
+import org.apache.phoenix.expression.function.LogFunction;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * End to end tests for {@link LnFunction} and {@link LogFunction}
+ */
+public class LnLogFunctionEnd2EndIT extends BaseHBaseManagedTimeIT {
+
+ private static final String KEY = "key";
+ private static final double ZERO = 1e-9;
+
+ // Returns true when a and b are both NaN, are infinities of the same sign,
+ // or differ by at most ZERO.
+ private static boolean twoDoubleEquals(double a, double b) {
+ if (Double.isNaN(a) ^ Double.isNaN(b)) return false;
+ if (Double.isNaN(a)) return true;
+ if (Double.isInfinite(a) ^ Double.isInfinite(b)) return false;
+ if (Double.isInfinite(a)) {
+ if ((a > 0) ^ (b > 0)) return false;
+ else return true;
+ }
+ return Math.abs(a - b) <= ZERO;
+ }
+
+ @Before
+ public void initTable() throws Exception {
+ Connection conn = null;
+ PreparedStatement stmt = null;
+ try {
+ conn = DriverManager.getConnection(getUrl());
+ String ddl;
+ ddl =
+ "CREATE TABLE testSigned (k VARCHAR NOT NULL PRIMARY KEY, doub DOUBLE, fl FLOAT, inte INTEGER, lon BIGINT, smalli SMALLINT, tinyi TINYINT)";
+ conn.createStatement().execute(ddl);
+ ddl =
+ "CREATE TABLE testUnsigned (k VARCHAR NOT NULL PRIMARY KEY, doub UNSIGNED_DOUBLE, fl UNSIGNED_FLOAT, inte UNSIGNED_INT, lon UNSIGNED_LONG, smalli UNSIGNED_SMALLINT, tinyi UNSIGNED_TINYINT)";
+ conn.createStatement().execute(ddl);
+ conn.commit();
+ } finally {
+ closeStmtAndConn(stmt, conn);
+ }
+ }
+
+ private void updateTableSpec(Connection conn, double data, String tableName) throws Exception {
+ PreparedStatement stmt =
+ conn.prepareStatement("UPSERT INTO " + tableName + " VALUES (?, ?, ?, ?, ?, ?, ?)");
+ stmt.setString(1, KEY);
+ Double d = Double.valueOf(data);
+ stmt.setDouble(2, d.doubleValue());
+ stmt.setFloat(3, d.floatValue());
+ stmt.setInt(4, d.intValue());
+ stmt.setLong(5, d.longValue());
+ stmt.setShort(6, d.shortValue());
+ stmt.setByte(7, d.byteValue());
+ stmt.executeUpdate();
+ conn.commit();
+ }
+
+ private void testNumberSpec(Connection conn, double data, String tableName) throws Exception {
+ updateTableSpec(conn, data, tableName);
+ ResultSet rs =
+ conn.createStatement().executeQuery(
+ "SELECT LN(doub),LN(fl),LN(inte),LN(lon),LN(smalli),LN(tinyi) FROM "
+ + tableName);
+ assertTrue(rs.next());
+ Double d = Double.valueOf(data);
+ assertTrue(twoDoubleEquals(rs.getDouble(1), Math.log(d.doubleValue())));
+ assertTrue(twoDoubleEquals(rs.getDouble(2), Math.log(d.floatValue())));
+ assertTrue(twoDoubleEquals(rs.getDouble(3), Math.log(d.intValue())));
+ assertTrue(twoDoubleEquals(rs.getDouble(4), Math.log(d.longValue())));
+ assertTrue(twoDoubleEquals(rs.getDouble(5), Math.log(d.shortValue())));
+ assertTrue(twoDoubleEquals(rs.getDouble(6), Math.log(d.byteValue())));
+
+ assertTrue(!rs.next());
+ rs =
+ conn.createStatement().executeQuery(
+ "SELECT LOG(doub),LOG(fl),LOG(inte),LOG(lon),LOG(smalli),LOG(tinyi) FROM "
+ + tableName);
+ assertTrue(rs.next());
+ d = Double.valueOf(data);
+ assertTrue(twoDoubleEquals(rs.getDouble(1), Math.log10(d.doubleValue())));
+ assertTrue(twoDoubleEquals(rs.getDouble(2), Math.log10(d.floatValue())));
+ assertTrue(twoDoubleEquals(rs.getDouble(3), Math.log10(d.intValue())));
+ assertTrue(twoDoubleEquals(rs.getDouble(4), Math.log10(d.longValue())));
+ assertTrue(twoDoubleEquals(rs.getDouble(5), Math.log10(d.shortValue())));
+ assertTrue(twoDoubleEquals(rs.getDouble(6), Math.log10(d.byteValue())));
+ assertTrue(!rs.next());
+
+ rs =
+ conn.createStatement().executeQuery(
+ "SELECT LOG(doub,3),LOG(fl,3),LOG(inte,3),LOG(lon,3),LOG(smalli,3),LOG(tinyi,3) FROM "
+ + tableName);
+ assertTrue(rs.next());
+ d = Double.valueOf(data);
+ assertTrue(twoDoubleEquals(rs.getDouble(1), Math.log(d.doubleValue()) / Math.log(3)));
+ assertTrue(twoDoubleEquals(rs.getDouble(2), Math.log(d.floatValue()) / Math.log(3)));
+ assertTrue(twoDoubleEquals(rs.getDouble(3), Math.log(d.intValue()) / Math.log(3)));
+ assertTrue(twoDoubleEquals(rs.getDouble(4), Math.log(d.longValue()) / Math.log(3)));
+ assertTrue(twoDoubleEquals(rs.getDouble(5), Math.log(d.shortValue()) / Math.log(3)));
+ assertTrue(twoDoubleEquals(rs.getDouble(6), Math.log(d.byteValue()) / Math.log(3)));
+ assertTrue(!rs.next());
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ for (double d : new double[] { 0.0, 1.0, -1.0, 123.1234, -123.1234 }) {
+ testNumberSpec(conn, d, "testSigned");
+ if (d >= 0) testNumberSpec(conn, d, "testUnsigned");
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/it/java/org/apache/phoenix/end2end/PowerFunctionEnd2EndIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PowerFunctionEnd2EndIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PowerFunctionEnd2EndIT.java
new file mode 100644
index 0000000..691fb61
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PowerFunctionEnd2EndIT.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.TestUtil.closeStmtAndConn;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+
+import org.apache.phoenix.expression.function.PowerFunction;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * End to end tests for {@link PowerFunction}
+ */
+public class PowerFunctionEnd2EndIT extends BaseHBaseManagedTimeIT {
+
+ private static final String KEY = "key";
+ private static final double ZERO = 1e-9;
+
+ // Returns true when a and b are both NaN, are infinities of the same sign,
+ // or differ by at most ZERO.
+ private static boolean twoDoubleEquals(double a, double b) {
+ if (Double.isNaN(a) ^ Double.isNaN(b)) return false;
+ if (Double.isNaN(a)) return true;
+ if (Double.isInfinite(a) ^ Double.isInfinite(b)) return false;
+ if (Double.isInfinite(a)) {
+ if ((a > 0) ^ (b > 0)) return false;
+ else return true;
+ }
+ return Math.abs(a - b) <= ZERO;
+ }
+
+ @Before
+ public void initTable() throws Exception {
+ Connection conn = null;
+ PreparedStatement stmt = null;
+ try {
+ conn = DriverManager.getConnection(getUrl());
+ String ddl;
+ ddl =
+ "CREATE TABLE testSigned (k VARCHAR NOT NULL PRIMARY KEY, doub DOUBLE, fl FLOAT, inte INTEGER, lon BIGINT, smalli SMALLINT, tinyi TINYINT)";
+ conn.createStatement().execute(ddl);
+ ddl =
+ "CREATE TABLE testUnsigned (k VARCHAR NOT NULL PRIMARY KEY, doub UNSIGNED_DOUBLE, fl UNSIGNED_FLOAT, inte UNSIGNED_INT, lon UNSIGNED_LONG, smalli UNSIGNED_SMALLINT, tinyi UNSIGNED_TINYINT)";
+ conn.createStatement().execute(ddl);
+ conn.commit();
+ } finally {
+ closeStmtAndConn(stmt, conn);
+ }
+ }
+
+ private void updateTableSpec(Connection conn, double data, String tableName) throws Exception {
+ PreparedStatement stmt =
+ conn.prepareStatement("UPSERT INTO " + tableName + " VALUES (?, ?, ?, ?, ?, ?, ?)");
+ stmt.setString(1, KEY);
+ Double d = Double.valueOf(data);
+ stmt.setDouble(2, d.doubleValue());
+ stmt.setFloat(3, d.floatValue());
+ stmt.setInt(4, d.intValue());
+ stmt.setLong(5, d.longValue());
+ stmt.setShort(6, d.shortValue());
+ stmt.setByte(7, d.byteValue());
+ stmt.executeUpdate();
+ conn.commit();
+ }
+
+ private void testNumberSpec(Connection conn, double data, String tableName) throws Exception {
+ updateTableSpec(conn, data, tableName);
+ ResultSet rs =
+ conn.createStatement()
+ .executeQuery(
+ "SELECT POWER(doub, 1.5),POWER(fl, 1.5),POWER(inte, 1.5),POWER(lon, 1.5),POWER(smalli, 1.5),POWER(tinyi, 1.5) FROM "
+ + tableName);
+ assertTrue(rs.next());
+ Double d = Double.valueOf(data);
+ assertTrue(twoDoubleEquals(rs.getDouble(1), Math.pow(d.doubleValue(), 1.5)));
+ assertTrue(twoDoubleEquals(rs.getDouble(2), Math.pow(d.floatValue(), 1.5)));
+ assertTrue(twoDoubleEquals(rs.getDouble(3), Math.pow(d.intValue(), 1.5)));
+ assertTrue(twoDoubleEquals(rs.getDouble(4), Math.pow(d.longValue(), 1.5)));
+ assertTrue(twoDoubleEquals(rs.getDouble(5), Math.pow(d.shortValue(), 1.5)));
+ assertTrue(twoDoubleEquals(rs.getDouble(6), Math.pow(d.byteValue(), 1.5)));
+
+ assertTrue(!rs.next());
+ rs =
+ conn.createStatement()
+ .executeQuery(
+ "SELECT POWER(doub, 2),POWER(fl, 2),POWER(inte, 2),POWER(lon, 2),POWER(smalli, 2),POWER(tinyi, 2) FROM "
+ + tableName);
+ assertTrue(rs.next());
+ d = Double.valueOf(data);
+ assertTrue(twoDoubleEquals(rs.getDouble(1), Math.pow(d.doubleValue(), 2)));
+ assertTrue(twoDoubleEquals(rs.getDouble(2), Math.pow(d.floatValue(), 2)));
+ assertTrue(twoDoubleEquals(rs.getDouble(3), Math.pow(d.intValue(), 2)));
+ assertTrue(twoDoubleEquals(rs.getDouble(4), Math.pow(d.longValue(), 2)));
+ assertTrue(twoDoubleEquals(rs.getDouble(5), Math.pow(d.shortValue(), 2)));
+ assertTrue(twoDoubleEquals(rs.getDouble(6), Math.pow(d.byteValue(), 2)));
+ assertTrue(!rs.next());
+
+ rs =
+ conn.createStatement().executeQuery(
+ "SELECT POWER(doub,3),POWER(fl,3),POWER(inte,3),POWER(lon,3),POWER(smalli,3),POWER(tinyi,3) FROM "
+ + tableName);
+ assertTrue(rs.next());
+ d = Double.valueOf(data);
+ assertTrue(twoDoubleEquals(rs.getDouble(1), Math.pow(d.doubleValue(), 3)));
+ assertTrue(twoDoubleEquals(rs.getDouble(2), Math.pow(d.floatValue(), 3)));
+ assertTrue(twoDoubleEquals(rs.getDouble(3), Math.pow(d.intValue(), 3)));
+ assertTrue(twoDoubleEquals(rs.getDouble(4), Math.pow(d.longValue(), 3)));
+ assertTrue(twoDoubleEquals(rs.getDouble(5), Math.pow(d.shortValue(), 3)));
+ assertTrue(twoDoubleEquals(rs.getDouble(6), Math.pow(d.byteValue(), 3)));
+ assertTrue(!rs.next());
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ for (double d : new double[] { 0.0, 1.0, -1.0, 123.1234, -123.1234 }) {
+ testNumberSpec(conn, d, "testSigned");
+ if (d >= 0) testNumberSpec(conn, d, "testUnsigned");
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
index 684e620..4f98cb8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.expression;
import java.util.Map;
+import org.apache.phoenix.expression.function.AbsFunction;
import org.apache.phoenix.expression.function.ArrayAllComparisonExpression;
import org.apache.phoenix.expression.function.ArrayAnyComparisonExpression;
import org.apache.phoenix.expression.function.ArrayAppendFunction;
@@ -29,6 +30,7 @@ import org.apache.phoenix.expression.function.ArrayPrependFunction;
import org.apache.phoenix.expression.function.ByteBasedRegexpReplaceFunction;
import org.apache.phoenix.expression.function.ByteBasedRegexpSplitFunction;
import org.apache.phoenix.expression.function.ByteBasedRegexpSubstrFunction;
+import org.apache.phoenix.expression.function.CbrtFunction;
import org.apache.phoenix.expression.function.CeilDateExpression;
import org.apache.phoenix.expression.function.CeilDecimalExpression;
import org.apache.phoenix.expression.function.CeilFunction;
@@ -41,6 +43,7 @@ import org.apache.phoenix.expression.function.DecodeFunction;
import org.apache.phoenix.expression.function.DistinctCountAggregateFunction;
import org.apache.phoenix.expression.function.DistinctValueAggregateFunction;
import org.apache.phoenix.expression.function.EncodeFunction;
+import org.apache.phoenix.expression.function.ExpFunction;
import org.apache.phoenix.expression.function.ExternalSqlTypeIdFunction;
import org.apache.phoenix.expression.function.FirstValueFunction;
import org.apache.phoenix.expression.function.FloorDateExpression;
@@ -53,6 +56,8 @@ import org.apache.phoenix.expression.function.InvertFunction;
import org.apache.phoenix.expression.function.LTrimFunction;
import org.apache.phoenix.expression.function.LastValueFunction;
import org.apache.phoenix.expression.function.LengthFunction;
+import org.apache.phoenix.expression.function.LnFunction;
+import org.apache.phoenix.expression.function.LogFunction;
import org.apache.phoenix.expression.function.LowerFunction;
import org.apache.phoenix.expression.function.LpadFunction;
import org.apache.phoenix.expression.function.MD5Function;
@@ -65,6 +70,7 @@ import org.apache.phoenix.expression.function.NthValueFunction;
import org.apache.phoenix.expression.function.PercentRankAggregateFunction;
import org.apache.phoenix.expression.function.PercentileContAggregateFunction;
import org.apache.phoenix.expression.function.PercentileDiscAggregateFunction;
+import org.apache.phoenix.expression.function.PowerFunction;
import org.apache.phoenix.expression.function.RTrimFunction;
import org.apache.phoenix.expression.function.RandomFunction;
import org.apache.phoenix.expression.function.RegexpReplaceFunction;
@@ -233,7 +239,13 @@ public enum ExpressionType {
ArrayAppendFunction(ArrayAppendFunction.class),
UDFExpression(UDFExpression.class),
ArrayPrependFunction(ArrayPrependFunction.class),
- SqrtFunction(SqrtFunction.class)
+ SqrtFunction(SqrtFunction.class),
+ AbsFunction(AbsFunction.class),
+ CbrtFunction(CbrtFunction.class),
+ LnFunction(LnFunction.class),
+ LogFunction(LogFunction.class),
+ ExpFunction(ExpFunction.class),
+ PowerFunction(PowerFunction.class)
;
ExpressionType(Class<? extends Expression> clazz) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/main/java/org/apache/phoenix/expression/function/AbsFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/AbsFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/AbsFunction.java
new file mode 100644
index 0000000..6ef1b38
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/AbsFunction.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.FunctionParseNode.Argument;
+import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PDecimal;
+import org.apache.phoenix.schema.types.PNumericType;
+
+@BuiltInFunction(name = AbsFunction.NAME, args = { @Argument(allowedTypes = PDecimal.class) })
+public class AbsFunction extends ScalarFunction {
+
+ public static final String NAME = "ABS";
+
+ public AbsFunction() {
+ }
+
+ public AbsFunction(List<Expression> children) {
+ super(children);
+ }
+
+ @Override
+ public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
+ Expression childExpr = children.get(0);
+ PDataType dataType = childExpr.getDataType();
+ if (childExpr.evaluate(tuple, ptr)) {
+ byte[] bytes = ptr.get();
+ int offset = ptr.getOffset(), length = ptr.getLength();
+ ptr.set(new byte[getDataType().getByteSize()]);
+ ((PNumericType) dataType).abs(bytes, offset, length, childExpr.getSortOrder(), ptr);
+ return true;
+ }
+ return false;
+ }
+
+ @Override
+ public PDataType getDataType() {
+ return children.get(0).getDataType();
+ }
+
+ @Override
+ public String getName() {
+ return AbsFunction.NAME;
+ }
+}
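
Note that, unlike the JavaMath* functions later in this commit, which always return DOUBLE, ABS preserves its argument's type: getDataType() delegates to the child expression and the per-type abs() is dispatched through PNumericType. A rough plain-Java sketch of that type-preserving dispatch (not Phoenix code):

    import java.math.BigDecimal;

    public class TypePreservingAbs {
        // Each numeric type supplies its own abs, so the result type matches the
        // input. Caveat faithful to fixed-width integers: Math.abs(Integer.MIN_VALUE)
        // is still negative, since +2147483648 is not representable.
        static Number abs(Number n) {
            if (n instanceof BigDecimal) return ((BigDecimal) n).abs();
            if (n instanceof Double)  return Math.abs(n.doubleValue());
            if (n instanceof Float)   return Math.abs(n.floatValue());
            if (n instanceof Long)    return Math.abs(n.longValue());
            if (n instanceof Integer) return Math.abs(n.intValue());
            if (n instanceof Short)   return (short) Math.abs(n.shortValue());
            return (byte) Math.abs(n.byteValue());
        }

        public static void main(String[] args) {
            System.out.println(abs(-123.1234));              // 123.1234, still a Double
            System.out.println(abs(new BigDecimal("-1.5"))); // 1.5, still a BigDecimal
        }
    }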
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CbrtFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CbrtFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CbrtFunction.java
new file mode 100644
index 0000000..1c13924
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CbrtFunction.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.FunctionParseNode.Argument;
+import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
+import org.apache.phoenix.schema.types.PDecimal;
+import org.apache.phoenix.schema.types.PDouble;
+
+@BuiltInFunction(name = CbrtFunction.NAME, args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }) })
+public class CbrtFunction extends JavaMathOneArgumentFunction {
+
+ public static final String NAME = "CBRT";
+
+ public CbrtFunction() {
+ }
+
+ public CbrtFunction(List<Expression> children) throws SQLException {
+ super(children);
+ }
+
+ @Override
+ public String getName() {
+ return NAME;
+ }
+
+ @Override
+ protected double compute(double firstArg) {
+ return Math.cbrt(firstArg);
+ }
+
+ @Override
+ public OrderPreserving preservesOrder() {
+ return OrderPreserving.YES;
+ }
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ExpFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ExpFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ExpFunction.java
new file mode 100644
index 0000000..5c0ca72
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ExpFunction.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.FunctionParseNode.Argument;
+import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
+import org.apache.phoenix.schema.types.PDecimal;
+import org.apache.phoenix.schema.types.PDouble;
+
+@BuiltInFunction(name = ExpFunction.NAME, args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }) })
+public class ExpFunction extends JavaMathOneArgumentFunction {
+
+ public static final String NAME = "EXP";
+
+ public ExpFunction() {
+ }
+
+ public ExpFunction(List<Expression> children) throws SQLException {
+ super(children);
+ }
+
+ @Override
+ public String getName() {
+ return NAME;
+ }
+
+ @Override
+ protected double compute(double firstArg) {
+ return Math.exp(firstArg);
+ }
+
+ @Override
+ public OrderPreserving preservesOrder() {
+ return OrderPreserving.YES;
+ }
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/main/java/org/apache/phoenix/expression/function/JavaMathOneArgumentFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/JavaMathOneArgumentFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/JavaMathOneArgumentFunction.java
index 4ea5367..733f6fc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/JavaMathOneArgumentFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/JavaMathOneArgumentFunction.java
@@ -39,39 +39,30 @@ public abstract class JavaMathOneArgumentFunction extends ScalarFunction {
protected abstract double compute(double firstArg);
- @Override
- public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
- Expression childExpr = children.get(0);
- PDataType returnType = getDataType();
- if (childExpr.evaluate(tuple, ptr)) {
- if (ptr.getLength() == 0) {
- return true;
- }
- double result;
- if (childExpr.getDataType() == PDecimal.INSTANCE) {
- result =
- ((BigDecimal) childExpr.getDataType().toObject(ptr,
- childExpr.getSortOrder())).doubleValue();
- } else {
- result =
- childExpr.getDataType().getCodec()
- .decodeDouble(ptr, childExpr.getSortOrder());
- }
- ptr.set(new byte[returnType.getByteSize()]);
- returnType.getCodec().encodeDouble(compute(result), ptr);
- return true;
+ static double getArg(Expression exp, ImmutableBytesWritable ptr) {
+ if (exp.getDataType() == PDecimal.INSTANCE) {
+ return ((BigDecimal) exp.getDataType().toObject(ptr, exp.getSortOrder())).doubleValue();
} else {
- return false;
+ return exp.getDataType().getCodec().decodeDouble(ptr, exp.getSortOrder());
}
}
@Override
- public PDataType getDataType() {
- return PDouble.INSTANCE;
+ public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
+ PDataType returnType = getDataType();
+
+ Expression arg1Expr = children.get(0);
+ if (!arg1Expr.evaluate(tuple, ptr)) return false;
+ if (ptr.getLength() == 0) return true;
+ double arg1 = getArg(arg1Expr, ptr);
+
+ ptr.set(new byte[returnType.getByteSize()]);
+ returnType.getCodec().encodeDouble(compute(arg1), ptr);
+ return true;
}
@Override
- public OrderPreserving preservesOrder() {
- return OrderPreserving.YES;
+ public PDataType getDataType() {
+ return PDouble.INSTANCE;
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/main/java/org/apache/phoenix/expression/function/JavaMathTwoArgumentFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/JavaMathTwoArgumentFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/JavaMathTwoArgumentFunction.java
new file mode 100644
index 0000000..0d85797
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/JavaMathTwoArgumentFunction.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PDouble;
+import org.apache.phoenix.util.ByteUtil;
+
+public abstract class JavaMathTwoArgumentFunction extends ScalarFunction {
+
+ public JavaMathTwoArgumentFunction() {
+ }
+
+ public JavaMathTwoArgumentFunction(List<Expression> children) throws SQLException {
+ super(children);
+ }
+
+ protected abstract double compute(double firstArg, double secondArg);
+
+ @Override
+ public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
+ PDataType returnType = getDataType();
+
+ Expression arg1Expr = children.get(0);
+ if (!arg1Expr.evaluate(tuple, ptr)) return false;
+ if (ptr.getLength() == 0) return true;
+ double arg1 = JavaMathOneArgumentFunction.getArg(arg1Expr, ptr);
+
+ Expression arg2Expr = (children.size() <= 1) ? null : children.get(1);
+ double arg2;
+ if (arg2Expr != null && !arg2Expr.evaluate(tuple, ptr)) return false;
+ if (arg2Expr == null || ptr.getLength() == 0) {
+ ptr.set(ByteUtil.EMPTY_BYTE_ARRAY);
+ return true;
+ } else {
+ arg2 = JavaMathOneArgumentFunction.getArg(arg2Expr, ptr);
+ }
+
+ ptr.set(new byte[returnType.getByteSize()]);
+ returnType.getCodec().encodeDouble(compute(arg1, arg2), ptr);
+ return true;
+ }
+
+ @Override
+ public PDataType getDataType() {
+ return PDouble.INSTANCE;
+ }
+}
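
In this framework a zero-length value pointer denotes SQL NULL, so evaluate() above yields a NULL result as soon as either argument is NULL and compute() is never invoked. A self-contained sketch of that null-propagation convention, with a plain byte[] standing in for ImmutableBytesWritable:

    import java.nio.ByteBuffer;

    public class NullPropagationSketch {
        // Stand-in for ImmutableBytesWritable: zero length means SQL NULL.
        private static final byte[] SQL_NULL = new byte[0];

        static byte[] log(byte[] value, byte[] base) {
            if (value.length == 0 || base.length == 0) {
                return SQL_NULL; // NULL in, NULL out; compute() is never reached
            }
            double result = Math.log(toDouble(value)) / Math.log(toDouble(base));
            return ByteBuffer.allocate(8).putDouble(result).array();
        }

        private static double toDouble(byte[] b) {
            return ByteBuffer.wrap(b).getDouble();
        }

        public static void main(String[] args) {
            byte[] eight = ByteBuffer.allocate(8).putDouble(8.0).array();
            byte[] two = ByteBuffer.allocate(8).putDouble(2.0).array();
            System.out.println(toDouble(log(eight, two)));        // within 1e-15 of 3.0
            System.out.println(log(eight, SQL_NULL).length == 0); // true: NULL result
        }
    }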
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LnFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LnFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LnFunction.java
new file mode 100644
index 0000000..4275336
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LnFunction.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.FunctionParseNode.Argument;
+import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
+import org.apache.phoenix.schema.types.PDecimal;
+import org.apache.phoenix.schema.types.PDouble;
+
+@BuiltInFunction(name = LnFunction.NAME, args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }) })
+public class LnFunction extends JavaMathOneArgumentFunction {
+
+ public static final String NAME = "LN";
+
+ public LnFunction() {
+ }
+
+ public LnFunction(List<Expression> children) throws SQLException {
+ super(children);
+ }
+
+ @Override
+ public String getName() {
+ return NAME;
+ }
+
+ @Override
+ protected double compute(double firstArg) {
+ return Math.log(firstArg);
+ }
+
+ @Override
+ public OrderPreserving preservesOrder() {
+ return OrderPreserving.YES;
+ }
+}
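Note that LnFunction delegates straight to Math.log, so non-positive arguments never raise a SQL error: Java defines log(0) as -Infinity and the log of a negative number as NaN, and those values flow through as the DOUBLE result. A quick stand-alone check of the inherited semantics:

    public class LnEdgeCases {
        public static void main(String[] args) {
            System.out.println(Math.log(Math.E)); // 1.0
            System.out.println(Math.log(0.0));    // -Infinity
            System.out.println(Math.log(-1.0));   // NaN -- propagated, not thrown
        }
    }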
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LogFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LogFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LogFunction.java
new file mode 100644
index 0000000..87b9a79
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/LogFunction.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.FunctionParseNode.Argument;
+import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
+import org.apache.phoenix.schema.types.PDecimal;
+import org.apache.phoenix.schema.types.PDouble;
+
+@BuiltInFunction(name = LogFunction.NAME, args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }),
+ @Argument(allowedTypes = { PDouble.class, PDecimal.class }, defaultValue = "1e1") })
+public class LogFunction extends JavaMathTwoArgumentFunction {
+
+ public static final String NAME = "LOG";
+
+ public LogFunction() {
+ }
+
+ public LogFunction(List<Expression> children) throws SQLException {
+ super(children);
+ }
+
+ @Override
+ public String getName() {
+ return NAME;
+ }
+
+ @Override
+ protected double compute(double firstArg, double secondArg) {
+ return Math.log(firstArg) / Math.log(secondArg);
+ }
+
+ @Override
+ public OrderPreserving preservesOrder() {
+ return OrderPreserving.YES;
+ }
+}
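LogFunction computes the logarithm of firstArg in base secondArg via the change-of-base identity log_b(x) = ln(x) / ln(b), and the defaultValue of "1e1" on the second argument makes single-argument LOG(x) a base-10 logarithm. A small sanity check of the same formula:

    public class LogBaseCheck {
        public static void main(String[] args) {
            double x = 1000.0, base = 10.0;
            double result = Math.log(x) / Math.log(base); // identical to LogFunction.compute
            System.out.println(result); // ~3.0 -- compare with a tolerance, as the tests do
        }
    }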
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/main/java/org/apache/phoenix/expression/function/PowerFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/PowerFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/PowerFunction.java
new file mode 100644
index 0000000..1125ce1
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/PowerFunction.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.FunctionParseNode.Argument;
+import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
+import org.apache.phoenix.schema.types.PDecimal;
+import org.apache.phoenix.schema.types.PDouble;
+
+@BuiltInFunction(name = PowerFunction.NAME, args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }),
+ @Argument(allowedTypes = { PDouble.class, PDecimal.class }) })
+public class PowerFunction extends JavaMathTwoArgumentFunction {
+
+ public static final String NAME = "POWER";
+
+ public PowerFunction() {
+ }
+
+ public PowerFunction(List<Expression> children) throws SQLException {
+ super(children);
+ }
+
+ @Override
+ public String getName() {
+ return NAME;
+ }
+
+ @Override
+ protected double compute(double firstArg, double secondArg) {
+ return Math.pow(firstArg, secondArg);
+ }
+}
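Unlike LN, LOG, and SQRT in this patch, PowerFunction does not override preservesOrder(): for a fixed exponent, x^y is not monotonic in x once negative bases are allowed, so POWER cannot safely feed the order-by optimization. A two-line demonstration:

    public class PowerNotMonotonic {
        public static void main(String[] args) {
            // -2 < 1, yet POWER(-2, 2) > POWER(1, 2), so row-key order is not preserved.
            System.out.println(Math.pow(-2, 2)); // 4.0
            System.out.println(Math.pow(1, 2));  // 1.0
        }
    }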
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ScalarFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ScalarFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ScalarFunction.java
index 014bda4..4f44cde 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ScalarFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ScalarFunction.java
@@ -65,13 +65,15 @@ public abstract class ScalarFunction extends FunctionExpression {
/**
* Determines whether or not a function may be used to form
* the start/stop key of a scan
+ * When preservesOrder() returns YES, this method should return 0 so that
+ * the order-by optimization remains valid (refer to {@link RoundDateExpression}).
* @return the zero-based position of the argument to traverse
* into to look for a primary key column reference, or
* {@value #NO_TRAVERSAL} if the function cannot be used to
* form the scan key.
*/
public int getKeyFormationTraversalIndex() {
- return NO_TRAVERSAL;
+ return preservesOrder() == OrderPreserving.NO ? NO_TRAVERSAL : 0;
}
/**
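The net effect of this change: any ScalarFunction that reports OrderPreserving.YES now also advertises key-formation traversal index 0, so the compiler may follow the function's first child down to a primary-key column when deciding whether an ORDER BY can ride the row key. The QueryCompilerTest additions below exercise this end to end; a condensed sketch using the same APIs (assumes the tests' connectionless getUrl() and a table like the one they create):

    Connection conn = DriverManager.getConnection(getUrl());
    conn.createStatement().execute("CREATE TABLE t (k1 INTEGER not null, k2 double not null,"
            + " v varchar, constraint pk primary key(k1, k2))");
    QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class)
            .compileQuery("SELECT * FROM t ORDER BY k1, LN(k2)");
    // LnFunction.preservesOrder() == YES, so getKeyFormationTraversalIndex() == 0 and
    // the ORDER BY is satisfied by row-key order; no client-side sort is planned.
    assert plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY;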
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SqrtFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SqrtFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SqrtFunction.java
index bb5376e..260305a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SqrtFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/SqrtFunction.java
@@ -24,8 +24,9 @@ import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.parse.FunctionParseNode.Argument;
import org.apache.phoenix.parse.FunctionParseNode.BuiltInFunction;
import org.apache.phoenix.schema.types.PDecimal;
+import org.apache.phoenix.schema.types.PDouble;
-@BuiltInFunction(name = SqrtFunction.NAME, args = { @Argument(allowedTypes = { PDecimal.class }) })
+@BuiltInFunction(name = SqrtFunction.NAME, args = { @Argument(allowedTypes = { PDouble.class, PDecimal.class }) })
public class SqrtFunction extends JavaMathOneArgumentFunction {
public static final String NAME = "SQRT";
@@ -46,4 +47,9 @@ public class SqrtFunction extends JavaMathOneArgumentFunction {
protected double compute(double firstArg) {
return Math.sqrt(firstArg);
}
+
+ @Override
+ public OrderPreserving preservesOrder() {
+ return OrderPreserving.YES;
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDecimal.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDecimal.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDecimal.java
index 199ed28..228aef1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDecimal.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDecimal.java
@@ -421,4 +421,15 @@ public class PDecimal extends PRealNumber<BigDecimal> {
}
return ((signByte & 0x80) == 0) ? -1 : 1;
}
+
+ @Override
+ public void abs(byte[] bytes, int offset, int length, SortOrder sortOrder,
+ ImmutableBytesWritable outPtr) {
+ if (sortOrder == SortOrder.DESC) {
+ bytes = SortOrder.invert(bytes, offset, new byte[length], 0, length);
+ offset = 0;
+ }
+ BigDecimal bigDecimal = toBigDecimal(bytes, offset, length);
+ outPtr.set(toBytes(bigDecimal.abs()));
+ }
}
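A round trip through the new PDecimal.abs shows why the DESC branch un-inverts the bytes first: DESC-encoded values store every byte complemented, and toBigDecimal can only decode the ASC form. A minimal sketch (abs always writes ASC-encoded output, and toObject with a SortOrder is the same call the unit tests below use):

    ImmutableBytesWritable out = new ImmutableBytesWritable();
    byte[] asc = PDecimal.INSTANCE.toBytes(BigDecimal.valueOf(-123.45));
    PDecimal.INSTANCE.abs(asc, 0, asc.length, SortOrder.ASC, out);
    System.out.println(PDecimal.INSTANCE.toObject(out, SortOrder.ASC)); // 123.45

    // DESC input: every byte is inverted on disk, so abs() must invert back first.
    byte[] desc = SortOrder.invert(asc, 0, new byte[asc.length], 0, asc.length);
    PDecimal.INSTANCE.abs(desc, 0, desc.length, SortOrder.DESC, out);
    System.out.println(PDecimal.INSTANCE.toObject(out, SortOrder.ASC)); // 123.45 again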
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PNumericType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PNumericType.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PNumericType.java
index 631ac8d..826d9ad 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PNumericType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PNumericType.java
@@ -41,4 +41,12 @@ public abstract class PNumericType<T> extends PDataType<T> {
abstract public int signum(byte[] bytes, int offset, int length, SortOrder sortOrder,
Integer maxLength, Integer scale);
+
+ abstract public void abs(byte[] bytes, int offset, int length, SortOrder sortOrder,
+ ImmutableBytesWritable outPtr);
+
+ public final void abs(ImmutableBytesWritable ptr, SortOrder sortOrder,
+ ImmutableBytesWritable outPtr) {
+ abs(ptr.get(), ptr.getOffset(), ptr.getLength(), sortOrder, outPtr);
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PRealNumber.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PRealNumber.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PRealNumber.java
index d074511..4cab433 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PRealNumber.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PRealNumber.java
@@ -17,6 +17,7 @@
*/
package org.apache.phoenix.schema.types;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.schema.IllegalDataException;
import org.apache.phoenix.schema.SortOrder;
@@ -36,4 +37,11 @@ public abstract class PRealNumber<T> extends PNumericType<T> {
}
return (d > 0) ? 1 : ((d < 0) ? -1 : 0);
}
+
+ @Override
+ public void abs(byte[] bytes, int offset, int length, SortOrder sortOrder,
+ ImmutableBytesWritable outPtr) {
+ double d = getCodec().decodeDouble(bytes, offset, sortOrder);
+ getCodec().encodeDouble(Math.abs(d), outPtr);
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PWholeNumber.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PWholeNumber.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PWholeNumber.java
index f1c7d13..a3a1d13 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PWholeNumber.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PWholeNumber.java
@@ -17,6 +17,7 @@
*/
package org.apache.phoenix.schema.types;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.schema.SortOrder;
public abstract class PWholeNumber<T> extends PNumericType<T> {
@@ -32,4 +33,11 @@ public abstract class PWholeNumber<T> extends PNumericType<T> {
long l = getCodec().decodeLong(bytes, offset, sortOrder);
return Long.signum(l);
}
+
+ @Override
+ public void abs(byte[] bytes, int offset, int length, SortOrder sortOrder,
+ ImmutableBytesWritable outPtr) {
+ long l = getCodec().decodeLong(bytes, offset, sortOrder);
+ getCodec().encodeLong(Math.abs(l), outPtr);
+ }
}
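One caveat this implementation inherits from java.lang.Math, and which the unit tests below sidestep by using Long.MIN_VALUE + 1 and Integer.MIN_VALUE + 1 rather than the minima themselves: Math.abs of the most negative two's-complement value overflows back to itself.

    public class AbsOverflow {
        public static void main(String[] args) {
            System.out.println(Math.abs(Long.MIN_VALUE));     // -9223372036854775808, unchanged
            System.out.println(Math.abs(Long.MIN_VALUE + 1)); //  9223372036854775807, as expected
        }
    }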
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
index 7be8eae..79721df 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
@@ -34,6 +34,7 @@ import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
+import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
@@ -1634,7 +1635,72 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
assertLiteralEquals(oneMoreThanMaxLong, p, 11);
}
-
+ @Test
+ public void testMathFunctionOrderByOrderPreservingFwd() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ conn.createStatement().execute("CREATE TABLE t (k1 INTEGER not null, k2 double not null, k3 BIGINT not null, v varchar, constraint pk primary key(k1,k2,k3))");
+ /*
+ * "SELECT * FROM T ORDER BY k1, k2",
+ * "SELECT * FROM T ORDER BY k1, SIGN(k2)",
+ * "SELECT * FROM T ORDER BY SIGN(k1), k2",
+ */
+ List<String> queryList = new ArrayList<String>();
+ queryList.add("SELECT * FROM T ORDER BY k1, k2");
+ for (String sub : new String[] { "SIGN", "CBRT", "LN", "LOG", "EXP" }) {
+ queryList.add(String.format("SELECT * FROM T ORDER BY k1, %s(k2)", sub));
+ queryList.add(String.format("SELECT * FROM T ORDER BY %s(k1), k2", sub));
+ }
+ String[] queries = queryList.toArray(new String[queryList.size()]);
+ for (int i = 0; i < queries.length; i++) {
+ String query = queries[i];
+ QueryPlan plan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query);
+ assertTrue(plan.getOrderBy() == OrderBy.FWD_ROW_KEY_ORDER_BY);
+ }
+ // Negative test
+ queryList.clear();
+ for (String sub : new String[] { "SIGN", "CBRT", "LN", "LOG", "EXP" }) {
+ queryList.add(String.format("SELECT * FROM T WHERE %s(k2)=2.0", sub));
+ }
+ for (String query : queryList.toArray(new String[queryList.size()])) {
+ Scan scan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query).getContext().getScan();
+ assertNotNull(scan.getFilter());
+ assertTrue(scan.getStartRow().length == 0);
+ assertTrue(scan.getStopRow().length == 0);
+ }
+ }
+
+ @Test
+ public void testMathFunctionOrderByOrderPreservingRev() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ conn.createStatement().execute("CREATE TABLE t (k1 INTEGER not null, k2 double not null, k3 BIGINT not null, v varchar, constraint pk primary key(k1,k2 DESC,k3))");
+ List<String> queryList = new ArrayList<String>();
+ // "SELECT * FROM T ORDER BY k1 DESC, SIGN(k2) DESC, k3 DESC"
+ queryList.add("SELECT * FROM T ORDER BY k1 DESC");
+ queryList.add("SELECT * FROM T ORDER BY k1 DESC, k2");
+ queryList.add("SELECT * FROM T ORDER BY k1 DESC, k2, k3 DESC");
+ for (String sub : new String[] { "SIGN", "CBRT", "LN", "LOG", "EXP" }) {
+ queryList.add(String.format("SELECT * FROM T ORDER BY k1 DESC, %s(k2) DESC, k3 DESC", sub));
+ }
+ String[] queries = queryList.toArray(new String[queryList.size()]);
+ for (int i = 0; i < queries.length; i++) {
+ String query = queries[i];
+ QueryPlan plan =
+ conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query);
+ assertTrue(query, plan.getOrderBy() == OrderBy.REV_ROW_KEY_ORDER_BY);
+ }
+ // Negative test
+ queryList.clear();
+ for (String sub : new String[] { "SIGN", "CBRT", "LN", "LOG", "EXP" }) {
+ queryList.add(String.format("SELECT * FROM T WHERE %s(k2)=2.0", sub));
+ }
+ for (String query : queryList.toArray(new String[queryList.size()])) {
+ Scan scan = conn.createStatement().unwrap(PhoenixStatement.class).compileQuery(query).getContext().getScan();
+ assertNotNull(scan.getFilter());
+ assertTrue(scan.getStartRow().length == 0);
+ assertTrue(scan.getStopRow().length == 0);
+ }
+ }
+
@Test
public void testOrderByOrderPreservingFwd() throws Exception {
Connection conn = DriverManager.getConnection(getUrl());
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/test/java/org/apache/phoenix/expression/AbsFunctionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/AbsFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/AbsFunctionTest.java
new file mode 100644
index 0000000..46c0ed0
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/AbsFunctionTest.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.math.BigDecimal;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.function.AbsFunction;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PDecimal;
+import org.apache.phoenix.schema.types.PDouble;
+import org.apache.phoenix.schema.types.PFloat;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.schema.types.PNumericType;
+import org.apache.phoenix.schema.types.PSmallint;
+import org.apache.phoenix.schema.types.PTinyint;
+import org.apache.phoenix.schema.types.PUnsignedDouble;
+import org.apache.phoenix.schema.types.PUnsignedFloat;
+import org.apache.phoenix.schema.types.PUnsignedInt;
+import org.apache.phoenix.schema.types.PUnsignedLong;
+import org.apache.phoenix.schema.types.PUnsignedSmallint;
+import org.apache.phoenix.schema.types.PUnsignedTinyint;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Unit tests for {@link AbsFunction}
+ */
+public class AbsFunctionTest {
+
+ private static void testExpression(LiteralExpression literal, Number expected)
+ throws SQLException {
+ List<Expression> expressions = Lists.newArrayList((Expression) literal);
+ Expression absFunction = new AbsFunction(expressions);
+ ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+ absFunction.evaluate(null, ptr);
+ Number result =
+ (Number) absFunction.getDataType().toObject(ptr, absFunction.getSortOrder());
+ assertTrue(result.getClass().equals(expected.getClass()));
+ if (result instanceof BigDecimal) {
+ assertTrue(((BigDecimal) result).compareTo((BigDecimal) expected) == 0);
+ } else {
+ assertTrue(result.equals(expected));
+ }
+ }
+
+ private static void test(Number value, PNumericType dataType, Number expected)
+ throws SQLException {
+ LiteralExpression literal;
+ literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC);
+ testExpression(literal, expected);
+ literal = LiteralExpression.newConstant(value, dataType, SortOrder.DESC);
+ testExpression(literal, expected);
+ }
+
+ private static void
+ testBatch(Number[] value, PNumericType dataType, ArrayList<Number> expected)
+ throws SQLException {
+ assertEquals(value.length, expected.size());
+ for (int i = 0; i < value.length; ++i) {
+ test(value[i], dataType, expected.get(i));
+ }
+ }
+
+ @Test
+ public void testAbsFunction() throws Exception {
+ Random random = new Random();
+ Number[] value;
+ ArrayList<Number> expected = new ArrayList<Number>();
+ value = new BigDecimal[] { BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0),
+ BigDecimal.valueOf(-1.0), BigDecimal.valueOf(123.1234),
+ BigDecimal.valueOf(-123.1234) };
+ expected.clear();
+ for (int i = 0; i < value.length; ++i)
+ expected.add(((BigDecimal) value[i]).abs());
+ testBatch(value, PDecimal.INSTANCE, expected);
+
+ value = new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, Float.MIN_VALUE,
+ Float.MAX_VALUE, -Float.MIN_VALUE, -Float.MAX_VALUE, random.nextFloat(),
+ random.nextFloat(), random.nextFloat() };
+ expected.clear();
+ for (int i = 0; i < value.length; ++i)
+ expected.add(Math.abs((Float) value[i]));
+ testBatch(value, PFloat.INSTANCE, expected);
+
+ value = new Float[] { 1.0f, 0.0f, 123.1234f, Float.MIN_VALUE, Float.MAX_VALUE, };
+ expected.clear();
+ for (int i = 0; i < value.length; ++i)
+ expected.add(Math.abs((Float) value[i]));
+ testBatch(value, PUnsignedFloat.INSTANCE, expected);
+
+ value = new Double[] { 1.0, 0.0, -1.0, 123.1234, -123.1234, Double.MIN_VALUE,
+ Double.MAX_VALUE, -Double.MIN_VALUE, -Double.MAX_VALUE,
+ random.nextDouble(), random.nextDouble(), random.nextDouble() };
+ expected.clear();
+ for (int i = 0; i < value.length; ++i)
+ expected.add(Math.abs((Double) value[i]));
+ testBatch(value, PDouble.INSTANCE, expected);
+
+ value = new Double[] { 1.0, 0.0, 123.1234, Double.MIN_VALUE, Double.MAX_VALUE, };
+ expected.clear();
+ for (int i = 0; i < value.length; ++i)
+ expected.add(Math.abs((Double) value[i]));
+ testBatch(value, PUnsignedDouble.INSTANCE, expected);
+
+ value = new Long[] { 1L, 0L, -1L, 123L, -123L, Long.MIN_VALUE + 1, Long.MAX_VALUE,
+ random.nextLong(), random.nextLong(), random.nextLong(), };
+ expected.clear();
+ for (int i = 0; i < value.length; ++i)
+ expected.add(Math.abs((Long) value[i]));
+ testBatch(value, PLong.INSTANCE, expected);
+
+ value = new Long[] { 1L, 0L, 123L, Long.MAX_VALUE };
+ expected.clear();
+ for (int i = 0; i < value.length; ++i)
+ expected.add(Math.abs((Long) value[i]));
+ testBatch(value, PUnsignedLong.INSTANCE, expected);
+
+ value = new Integer[] { 1, 0, -1, 123, -123, Integer.MIN_VALUE + 1, Integer.MAX_VALUE,
+ random.nextInt(), random.nextInt(), random.nextInt(), };
+ expected.clear();
+ for (int i = 0; i < value.length; ++i)
+ expected.add(Math.abs((Integer) value[i]));
+ testBatch(value, PInteger.INSTANCE, expected);
+
+ value = new Integer[] { 1, 0, 123, Integer.MAX_VALUE };
+ expected.clear();
+ for (int i = 0; i < value.length; ++i)
+ expected.add(Math.abs((Integer) value[i]));
+ testBatch(value, PUnsignedInt.INSTANCE, expected);
+
+ value = new Short[] { 1, 0, -1, 123, -123, Short.MIN_VALUE + 1, Short.MAX_VALUE };
+ expected.clear();
+ for (int i = 0; i < value.length; ++i)
+ expected.add((short) Math.abs((Short) value[i]));
+ testBatch(value, PSmallint.INSTANCE, expected);
+
+ value = new Short[] { 1, 0, 123, Short.MAX_VALUE };
+ expected.clear();
+ for (int i = 0; i < value.length; ++i)
+ expected.add((short) Math.abs((Short) value[i]));
+ testBatch(value, PUnsignedSmallint.INSTANCE, expected);
+
+ value = new Byte[] { 1, 0, -1, 123, -123, Byte.MIN_VALUE + 1, Byte.MAX_VALUE };
+ expected.clear();
+ for (int i = 0; i < value.length; ++i)
+ expected.add((byte) Math.abs((Byte) value[i]));
+ testBatch(value, PTinyint.INSTANCE, expected);
+
+ value = new Byte[] { 1, 0, 123, Byte.MAX_VALUE };
+ expected.clear();
+ for (int i = 0; i < value.length; ++i)
+ expected.add((byte) Math.abs((Byte) value[i]));
+ testBatch(value, PUnsignedTinyint.INSTANCE, expected);
+ }
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c2927dde/phoenix-core/src/test/java/org/apache/phoenix/expression/CbrtFunctionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/CbrtFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/CbrtFunctionTest.java
new file mode 100644
index 0000000..2084896
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/CbrtFunctionTest.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.math.BigDecimal;
+import java.sql.SQLException;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.function.CbrtFunction;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PDecimal;
+import org.apache.phoenix.schema.types.PDouble;
+import org.apache.phoenix.schema.types.PFloat;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.schema.types.PNumericType;
+import org.apache.phoenix.schema.types.PSmallint;
+import org.apache.phoenix.schema.types.PTinyint;
+import org.apache.phoenix.schema.types.PUnsignedDouble;
+import org.apache.phoenix.schema.types.PUnsignedFloat;
+import org.apache.phoenix.schema.types.PUnsignedInt;
+import org.apache.phoenix.schema.types.PUnsignedLong;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Unit tests for {@link CbrtFunction}
+ */
+public class CbrtFunctionTest {
+
+ private static void testExpression(LiteralExpression literal, double expected)
+ throws SQLException {
+ List<Expression> expressions = Lists.newArrayList((Expression) literal);
+ Expression cbrtFunction = new CbrtFunction(expressions);
+ ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+ cbrtFunction.evaluate(null, ptr);
+ Double result =
+ (Double) cbrtFunction.getDataType().toObject(ptr, cbrtFunction.getSortOrder());
+ assertTrue(Math.abs(result.doubleValue() - expected) <= 1e-9);
+ }
+
+ private static void test(Number value, PNumericType dataType, double expected)
+ throws SQLException {
+ LiteralExpression literal;
+ literal = LiteralExpression.newConstant(value, dataType, SortOrder.ASC);
+ testExpression(literal, expected);
+ literal = LiteralExpression.newConstant(value, dataType, SortOrder.DESC);
+ testExpression(literal, expected);
+ }
+
+ private static void testBatch(Number[] value, PNumericType dataType) throws SQLException {
+ double[] expected = new double[value.length];
+ for (int i = 0; i < expected.length; ++i) {
+ expected[i] = Math.cbrt(value[i].doubleValue());
+ }
+ assertEquals(value.length, expected.length);
+ for (int i = 0; i < value.length; ++i) {
+ test(value[i], dataType, expected[i]);
+ }
+ }
+
+ @Test
+ public void testCbrtFunction() throws Exception {
+ Random random = new Random();
+
+ testBatch(
+ new BigDecimal[] { BigDecimal.valueOf(1.0), BigDecimal.valueOf(0.0),
+ BigDecimal.valueOf(-1.0), BigDecimal.valueOf(123.1234),
+ BigDecimal.valueOf(-123.1234), BigDecimal.valueOf(random.nextDouble()),
+ BigDecimal.valueOf(random.nextDouble()) }, PDecimal.INSTANCE);
+
+ testBatch(new Float[] { 1.0f, 0.0f, -1.0f, 123.1234f, -123.1234f, random.nextFloat(),
+ random.nextFloat() }, PFloat.INSTANCE);
+
+ testBatch(new Float[] { 1.0f, 0.0f, 123.1234f, }, PUnsignedFloat.INSTANCE);
+
+ testBatch(
+ new Double[] { 1.0, 0.0, -1.0, 123.1234, -123.1234, random.nextDouble(),
+ random.nextDouble() }, PDouble.INSTANCE);
+
+ testBatch(new Double[] { 1.0, 0.0, 123.1234, }, PUnsignedDouble.INSTANCE);
+
+ testBatch(
+ new Long[] { 1L, 0L, -1L, Long.MAX_VALUE, Long.MIN_VALUE, 123L, -123L,
+ random.nextLong(), random.nextLong() }, PLong.INSTANCE);
+
+ testBatch(new Long[] { 1L, 0L, Long.MAX_VALUE, 123L }, PUnsignedLong.INSTANCE);
+
+ testBatch(
+ new Integer[] { 1, 0, -1, Integer.MAX_VALUE, Integer.MIN_VALUE, 123, -123,
+ random.nextInt(), random.nextInt() }, PInteger.INSTANCE);
+
+ testBatch(new Integer[] { 1, 0, Integer.MAX_VALUE, 123 }, PUnsignedInt.INSTANCE);
+
+ testBatch(new Short[] { (short) 1, (short) 0, (short) -1, Short.MAX_VALUE, Short.MIN_VALUE,
+ (short) 123, (short) -123 }, PSmallint.INSTANCE);
+
+ testBatch(new Short[] { (short) 1, (short) 0, Short.MAX_VALUE, (short) 123 },
+ PSmallint.INSTANCE);
+
+ testBatch(new Byte[] { (byte) 1, (byte) 0, (byte) -1, Byte.MAX_VALUE, Byte.MIN_VALUE,
+ (byte) 123, (byte) -123 }, PTinyint.INSTANCE);
+
+ testBatch(new Byte[] { (byte) 1, (byte) 0, Byte.MAX_VALUE, (byte) 123 }, PTinyint.INSTANCE);
+ }
+}
[30/47] phoenix git commit: PHOENIX-1819 Build a framework to capture
and report phoenix client side request level metrics
Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
index 8eaeefb..4347acd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
@@ -27,6 +27,7 @@ import java.io.Reader;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
+import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.util.ArrayList;
@@ -34,6 +35,7 @@ import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.TreeSet;
@@ -61,8 +63,9 @@ import org.apache.phoenix.expression.OrderByExpression;
import org.apache.phoenix.expression.RowKeyColumnExpression;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
-import org.apache.phoenix.monitoring.Metric;
-import org.apache.phoenix.monitoring.PhoenixMetrics;
+import org.apache.phoenix.jdbc.PhoenixResultSet;
+import org.apache.phoenix.monitoring.GlobalClientMetrics;
+import org.apache.phoenix.monitoring.GlobalMetric;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.schema.AmbiguousColumnException;
import org.apache.phoenix.schema.ColumnNotFoundException;
@@ -142,7 +145,7 @@ public class PhoenixRuntime {
public static final String ANNOTATION_ATTRIB_PREFIX = "phoenix.annotation.";
/**
- * Use this connection property to explicity enable or disable auto-commit on a new connection.
+ * Use this connection property to explicitly enable or disable auto-commit on a new connection.
*/
public static final String AUTO_COMMIT_ATTRIB = "AutoCommit";
@@ -157,6 +160,11 @@ public class PhoenixRuntime {
* upserting data into them, and getting the uncommitted state through {@link #getUncommittedData(Connection)}
*/
public final static String CONNECTIONLESS = "none";
+
+ /**
+ * Use this connection property to explicitly enable or disable request level metric collection.
+ */
+ public static final String REQUEST_METRIC_ATTRIB = "RequestMetric";
private static final String HEADER_IN_LINE = "in-line";
private static final String SQL_FILE_EXT = ".sql";
@@ -985,9 +993,162 @@ public class PhoenixRuntime {
}
/**
- * Exposes the various internal phoenix metrics.
+ * Exposes the various internal phoenix metrics collected at the client JVM level.
+ */
+ public static Collection<GlobalMetric> getGlobalPhoenixClientMetrics() {
+ return GlobalClientMetrics.getMetrics();
+ }
+
+ /**
+ *
+ * @return whether or not the global client metrics are being collected
*/
- public static Collection<Metric> getInternalPhoenixMetrics() {
- return PhoenixMetrics.getMetrics();
+ public static boolean areGlobalClientMetricsBeingCollected() {
+ return GlobalClientMetrics.isMetricsEnabled();
}
-}
+
+ /**
+ * Method to expose the metrics associated with performing reads using the passed result set. A typical pattern is:
+ *
+ * <pre>
+ * {@code
+ * Map<String, Long> overAllQueryMetrics = null;
+ * Map<String, Map<String, Long>> requestReadMetrics = null;
+ * try (ResultSet rs = stmt.executeQuery()) {
+ * while(rs.next()) {
+ * .....
+ * }
+ * overAllQueryMetrics = PhoenixRuntime.getOverAllReadRequestMetrics(rs);
+ * requestReadMetrics = PhoenixRuntime.getRequestReadMetrics(rs);
+ * PhoenixRuntime.resetMetrics(rs);
+ * }
+ * </pre>
+ *
+ * @param rs
+ * result set to get the metrics for
+ * @return a map of (table name) -> (map of (metric name) -> (metric value))
+ * @throws SQLException
+ */
+ public static Map<String, Map<String, Long>> getRequestReadMetrics(ResultSet rs) throws SQLException {
+ PhoenixResultSet resultSet = rs.unwrap(PhoenixResultSet.class);
+ return resultSet.getReadMetrics();
+ }
+
+ /**
+ * Method to expose the overall metrics associated with executing a query via phoenix. A typical pattern of
+ * accessing request level read metrics and overall read query metrics is:
+ *
+ * <pre>
+ * {@code
+ * Map<String, Long> overAllQueryMetrics = null;
+ * Map<String, Map<String, Long>> requestReadMetrics = null;
+ * try (ResultSet rs = stmt.executeQuery()) {
+ * while(rs.next()) {
+ * .....
+ * }
+ * overAllQueryMetrics = PhoenixRuntime.getOverAllReadRequestMetrics(rs);
+ * requestReadMetrics = PhoenixRuntime.getRequestReadMetrics(rs);
+ * PhoenixRuntime.resetMetrics(rs);
+ * }
+ * </pre>
+ *
+ * @param rs
+ * result set to get the metrics for
+ * @return a map of metric name -> metric value
+ * @throws SQLException
+ */
+ public static Map<String, Long> getOverAllReadRequestMetrics(ResultSet rs) throws SQLException {
+ PhoenixResultSet resultSet = rs.unwrap(PhoenixResultSet.class);
+ return resultSet.getOverAllRequestReadMetrics();
+ }
+
+ /**
+ * Method to expose the metrics associated with sending over mutations to HBase. These metrics are updated when
+ * commit is called on the passed connection. Mutation metrics are accumulated for the connection till
+ * {@link #resetMetrics(Connection)} is called or the connection is closed. Example usage:
+ *
+ * <pre>
+ * {@code
+ * Map<String, Map<String, Long>> mutationWriteMetrics = null;
+ * Map<String, Map<String, Long>> mutationReadMetrics = null;
+ * try (Connection conn = DriverManager.getConnection(url)) {
+ * conn.createStatement().executeUpdate(dml1);
+ * ....
+ * conn.createStatement().executeUpdate(dml2);
+ * ...
+ * conn.createStatement().executeUpdate(dml3);
+ * ...
+ * conn.commit();
+ * mutationWriteMetrics = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(conn);
+ * mutationReadMetrics = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(conn);
+ * PhoenixRuntime.resetMetrics(conn);
+ * }
+ * </pre>
+ *
+ * @param conn
+ * connection to get the metrics for
+ * @return a map of (table name) -> (map of (metric name) -> (metric value))
+ * @throws SQLException
+ */
+ public static Map<String, Map<String, Long>> getWriteMetricsForMutationsSinceLastReset(Connection conn) throws SQLException {
+ PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
+ return pConn.getMutationMetrics();
+ }
+
+ /**
+ * Method to expose the read metrics associated with executing a dml statement. These metrics are updated when
+ * commit is called on the passed connection. Read metrics are accumulated till {@link #resetMetrics(Connection)} is
+ * called or the connection is closed. Example usage:
+ *
+ * <pre>
+ * {@code
+ * Map<String, Map<String, Long>> mutationWriteMetrics = null;
+ * Map<String, Map<String, Long>> mutationReadMetrics = null;
+ * try (Connection conn = DriverManager.getConnection(url)) {
+ * conn.createStatement().executeUpdate(dml1);
+ * ....
+ * conn.createStatement().executeUpdate(dml2);
+ * ...
+ * conn.createStatement().executeUpdate(dml3);
+ * ...
+ * conn.commit();
+ * mutationWriteMetrics = PhoenixRuntime.getWriteMetricsForMutationsSinceLastReset(conn);
+ * mutationReadMetrics = PhoenixRuntime.getReadMetricsForMutationsSinceLastReset(conn);
+ * PhoenixRuntime.resetMetrics(conn);
+ * }
+ * </pre>
+ * @param conn
+ * connection to get the metrics for
+ * @return a map of (table name) -> (map of (metric name) -> (metric value))
+ * @throws SQLException
+ */
+ public static Map<String, Map<String, Long>> getReadMetricsForMutationsSinceLastReset(Connection conn) throws SQLException {
+ PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
+ return pConn.getReadMetrics();
+ }
+
+ /**
+ * Reset the read metrics collected in the result set.
+ *
+ * @see {@link #getRequestReadMetrics(ResultSet)} {@link #getOverAllReadRequestMetrics(ResultSet)}
+ * @param rs
+ * @throws SQLException
+ */
+ public static void resetMetrics(ResultSet rs) throws SQLException {
+ PhoenixResultSet prs = rs.unwrap(PhoenixResultSet.class);
+ prs.resetMetrics();
+ }
+
+ /**
+ * Reset the mutation and reads-for-mutations metrics collected in the connection.
+ *
+ * @see {@link #getReadMetricsForMutationsSinceLastReset(Connection)} {@link #getWriteMetricsForMutationsSinceLastReset(Connection)}
+ * @param conn
+ * @throws SQLException
+ */
+ public static void resetMetrics(Connection conn) throws SQLException {
+ PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
+ pConn.clearMetrics();
+ }
+
+ }
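Pulling the new read-side surface together, a consolidated usage sketch (the url, table name, and the "true" toggle value are illustrative assumptions; the docs above only say the RequestMetric property enables or disables request-level collection):

    Properties props = new Properties();
    props.setProperty(PhoenixRuntime.REQUEST_METRIC_ATTRIB, "true"); // assumed toggle value
    try (Connection conn = DriverManager.getConnection(url, props);
            ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM my_table")) {
        while (rs.next()) {
            // consume rows
        }
        // Per-table read metrics and overall query metrics for this request:
        Map<String, Map<String, Long>> readMetrics = PhoenixRuntime.getRequestReadMetrics(rs);
        Map<String, Long> overall = PhoenixRuntime.getOverAllReadRequestMetrics(rs);
        PhoenixRuntime.resetMetrics(rs);
    }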
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/test/java/org/apache/phoenix/iterate/SpoolingResultIteratorTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/iterate/SpoolingResultIteratorTest.java b/phoenix-core/src/test/java/org/apache/phoenix/iterate/SpoolingResultIteratorTest.java
index ab6a4a7..5ae1a56 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/iterate/SpoolingResultIteratorTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/iterate/SpoolingResultIteratorTest.java
@@ -27,6 +27,8 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.memory.DelegatingMemoryManager;
import org.apache.phoenix.memory.GlobalMemoryManager;
import org.apache.phoenix.memory.MemoryManager;
+import org.apache.phoenix.monitoring.MemoryMetricsHolder;
+import org.apache.phoenix.monitoring.SpoolingMetricsHolder;
import org.apache.phoenix.query.QueryServicesOptions;
import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
import org.apache.phoenix.schema.tuple.Tuple;
@@ -52,7 +54,7 @@ public class SpoolingResultIteratorTest {
};
MemoryManager memoryManager = new DelegatingMemoryManager(new GlobalMemoryManager(threshold, 0));
- ResultIterator scanner = new SpoolingResultIterator(iterator, memoryManager, threshold, maxSizeSpool,"/tmp");
+ ResultIterator scanner = new SpoolingResultIterator(SpoolingMetricsHolder.NO_OP_INSTANCE, MemoryMetricsHolder.NO_OP_INSTANCE, iterator, memoryManager, threshold, maxSizeSpool,"/tmp");
AssertResults.assertResults(scanner, expectedResults);
}
[34/47] phoenix git commit: LP-1277 Support nulls in CHAR fields in
CSV loader
Posted by ma...@apache.org.
LP-1277 Support nulls in CHAR fields in CSV loader
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/38ae6b75
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/38ae6b75
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/38ae6b75
Branch: refs/heads/calcite
Commit: 38ae6b754a77fd967d601e89711349e8c2e22577
Parents: 0f6595c
Author: Gabriel Reid <ga...@ngdata.com>
Authored: Thu Jun 25 21:36:51 2015 +0200
Committer: Gabriel Reid <ga...@ngdata.com>
Committed: Mon Jun 29 08:38:52 2015 +0200
----------------------------------------------------------------------
.../phoenix/end2end/CSVCommonsLoaderIT.java | 18 +++++++++++-------
.../org/apache/phoenix/schema/types/PChar.java | 3 ---
.../phoenix/util/csv/CsvUpsertExecutor.java | 5 ++++-
3 files changed, 15 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/38ae6b75/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java
index d07ed8d..c7287ea 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CSVCommonsLoaderIT.java
@@ -46,9 +46,10 @@ import org.junit.Test;
public class CSVCommonsLoaderIT extends BaseHBaseManagedTimeIT {
private static final String DATATYPE_TABLE = "DATATYPE";
- private static final String DATATYPES_CSV_VALUES = "CKEY, CVARCHAR, CINTEGER, CDECIMAL, CUNSIGNED_INT, CBOOLEAN, CBIGINT, CUNSIGNED_LONG, CTIME, CDATE\n"
- + "KEY1,A,2147483647,1.1,0,TRUE,9223372036854775807,0,1990-12-31 10:59:59,1999-12-31 23:59:59\n"
- + "KEY2,B,-2147483648,-1.1,2147483647,FALSE,-9223372036854775808,9223372036854775807,2000-01-01 00:00:01,2012-02-29 23:59:59\n";
+ private static final String DATATYPES_CSV_VALUES = "CKEY, CVARCHAR, CCHAR, CINTEGER, CDECIMAL, CUNSIGNED_INT, CBOOLEAN, CBIGINT, CUNSIGNED_LONG, CTIME, CDATE\n"
+ + "KEY1,A,A,2147483647,1.1,0,TRUE,9223372036854775807,0,1990-12-31 10:59:59,1999-12-31 23:59:59\n"
+ + "KEY2,B,B,-2147483648,-1.1,2147483647,FALSE,-9223372036854775808,9223372036854775807,2000-01-01 00:00:01,2012-02-29 23:59:59\n"
+ + "KEY3,,,,,,,,,,\n";
private static final String STOCK_TABLE = "STOCK_SYMBOL";
private static final String STOCK_TABLE_MULTI = "STOCK_SYMBOL_MULTI";
private static final String STOCK_CSV_VALUES = "AAPL,APPLE Inc.\n"
@@ -480,7 +481,7 @@ public class CSVCommonsLoaderIT extends BaseHBaseManagedTimeIT {
String statements = "CREATE TABLE IF NOT EXISTS "
+ DATATYPE_TABLE
+ " (CKEY VARCHAR NOT NULL PRIMARY KEY,"
- + " CVARCHAR VARCHAR, CINTEGER INTEGER, CDECIMAL DECIMAL(31,10), CUNSIGNED_INT UNSIGNED_INT, CBOOLEAN BOOLEAN, CBIGINT BIGINT, CUNSIGNED_LONG UNSIGNED_LONG, CTIME TIME, CDATE DATE);";
+ + " CVARCHAR VARCHAR, CCHAR CHAR(10), CINTEGER INTEGER, CDECIMAL DECIMAL(31,10), CUNSIGNED_INT UNSIGNED_INT, CBOOLEAN BOOLEAN, CBIGINT BIGINT, CUNSIGNED_LONG UNSIGNED_LONG, CTIME TIME, CDATE DATE);";
conn = DriverManager.getConnection(getUrl())
.unwrap(PhoenixConnection.class);
PhoenixRuntime.executeStatements(conn,
@@ -493,7 +494,7 @@ public class CSVCommonsLoaderIT extends BaseHBaseManagedTimeIT {
// Compare Phoenix ResultSet with CSV file content
PreparedStatement statement = conn
- .prepareStatement("SELECT CKEY, CVARCHAR, CINTEGER, CDECIMAL, CUNSIGNED_INT, CBOOLEAN, CBIGINT, CUNSIGNED_LONG, CTIME, CDATE FROM "
+ .prepareStatement("SELECT CKEY, CVARCHAR, CCHAR, CINTEGER, CDECIMAL, CUNSIGNED_INT, CBOOLEAN, CBIGINT, CUNSIGNED_LONG, CTIME, CDATE FROM "
+ DATATYPE_TABLE);
ResultSet phoenixResultSet = statement.executeQuery();
parser = new CSVParser(new StringReader(DATATYPES_CSV_VALUES),
@@ -511,9 +512,12 @@ public class CSVCommonsLoaderIT extends BaseHBaseManagedTimeIT {
i++;
}
// special case for matching date, time values
- assertEquals(DateUtil.parseTime(record.get(8)),
+ String timeFieldValue = record.get(9);
+ assertEquals(timeFieldValue.isEmpty() ? null : DateUtil.parseTime(record.get(9)),
phoenixResultSet.getTime("CTIME"));
- assertEquals(DateUtil.parseDate(record.get(9)),
+
+ String dateField = record.get(10);
+ assertEquals(dateField.isEmpty() ? null : DateUtil.parseDate(record.get(10)),
phoenixResultSet.getDate("CDATE"));
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/38ae6b75/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java
index c4d482c..c7cc1c1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java
@@ -174,9 +174,6 @@ public class PChar extends PDataType<String> {
@Override
public Object toObject(String value) {
- if (value == null || value.length() == 0) {
- throw newIllegalDataException(this + " may not be null");
- }
if (StringUtil.hasMultiByteChars(value)) {
throw newIllegalDataException("CHAR types may only contain single byte characters (" + value + ")");
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/38ae6b75/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
index 0e3294b..156c3a4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
@@ -233,7 +233,10 @@ public class CsvUpsertExecutor implements Closeable {
@Nullable
@Override
public Object apply(@Nullable String input) {
- if(dateTimeParser != null) {
+ if (input == null || input.isEmpty()) {
+ return null;
+ }
+ if (dateTimeParser != null) {
long epochTime = dateTimeParser.parseDateTime(input);
byte[] byteValue = new byte[dataType.getByteSize()];
dataType.getCodec().encodeLong(epochTime, byteValue, 0);
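The guard added above is what lets the new KEY3 row of empty fields load: an empty CSV value now becomes SQL NULL before any type-specific parsing runs (including the CHAR path, now that PChar.toObject no longer rejects empty input). A stand-alone sketch of the same guard, with hypothetical names rather than the actual CsvUpsertExecutor internals:

    import javax.annotation.Nullable;

    import com.google.common.base.Function;

    // Hypothetical converter illustrating the empty-string-to-null rule:
    public class EmptyToNull implements Function<String, Object> {
        @Nullable
        @Override
        public Object apply(@Nullable String input) {
            if (input == null || input.isEmpty()) {
                return null; // empty CSV field -> SQL NULL
            }
            return input;    // otherwise defer to the type-specific conversion
        }
    }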
[12/47] phoenix git commit: PHOENIX-2025 Phoenix-core's
hbase-default.xml prevents HBaseTestingUtility from starting up in client
apps (Geoffrey Jacoby)
Posted by ma...@apache.org.
PHOENIX-2025 Phoenix-core's hbase-default.xml prevents HBaseTestingUtility from starting up in client apps (Geoffrey Jacoby)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fb44f353
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fb44f353
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fb44f353
Branch: refs/heads/calcite
Commit: fb44f35300510670b037f597ee66f709cb4d8dbb
Parents: 14d11b1
Author: James Taylor <ja...@apache.org>
Authored: Wed Jun 17 17:01:34 2015 -0700
Committer: James Taylor <ja...@apache.org>
Committed: Wed Jun 17 17:13:25 2015 -0700
----------------------------------------------------------------------
phoenix-core/src/it/resources/hbase-default.xml | 36 --------------------
phoenix-core/src/it/resources/hbase-site.xml | 36 ++++++++++++++++++++
.../phoenix/compile/WhereCompilerTest.java | 10 ++++--
.../phoenix/query/ConnectionlessTest.java | 14 +-------
.../src/test/resources/hbase-default.xml | 36 --------------------
5 files changed, 45 insertions(+), 87 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/fb44f353/phoenix-core/src/it/resources/hbase-default.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/resources/hbase-default.xml b/phoenix-core/src/it/resources/hbase-default.xml
deleted file mode 100644
index 691b702..0000000
--- a/phoenix-core/src/it/resources/hbase-default.xml
+++ /dev/null
@@ -1,36 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
- <property>
- <name>hbase.defaults.for.version.skip</name>
- <value>true</value>
- <description>
- Set to true to skip the 'hbase.defaults.for.version' check.
- Setting this to true can be useful in contexts other than
- the other side of a maven generation; i.e. running in an
- ide. You'll want to set this boolean to true to avoid
- seeing the RuntimException complaint: "hbase-default.xml file
- seems to be for and old version of HBase (@@@VERSION@@@), this
- version is X.X.X-SNAPSHOT"
- </description>
- </property>
-</configuration>
http://git-wip-us.apache.org/repos/asf/phoenix/blob/fb44f353/phoenix-core/src/it/resources/hbase-site.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/resources/hbase-site.xml b/phoenix-core/src/it/resources/hbase-site.xml
new file mode 100644
index 0000000..691b702
--- /dev/null
+++ b/phoenix-core/src/it/resources/hbase-site.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+ <property>
+ <name>hbase.defaults.for.version.skip</name>
+ <value>true</value>
+ <description>
+ Set to true to skip the 'hbase.defaults.for.version' check.
+ Setting this to true can be useful in contexts other than
+ the other side of a maven generation; i.e. running in an
+ ide. You'll want to set this boolean to true to avoid
+ seeing the RuntimException complaint: "hbase-default.xml file
+ seems to be for and old version of HBase (@@@VERSION@@@), this
+ version is X.X.X-SNAPSHOT"
+ </description>
+ </property>
+</configuration>
http://git-wip-us.apache.org/repos/asf/phoenix/blob/fb44f353/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java
index 3a012fb..6c040d2 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereCompilerTest.java
@@ -44,6 +44,9 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.Filter;
@@ -943,8 +946,11 @@ public class WhereCompilerTest extends BaseConnectionlessQueryTest {
PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
QueryPlan plan = pstmt.optimizeQuery();
Scan scan = plan.getContext().getScan();
- assertEquals(QueryServicesOptions.DEFAULT_SCAN_CACHE_SIZE, pstmt.getFetchSize());
- assertEquals(QueryServicesOptions.DEFAULT_SCAN_CACHE_SIZE, scan.getCaching());
+ Configuration config = HBaseConfiguration.create();
+ int defaultScannerCacheSize = config.getInt(HConstants.HBASE_CLIENT_SCANNER_CACHING,
+ HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING);
+ assertEquals(defaultScannerCacheSize, pstmt.getFetchSize());
+ assertEquals(defaultScannerCacheSize, scan.getCaching());
}
@Test
http://git-wip-us.apache.org/repos/asf/phoenix/blob/fb44f353/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionlessTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionlessTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionlessTest.java
index 2b2841b..732563f 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionlessTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionlessTest.java
@@ -22,7 +22,6 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
import java.sql.Connection;
import java.sql.Date;
@@ -36,14 +35,13 @@ import java.util.Properties;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
-import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.jdbc.PhoenixDriver;
+import org.apache.phoenix.schema.SaltingUtil;
import org.apache.phoenix.schema.types.PChar;
import org.apache.phoenix.schema.types.PDate;
import org.apache.phoenix.schema.types.PVarchar;
-import org.apache.phoenix.schema.SaltingUtil;
import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.StringUtil;
@@ -172,16 +170,6 @@ public class ConnectionlessTest {
}
@Test
- public void testNoConnectionInfo() throws Exception {
- try {
- DriverManager.getConnection(PhoenixRuntime.JDBC_PROTOCOL);
- fail();
- } catch (SQLException e) {
- assertEquals(SQLExceptionCode.MALFORMED_CONNECTION_URL.getSQLState(),e.getSQLState());
- }
- }
-
- @Test
public void testMultipleConnectionQueryServices() throws Exception {
String url1 = getUrl();
String url2 = url1 + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + "LongRunningQueries";
http://git-wip-us.apache.org/repos/asf/phoenix/blob/fb44f353/phoenix-core/src/test/resources/hbase-default.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/resources/hbase-default.xml b/phoenix-core/src/test/resources/hbase-default.xml
deleted file mode 100644
index 691b702..0000000
--- a/phoenix-core/src/test/resources/hbase-default.xml
+++ /dev/null
@@ -1,36 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
- <property>
- <name>hbase.defaults.for.version.skip</name>
- <value>true</value>
- <description>
- Set to true to skip the 'hbase.defaults.for.version' check.
- Setting this to true can be useful in contexts other than
- the other side of a maven generation; i.e. running in an
- ide. You'll want to set this boolean to true to avoid
- seeing the RuntimException complaint: "hbase-default.xml file
- seems to be for and old version of HBase (@@@VERSION@@@), this
- version is X.X.X-SNAPSHOT"
- </description>
- </property>
-</configuration>
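The deleted resource existed only to make the mini-cluster skip HBase's version check. The same effect can be had programmatically, which is exactly what the IndexToolIT setup in the following commit does. A minimal, self-contained sketch (the property name is taken verbatim from the file above; everything else is standard HBase API):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class VersionSkipSketch {
        public static void main(String[] args) {
            // Equivalent of the deleted hbase-default.xml override: skip the
            // 'hbase.defaults.for.version' sanity check, e.g. when running
            // tests in an IDE or against a SNAPSHOT build.
            Configuration conf = HBaseConfiguration.create();
            conf.setBoolean("hbase.defaults.for.version.skip", true);
            System.out.println(conf.getBoolean("hbase.defaults.for.version.skip", false));
        }
    }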
[45/47] phoenix git commit: PHOENIX-2059 MR index build does not
handle table with a schema name correctly
Posted by ma...@apache.org.
PHOENIX-2059 MR index build does not handle table with a schema name correctly
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/54da7d1d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/54da7d1d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/54da7d1d
Branch: refs/heads/calcite
Commit: 54da7d1d6b2ecd27c8c98211e84484029b6d39c2
Parents: 6a07d45
Author: Thomas D'Silva <td...@salesforce.com>
Authored: Mon Jun 22 17:45:58 2015 -0700
Committer: Thomas D'Silva <td...@salesforce.com>
Committed: Tue Jun 30 22:21:37 2015 -0700
----------------------------------------------------------------------
.../apache/phoenix/mapreduce/IndexToolIT.java | 47 ++++++++++++--------
.../phoenix/mapreduce/index/IndexTool.java | 15 ++++---
2 files changed, 36 insertions(+), 26 deletions(-)
----------------------------------------------------------------------
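The heart of the fix: every lookup that previously used the bare table name now goes through the schema-qualified name. A minimal sketch of the utility at the center of the patch (SchemaUtil.getTableName is the real Phoenix helper; the names below are placeholders):

    import org.apache.phoenix.util.SchemaUtil;

    public class QualifiedNameSketch {
        public static void main(String[] args) {
            // With a schema, Phoenix joins the two parts with a dot.
            System.out.println(SchemaUtil.getTableName("SCHEMA", "DATA_TABLE1")); // SCHEMA.DATA_TABLE1
            // With no schema, the bare table name is returned unchanged.
            System.out.println(SchemaUtil.getTableName(null, "DATA_TABLE1"));     // DATA_TABLE1
        }
    }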
http://git-wip-us.apache.org/repos/asf/phoenix/blob/54da7d1d/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/IndexToolIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/IndexToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/IndexToolIT.java
index 6761275..5d11cf2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/IndexToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/mapreduce/IndexToolIT.java
@@ -42,6 +42,7 @@ import org.apache.phoenix.util.MetaDataUtil;
import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.SchemaUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -62,6 +63,7 @@ public class IndexToolIT {
public static void setUp() throws Exception {
hbaseTestUtil = new HBaseTestingUtility();
Configuration conf = hbaseTestUtil.getConfiguration();
+ conf.setBoolean("hbase.defaults.for.version.skip", true);
setUpConfigForMiniCluster(conf);
hbaseTestUtil.startMiniCluster();
hbaseTestUtil.startMiniMapReduceCluster();
@@ -71,34 +73,35 @@ public class IndexToolIT {
@Test
public void testImmutableGlobalIndex() throws Exception {
- testSecondaryIndex("DATA_TABLE1",true, false);
+ testSecondaryIndex("SCHEMA", "DATA_TABLE1", true, false);
}
@Test
public void testImmutableLocalIndex() throws Exception {
- testSecondaryIndex("DATA_TABLE2",true, true);
+ testSecondaryIndex("SCHEMA", "DATA_TABLE2", true, true);
}
@Test
public void testMutableGlobalIndex() throws Exception {
- testSecondaryIndex("DATA_TABLE3",false, false);
+ testSecondaryIndex("SCHEMA", "DATA_TABLE3", false, false);
}
@Test
public void testMutableLocalIndex() throws Exception {
- testSecondaryIndex("DATA_TABLE4",false, true);
+ testSecondaryIndex("SCHEMA", "DATA_TABLE4", false, true);
}
- public void testSecondaryIndex(final String dataTable , final boolean isImmutable , final boolean isLocal) throws Exception {
+ public void testSecondaryIndex(final String schemaName, final String dataTable, final boolean isImmutable , final boolean isLocal) throws Exception {
+ final String fullTableName = SchemaUtil.getTableName(schemaName, dataTable);
final String indxTable = String.format("%s_%s",dataTable,"INDX");
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum,props);
Statement stmt = conn.createStatement();
try {
- stmt.execute(String.format("CREATE TABLE %s (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR, ZIP INTEGER) %s",dataTable, (isImmutable ? "IMMUTABLE_ROWS=true" :"")));
- String upsertQuery = String.format("UPSERT INTO %s VALUES(?, ?, ?)",dataTable);
+ stmt.execute(String.format("CREATE TABLE %s (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR, ZIP INTEGER) %s", fullTableName, (isImmutable ? "IMMUTABLE_ROWS=true" :"")));
+ String upsertQuery = String.format("UPSERT INTO %s VALUES(?, ?, ?)", fullTableName);
PreparedStatement stmt1 = conn.prepareStatement(upsertQuery);
int id = 1;
@@ -107,15 +110,15 @@ public class IndexToolIT {
upsertRow(stmt1, id++);
conn.commit();
- stmt.execute(String.format("CREATE %s INDEX %s ON %s (UPPER(NAME)) ASYNC ", (isLocal ? "LOCAL" : ""), indxTable,dataTable));
+ stmt.execute(String.format("CREATE %s INDEX %s ON %s (UPPER(NAME)) ASYNC ", (isLocal ? "LOCAL" : ""), indxTable, fullTableName));
//verify rows are fetched from data table.
- String selectSql = String.format("SELECT UPPER(NAME),ID FROM %s",dataTable);
+ String selectSql = String.format("SELECT UPPER(NAME),ID FROM %s", fullTableName);
ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + selectSql);
String actualExplainPlan = QueryUtil.getExplainPlan(rs);
//assert we are pulling from data table.
- assertEquals(String.format("CLIENT 1-CHUNK PARALLEL 1-WAY FULL SCAN OVER %s",dataTable),actualExplainPlan);
+ assertEquals(String.format("CLIENT 1-CHUNK PARALLEL 1-WAY FULL SCAN OVER %s", fullTableName), actualExplainPlan);
rs = stmt1.executeQuery(selectSql);
assertTrue(rs.next());
@@ -127,7 +130,7 @@ public class IndexToolIT {
final IndexTool indexingTool = new IndexTool();
indexingTool.setConf(new Configuration(hbaseTestUtil.getConfiguration()));
- final String[] cmdArgs = getArgValues(dataTable,indxTable);
+ final String[] cmdArgs = getArgValues(schemaName, dataTable, indxTable);
int status = indexingTool.run(cmdArgs);
assertEquals(0, status);
@@ -135,11 +138,13 @@ public class IndexToolIT {
upsertRow(stmt1, 3);
upsertRow(stmt1, 4);
conn.commit();
+
+ rs = stmt1.executeQuery("SELECT * FROM "+SchemaUtil.getTableName(schemaName, indxTable));
//assert we are pulling from index table.
rs = conn.createStatement().executeQuery("EXPLAIN " + selectSql);
actualExplainPlan = QueryUtil.getExplainPlan(rs);
- assertExplainPlan(actualExplainPlan,dataTable,indxTable,isLocal);
+ assertExplainPlan(actualExplainPlan,schemaName,dataTable,indxTable,isLocal);
rs = stmt.executeQuery(selectSql);
assertTrue(rs.next());
@@ -160,7 +165,7 @@ public class IndexToolIT {
assertFalse(rs.next());
- conn.createStatement().execute(String.format("DROP INDEX %s ON %s",indxTable , dataTable));
+ conn.createStatement().execute(String.format("DROP INDEX %s ON %s",indxTable , fullTableName));
} finally {
conn.close();
}
@@ -219,14 +224,14 @@ public class IndexToolIT {
final IndexTool indexingTool = new IndexTool();
indexingTool.setConf(new Configuration(hbaseTestUtil.getConfiguration()));
- final String[] cmdArgs = getArgValues(dataTable,indxTable);
+ final String[] cmdArgs = getArgValues(null, dataTable,indxTable);
int status = indexingTool.run(cmdArgs);
assertEquals(0, status);
//assert we are pulling from index table.
rs = conn.createStatement().executeQuery("EXPLAIN " + selectSql);
actualExplainPlan = QueryUtil.getExplainPlan(rs);
- assertExplainPlan(actualExplainPlan,dataTable,indxTable,false);
+ assertExplainPlan(actualExplainPlan,null,dataTable,indxTable,false);
rs = stmt.executeQuery(selectSql);
assertTrue(rs.next());
@@ -242,23 +247,27 @@ public class IndexToolIT {
}
}
- private void assertExplainPlan(final String actualExplainPlan, String dataTable,
+ private void assertExplainPlan(final String actualExplainPlan, String schemaName, String dataTable,
String indxTable, boolean isLocal) {
String expectedExplainPlan = "";
if(isLocal) {
- final String localIndexName = MetaDataUtil.getLocalIndexTableName(dataTable);
+ final String localIndexName = MetaDataUtil.getLocalIndexTableName(SchemaUtil.getTableName(schemaName, dataTable));
expectedExplainPlan = String.format("CLIENT 1-CHUNK PARALLEL 1-WAY RANGE SCAN OVER %s [-32768]"
+ "\n SERVER FILTER BY FIRST KEY ONLY", localIndexName);
} else {
expectedExplainPlan = String.format("CLIENT 1-CHUNK PARALLEL 1-WAY FULL SCAN OVER %s"
- + "\n SERVER FILTER BY FIRST KEY ONLY",indxTable);
+ + "\n SERVER FILTER BY FIRST KEY ONLY",SchemaUtil.getTableName(schemaName, indxTable));
}
assertEquals(expectedExplainPlan,actualExplainPlan);
}
- private String[] getArgValues(String dataTable, String indxTable) {
+ private String[] getArgValues(String schemaName, String dataTable, String indxTable) {
final List<String> args = Lists.newArrayList();
+ if (schemaName!=null) {
+ args.add("-s");
+ args.add(schemaName);
+ }
args.add("-dt");
args.add(dataTable);
args.add("-it");
http://git-wip-us.apache.org/repos/asf/phoenix/blob/54da7d1d/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
index 300f575..d3a1adf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
@@ -30,6 +30,7 @@ import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
+import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
@@ -164,12 +165,12 @@ public class IndexTool extends Configured implements Tool {
final String qIndexTable = SchemaUtil.getTableName(schemaName, indexTable);
connection = ConnectionUtil.getInputConnection(configuration);
- if(!isValidIndexTable(connection, dataTable, indexTable)) {
+ if(!isValidIndexTable(connection, qDataTable, indexTable)) {
throw new IllegalArgumentException(String.format(" %s is not an index table for %s ",qIndexTable,qDataTable));
}
- final PTable pdataTable = PhoenixRuntime.getTable(connection, dataTable);
- final PTable pindexTable = PhoenixRuntime.getTable(connection, indexTable);
+ final PTable pdataTable = PhoenixRuntime.getTable(connection, qDataTable);
+ final PTable pindexTable = PhoenixRuntime.getTable(connection, qIndexTable);
// this is set to ensure index tables remains consistent post population.
long indxTimestamp = pindexTable.getTimeStamp();
@@ -178,7 +179,7 @@ public class IndexTool extends Configured implements Tool {
// check if the index type is LOCAL, if so, set the logicalIndexName that is computed from the dataTable name.
String logicalIndexTable = qIndexTable;
if(IndexType.LOCAL.equals(pindexTable.getIndexType())) {
- logicalIndexTable = MetaDataUtil.getLocalIndexTableName(dataTable);
+ logicalIndexTable = MetaDataUtil.getLocalIndexTableName(qDataTable);
}
final PhoenixConnection pConnection = connection.unwrap(PhoenixConnection.class);
@@ -187,7 +188,7 @@ public class IndexTool extends Configured implements Tool {
final List<String> indexColumns = ddlCompiler.getIndexColumnNames();
final String selectQuery = ddlCompiler.getSelectQuery();
- final String upsertQuery = QueryUtil.constructUpsertStatement(indexTable, indexColumns, Hint.NO_INDEX);
+ final String upsertQuery = QueryUtil.constructUpsertStatement(qIndexTable, indexColumns, Hint.NO_INDEX);
configuration.set(PhoenixConfigurationUtil.UPSERT_STATEMENT, upsertQuery);
PhoenixConfigurationUtil.setOutputTableName(configuration, logicalIndexTable);
@@ -231,11 +232,11 @@ public class IndexTool extends Configured implements Tool {
}
// finally update the index state to ACTIVE.
- updateIndexState(connection,dataTable,indexTable,PIndexState.ACTIVE);
+ updateIndexState(connection,qDataTable,indexTable,PIndexState.ACTIVE);
return 0;
} catch (Exception ex) {
- LOG.error(" An exception occured while performing the indexing job , error message {} ",ex.getMessage());
+ LOG.error(" An exception occured while performing the indexing job : "+ ExceptionUtils.getStackTrace(ex));
return -1;
} finally {
try {
[42/47] phoenix git commit: PHOENIX-2087 Ensure predictable column
position during alter table
Posted by ma...@apache.org.
PHOENIX-2087 Ensure predictable column position during alter table
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/72a7356b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/72a7356b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/72a7356b
Branch: refs/heads/calcite
Commit: 72a7356bcade01990a59cfd5d72161f18ae909f3
Parents: a8a9d01
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Jun 30 08:44:37 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Jun 30 17:31:24 2015 -0700
----------------------------------------------------------------------
.../apache/phoenix/end2end/AlterTableIT.java | 51 +++++++++++++++-----
.../coprocessor/MetaDataEndpointImpl.java | 5 +-
.../apache/phoenix/schema/MetaDataClient.java | 9 +++-
3 files changed, 46 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
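The client-side change is easiest to see in isolation: column mutations are buffered in a separate list, only the table metadata is reversed (so the header row comes first), and the column mutations are appended afterwards so their relative order, and hence each column's ordinal position, stays predictable. A runnable sketch with strings standing in for the HBase Mutation objects:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class MutationOrderSketch {
        public static void main(String[] args) {
            // Stand-ins for the mutations MetaDataClient accumulates.
            List<String> tableMetaData = new ArrayList<>(Arrays.asList("seqNumUpdate", "headerRow"));
            List<String> columnMetaData = Arrays.asList("COL_A", "COL_B", "COL_C");
            Collections.reverse(tableMetaData);    // force the table header row to be first
            tableMetaData.addAll(columnMetaData);  // columns keep their declaration order
            System.out.println(tableMetaData);     // [headerRow, seqNumUpdate, COL_A, COL_B, COL_C]
        }
    }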
http://git-wip-us.apache.org/repos/asf/phoenix/blob/72a7356b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index cd46927..56bba9b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -448,7 +448,7 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
conn.commit();
assertIndexExists(conn,true);
- conn.createStatement().execute("ALTER TABLE " + DATA_TABLE_FULL_NAME + " ADD v3 VARCHAR, k2 DECIMAL PRIMARY KEY");
+ conn.createStatement().execute("ALTER TABLE " + DATA_TABLE_FULL_NAME + " ADD v3 VARCHAR, k2 DECIMAL PRIMARY KEY, k3 DECIMAL PRIMARY KEY");
rs = conn.getMetaData().getPrimaryKeys("", SCHEMA_NAME, DATA_TABLE_NAME);
assertTrue(rs.next());
assertEquals("K",rs.getString("COLUMN_NAME"));
@@ -456,6 +456,10 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
assertTrue(rs.next());
assertEquals("K2",rs.getString("COLUMN_NAME"));
assertEquals(2, rs.getShort("KEY_SEQ"));
+ assertTrue(rs.next());
+ assertEquals("K3",rs.getString("COLUMN_NAME"));
+ assertEquals(3, rs.getShort("KEY_SEQ"));
+ assertFalse(rs.next());
rs = conn.getMetaData().getPrimaryKeys("", SCHEMA_NAME, INDEX_TABLE_NAME);
assertTrue(rs.next());
@@ -467,6 +471,10 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
assertTrue(rs.next());
assertEquals(IndexUtil.INDEX_COLUMN_NAME_SEP + "K2",rs.getString("COLUMN_NAME"));
assertEquals(3, rs.getShort("KEY_SEQ"));
+ assertTrue(rs.next());
+ assertEquals(IndexUtil.INDEX_COLUMN_NAME_SEP + "K3",rs.getString("COLUMN_NAME"));
+ assertEquals(4, rs.getShort("KEY_SEQ"));
+ assertFalse(rs.next());
query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
rs = conn.createStatement().executeQuery(query);
@@ -478,19 +486,21 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
assertFalse(rs.next());
// load some data into the table
- stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + "(K,K2,V1,V2) VALUES(?,?,?,?)");
+ stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + "(K,K2,V1,V2,K3) VALUES(?,?,?,?,?)");
stmt.setString(1, "b");
stmt.setBigDecimal(2, BigDecimal.valueOf(2));
stmt.setString(3, "y");
stmt.setString(4, "2");
+ stmt.setBigDecimal(5, BigDecimal.valueOf(3));
stmt.execute();
conn.commit();
- query = "SELECT k,k2 FROM " + DATA_TABLE_FULL_NAME + " WHERE v1='y'";
+ query = "SELECT k,k2,k3 FROM " + DATA_TABLE_FULL_NAME + " WHERE v1='y'";
rs = conn.createStatement().executeQuery(query);
assertTrue(rs.next());
assertEquals("b",rs.getString(1));
assertEquals(BigDecimal.valueOf(2),rs.getBigDecimal(2));
+ assertEquals(BigDecimal.valueOf(3),rs.getBigDecimal(3));
assertFalse(rs.next());
}
@@ -2345,6 +2355,21 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
return false;
}
+ private int getIndexOfPkColumn(PhoenixConnection conn, String columnName, String tableName) throws SQLException {
+ String normalizedTableName = SchemaUtil.normalizeIdentifier(tableName);
+ PTable table = conn.getMetaDataCache().getTable(new PTableKey(conn.getTenantId(), normalizedTableName));
+ List<PColumn> pkCols = table.getPKColumns();
+ String normalizedColumnName = SchemaUtil.normalizeIdentifier(columnName);
+ int i = 0;
+ for (PColumn pkCol : pkCols) {
+ if (pkCol.getName().getString().equals(normalizedColumnName)) {
+ return i;
+ }
+ i++;
+ }
+ return -1;
+ }
+
private Connection getTenantConnection(String tenantId) throws Exception {
Properties tenantProps = new Properties();
tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
@@ -2444,35 +2469,35 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
ResultSet rs = tenantConn.createStatement().executeQuery("SELECT K2, K3, V3 FROM " + view1);
PhoenixConnection phxConn = tenantConn.unwrap(PhoenixConnection.class);
- assertTrue(checkColumnPartOfPk(phxConn, "k2", view1));
- assertTrue(checkColumnPartOfPk(phxConn, "k3", view1));
+ assertEquals(2, getIndexOfPkColumn(phxConn, "k2", view1));
+ assertEquals(3, getIndexOfPkColumn(phxConn, "k3", view1));
assertEquals(1, getTableSequenceNumber(phxConn, view1));
assertEquals(4, getMaxKeySequenceNumber(phxConn, view1));
verifyNewColumns(rs, "K2", "K3", "V3");
rs = tenantConn.createStatement().executeQuery("SELECT K2, K3, V3 FROM " + view2);
- assertTrue(checkColumnPartOfPk(phxConn, "k2", view2));
- assertTrue(checkColumnPartOfPk(phxConn, "k3", view2));
+ assertEquals(2, getIndexOfPkColumn(phxConn, "k2", view2));
+ assertEquals(3, getIndexOfPkColumn(phxConn, "k3", view2));
assertEquals(1, getTableSequenceNumber(phxConn, view2));
assertEquals(4, getMaxKeySequenceNumber(phxConn, view2));
verifyNewColumns(rs, "K2", "K3", "V3");
- assertTrue(checkColumnPartOfPk(phxConn, IndexUtil.getIndexColumnName(null, "k2"), view2Index));
- assertTrue(checkColumnPartOfPk(phxConn, IndexUtil.getIndexColumnName(null, "k3"), view2Index));
+ assertEquals(4, getIndexOfPkColumn(phxConn, IndexUtil.getIndexColumnName(null, "k2"), view2Index));
+ assertEquals(5, getIndexOfPkColumn(phxConn, IndexUtil.getIndexColumnName(null, "k3"), view2Index));
assertEquals(1, getTableSequenceNumber(phxConn, view2Index));
assertEquals(6, getMaxKeySequenceNumber(phxConn, view2Index));
}
try (Connection tenantConn = getTenantConnection(tenant2)) {
ResultSet rs = tenantConn.createStatement().executeQuery("SELECT K2, K3, V3 FROM " + view3);
PhoenixConnection phxConn = tenantConn.unwrap(PhoenixConnection.class);
- assertTrue(checkColumnPartOfPk(phxConn, "k2", view3));
- assertTrue(checkColumnPartOfPk(phxConn, "k3", view3));
+ assertEquals(2, getIndexOfPkColumn(phxConn, "k2", view3));
+ assertEquals(3, getIndexOfPkColumn(phxConn, "k3", view3));
assertEquals(1, getTableSequenceNumber(phxConn, view3));
verifyNewColumns(rs, "K22", "K33", "V33");
- assertTrue(checkColumnPartOfPk(phxConn, IndexUtil.getIndexColumnName(null, "k2"), view3Index));
- assertTrue(checkColumnPartOfPk(phxConn, IndexUtil.getIndexColumnName(null, "k3"), view3Index));
+ assertEquals(4, getIndexOfPkColumn(phxConn, IndexUtil.getIndexColumnName(null, "k2"), view3Index));
+ assertEquals(5, getIndexOfPkColumn(phxConn, IndexUtil.getIndexColumnName(null, "k3"), view3Index));
assertEquals(1, getTableSequenceNumber(phxConn, view3Index));
assertEquals(6, getMaxKeySequenceNumber(phxConn, view3Index));
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/72a7356b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index cc486d5..dc1a3b4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -207,6 +207,7 @@ import com.google.protobuf.Service;
*
* @since 0.1
*/
+@SuppressWarnings("deprecation")
public class MetaDataEndpointImpl extends MetaDataProtocol implements CoprocessorService, Coprocessor {
private static final Logger logger = LoggerFactory.getLogger(MetaDataEndpointImpl.class);
@@ -526,7 +527,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
indexes.add(indexTable);
}
- @SuppressWarnings("deprecation")
private void addColumnToTable(List<Cell> results, PName colName, PName famName,
Cell[] colKeyValues, List<PColumn> columns, boolean isSalted) {
int i = 0;
@@ -1176,14 +1176,12 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
private static final byte[] PHYSICAL_TABLE_BYTES = new byte[] {PTable.LinkType.PHYSICAL_TABLE.getSerializedValue()};
- private static final byte[] PARENT_TABLE_BYTES = new byte[] {PTable.LinkType.PARENT_TABLE.getSerializedValue()};
/**
* @param tableName parent table's name
* Looks for whether child views exist for the table specified by table.
* TODO: should we pass a timestamp here?
*/
- @SuppressWarnings("deprecation")
private TableViewFinderResult findChildViews(Region region, byte[] tenantId, PTable table, byte[] linkTypeBytes) throws IOException {
byte[] schemaName = table.getSchemaName().getBytes();
byte[] tableName = table.getTableName().getBytes();
@@ -2181,7 +2179,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
done.run(builder.build());
}
- @SuppressWarnings("deprecation")
@Override
public void updateIndexState(RpcController controller, UpdateIndexStateRequest request,
RpcCallback<MetaDataResponse> done) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/72a7356b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index d77ded8..0ad9b56 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -2341,7 +2341,10 @@ public class MetaDataClient {
while (true) {
ColumnResolver resolver = FromCompiler.getResolver(statement, connection);
table = resolver.getTables().get(0).getTable();
- List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize(2);
+ int nIndexes = table.getIndexes().size();
+ int nNewColumns = columnDefs.size();
+ List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize((1 + nNewColumns) * (nIndexes + 1));
+ List<Mutation> columnMetaData = Lists.newArrayListWithExpectedSize(nNewColumns * (nIndexes + 1));
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Resolved table to " + table.getName().getString() + " with seqNum " + table.getSequenceNumber() + " at timestamp " + table.getTimeStamp() + " with " + table.getColumns().size() + " columns: " + table.getColumns(), connection));
}
@@ -2453,7 +2456,7 @@ public class MetaDataClient {
}
}
- tableMetaData.addAll(connection.getMutationState().toMutations().next().getSecond());
+ columnMetaData.addAll(connection.getMutationState().toMutations().next().getSecond());
connection.rollback();
} else {
// Check that HBase configured properly for mutable secondary indexing
@@ -2489,6 +2492,8 @@ public class MetaDataClient {
// Force the table header row to be first
Collections.reverse(tableMetaData);
+ // Add column metadata afterwards, maintaining the order so columns have more predictable ordinal position
+ tableMetaData.addAll(columnMetaData);
byte[] family = families.size() > 0 ? families.iterator().next().getBytes() : null;
[28/47] phoenix git commit: PHOENIX-2056 Ensure PK column from base
table is added to any indexes on views
Posted by ma...@apache.org.
PHOENIX-2056 Ensure PK column from base table is added to any indexes on views
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7918a3d9
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7918a3d9
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7918a3d9
Branch: refs/heads/calcite
Commit: 7918a3d94d19f1d57f55b88834680760605e575c
Parents: 11577dd
Author: Samarth <sa...@salesforce.com>
Authored: Fri Jun 26 16:04:46 2015 -0700
Committer: Samarth <sa...@salesforce.com>
Committed: Fri Jun 26 16:04:46 2015 -0700
----------------------------------------------------------------------
.../apache/phoenix/end2end/AlterTableIT.java | 184 ++++++++++++++++++-
.../coprocessor/MetaDataEndpointImpl.java | 145 ++++++++++++++-
.../java/org/apache/phoenix/util/ByteUtil.java | 10 +-
3 files changed, 319 insertions(+), 20 deletions(-)
----------------------------------------------------------------------
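The subtlest part of the patch is composing SYSTEM.CATALOG row keys for the view-index rows. A sketch of the key layout, mirroring the getViewIndexHeaderRowKey helper added below (ByteUtil.concat and QueryConstants.SEPARATOR_BYTE_ARRAY are the real Phoenix utilities; the tenant and table names are placeholders):

    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.phoenix.query.QueryConstants;
    import org.apache.phoenix.util.ByteUtil;

    public class IndexHeaderKeySketch {
        // tenantId \x00 schemaName \x00 tableName, with null components
        // collapsing to empty byte arrays so the separators still line up.
        static byte[] headerRowKey(byte[] tenantId, byte[] schemaName, byte[] tableName) {
            byte[] t = tenantId != null ? tenantId : ByteUtil.EMPTY_BYTE_ARRAY;
            byte[] s = schemaName != null ? schemaName : ByteUtil.EMPTY_BYTE_ARRAY;
            return ByteUtil.concat(t, QueryConstants.SEPARATOR_BYTE_ARRAY,
                    s, QueryConstants.SEPARATOR_BYTE_ARRAY, tableName);
        }

        public static void main(String[] args) {
            byte[] key = headerRowKey(Bytes.toBytes("tenant1"), null, Bytes.toBytes("VIEW2_IDX"));
            System.out.println(Bytes.toStringBinary(key)); // tenant1\x00\x00VIEW2_IDX
        }
    }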
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7918a3d9/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index 61dd6a9..ae5f940 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -46,10 +46,12 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeepDeletedCells;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.coprocessor.MetaDataProtocol;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PTable;
@@ -2303,13 +2305,23 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
String alterBaseTable = "ALTER TABLE " + baseTable + " ADD NEW_PK varchar primary key ";
globalConn.createStatement().execute(alterBaseTable);
-
+
// verify that the new column new_pk is now part of the primary key for the entire hierarchy
- assertTrue(checkColumnPartOfPk(globalConn.unwrap(PhoenixConnection.class), "PK1", baseTable));
- assertTrue(checkColumnPartOfPk(tenant1Conn.unwrap(PhoenixConnection.class), "PK1", view1));
- assertTrue(checkColumnPartOfPk(tenant1Conn.unwrap(PhoenixConnection.class), "PK1", view2));
- assertTrue(checkColumnPartOfPk(tenant2Conn.unwrap(PhoenixConnection.class), "PK1", view3));
- assertTrue(checkColumnPartOfPk(globalConn.unwrap(PhoenixConnection.class), "PK1", view4));
+
+ globalConn.createStatement().execute("SELECT * FROM " + baseTable);
+ assertTrue(checkColumnPartOfPk(globalConn.unwrap(PhoenixConnection.class), "NEW_PK", baseTable));
+
+ tenant1Conn.createStatement().execute("SELECT * FROM " + view1);
+ assertTrue(checkColumnPartOfPk(tenant1Conn.unwrap(PhoenixConnection.class), "NEW_PK", view1));
+
+ tenant1Conn.createStatement().execute("SELECT * FROM " + view2);
+ assertTrue(checkColumnPartOfPk(tenant1Conn.unwrap(PhoenixConnection.class), "NEW_PK", view2));
+
+ tenant2Conn.createStatement().execute("SELECT * FROM " + view3);
+ assertTrue(checkColumnPartOfPk(tenant2Conn.unwrap(PhoenixConnection.class), "NEW_PK", view3));
+
+ globalConn.createStatement().execute("SELECT * FROM " + view4);
+ assertTrue(checkColumnPartOfPk(globalConn.unwrap(PhoenixConnection.class), "NEW_PK", view4));
} finally {
if (tenant1Conn != null) {
@@ -2344,4 +2356,164 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
return DriverManager.getConnection(getUrl(), tenantProps);
}
+
+ @Test
+ public void testAddPKColumnToBaseTableWhoseViewsHaveIndices() throws Exception {
+ String baseTable = "testAddPKColumnToBaseTableWhoseViewsHaveIndices";
+ String view1 = "view1";
+ String view2 = "view2";
+ String view3 = "view3";
+ String tenant1 = "tenant1";
+ String tenant2 = "tenant2";
+ String view2Index = view2 + "_idx";
+ String view3Index = view3 + "_idx";
+ /*          baseTable(multi-tenant)
+              /               \
+      view1(tenant1)    view3(tenant2, index)
+          /
+   view2(tenant1, index)
+ */
+ try (Connection globalConn = DriverManager.getConnection(getUrl())) {
+ // make sure that the tables are empty, but reachable
+ globalConn
+ .createStatement()
+ .execute(
+ "CREATE TABLE "
+ + baseTable
+ + " (TENANT_ID VARCHAR NOT NULL, K1 varchar not null, V1 VARCHAR, V2 VARCHAR CONSTRAINT NAME_PK PRIMARY KEY(TENANT_ID, K1)) MULTI_TENANT = true ");
+
+ }
+ try (Connection tenantConn = getTenantConnection(tenant1)) {
+ // create tenant specific view for tenant1 - view1
+ tenantConn.createStatement().execute("CREATE VIEW " + view1 + " AS SELECT * FROM " + baseTable);
+ PhoenixConnection phxConn = tenantConn.unwrap(PhoenixConnection.class);
+ assertEquals(0, getTableSequenceNumber(phxConn, view1));
+ assertEquals(2, getMaxKeySequenceNumber(phxConn, view1));
+
+ // create a view - view2 on view - view1
+ tenantConn.createStatement().execute("CREATE VIEW " + view2 + " AS SELECT * FROM " + view1);
+ assertEquals(0, getTableSequenceNumber(phxConn, view2));
+ assertEquals(2, getMaxKeySequenceNumber(phxConn, view2));
+
+
+ // create an index on view2
+ tenantConn.createStatement().execute("CREATE INDEX " + view2Index + " ON " + view2 + " (v1) include (v2)");
+ assertEquals(0, getTableSequenceNumber(phxConn, view2Index));
+ assertEquals(4, getMaxKeySequenceNumber(phxConn, view2Index));
+ }
+ try (Connection tenantConn = getTenantConnection(tenant2)) {
+ // create tenant specific view for tenant2 - view3
+ tenantConn.createStatement().execute("CREATE VIEW " + view3 + " AS SELECT * FROM " + baseTable);
+ PhoenixConnection phxConn = tenantConn.unwrap(PhoenixConnection.class);
+ assertEquals(0, getTableSequenceNumber(phxConn, view3));
+ assertEquals(2, getMaxKeySequenceNumber(phxConn, view3));
+
+
+ // create an index on view3
+ tenantConn.createStatement().execute("CREATE INDEX " + view3Index + " ON " + view3 + " (v1) include (v2)");
+ assertEquals(0, getTableSequenceNumber(phxConn, view3Index));
+ assertEquals(4, getMaxKeySequenceNumber(phxConn, view3Index));
+
+
+ }
+
+ // alter the base table by adding 1 non-pk and 2 pk columns
+ try (Connection globalConn = DriverManager.getConnection(getUrl())) {
+ globalConn.createStatement().execute("ALTER TABLE " + baseTable + " ADD v3 VARCHAR, k2 VARCHAR PRIMARY KEY, k3 VARCHAR PRIMARY KEY");
+ assertEquals(4, getMaxKeySequenceNumber(globalConn.unwrap(PhoenixConnection.class), baseTable));
+
+ // Upsert records in the base table
+ String upsert = "UPSERT INTO " + baseTable + " (TENANT_ID, K1, K2, K3, V1, V2, V3) VALUES (?, ?, ?, ?, ?, ?, ?)";
+ PreparedStatement stmt = globalConn.prepareStatement(upsert);
+ stmt.setString(1, tenant1);
+ stmt.setString(2, "K1");
+ stmt.setString(3, "K2");
+ stmt.setString(4, "K3");
+ stmt.setString(5, "V1");
+ stmt.setString(6, "V2");
+ stmt.setString(7, "V3");
+ stmt.executeUpdate();
+ stmt.setString(1, tenant2);
+ stmt.setString(2, "K11");
+ stmt.setString(3, "K22");
+ stmt.setString(4, "K33");
+ stmt.setString(5, "V11");
+ stmt.setString(6, "V22");
+ stmt.setString(7, "V33");
+ stmt.executeUpdate();
+ globalConn.commit();
+ }
+
+ // Verify now that the sequence number of data table, indexes and views have changed.
+ // Also verify that the newly added pk columns show up as pk columns of data table, indexes and views.
+ try (Connection tenantConn = getTenantConnection(tenant1)) {
+
+ ResultSet rs = tenantConn.createStatement().executeQuery("SELECT K2, K3, V3 FROM " + view1);
+ PhoenixConnection phxConn = tenantConn.unwrap(PhoenixConnection.class);
+ assertTrue(checkColumnPartOfPk(phxConn, "k2", view1));
+ assertTrue(checkColumnPartOfPk(phxConn, "k3", view1));
+ assertEquals(1, getTableSequenceNumber(phxConn, view1));
+ assertEquals(4, getMaxKeySequenceNumber(phxConn, view1));
+ verifyNewColumns(rs, "K2", "K3", "V3");
+
+
+ rs = tenantConn.createStatement().executeQuery("SELECT K2, K3, V3 FROM " + view2);
+ assertTrue(checkColumnPartOfPk(phxConn, "k2", view2));
+ assertTrue(checkColumnPartOfPk(phxConn, "k3", view2));
+ assertEquals(1, getTableSequenceNumber(phxConn, view2));
+ assertEquals(4, getMaxKeySequenceNumber(phxConn, view2));
+ verifyNewColumns(rs, "K2", "K3", "V3");
+
+ assertTrue(checkColumnPartOfPk(phxConn, IndexUtil.getIndexColumnName(null, "k2"), view2Index));
+ assertTrue(checkColumnPartOfPk(phxConn, IndexUtil.getIndexColumnName(null, "k3"), view2Index));
+ assertEquals(1, getTableSequenceNumber(phxConn, view2Index));
+ assertEquals(6, getMaxKeySequenceNumber(phxConn, view2Index));
+ }
+ try (Connection tenantConn = getTenantConnection(tenant2)) {
+ ResultSet rs = tenantConn.createStatement().executeQuery("SELECT K2, K3, V3 FROM " + view3);
+ PhoenixConnection phxConn = tenantConn.unwrap(PhoenixConnection.class);
+ assertTrue(checkColumnPartOfPk(phxConn, "k2", view3));
+ assertTrue(checkColumnPartOfPk(phxConn, "k3", view3));
+ assertEquals(1, getTableSequenceNumber(phxConn, view3));
+ verifyNewColumns(rs, "K22", "K33", "V33");
+
+ assertTrue(checkColumnPartOfPk(phxConn, IndexUtil.getIndexColumnName(null, "k2"), view3Index));
+ assertTrue(checkColumnPartOfPk(phxConn, IndexUtil.getIndexColumnName(null, "k3"), view3Index));
+ assertEquals(1, getTableSequenceNumber(phxConn, view3Index));
+ assertEquals(6, getMaxKeySequenceNumber(phxConn, view3Index));
+ }
+ // Verify that the index is actually being used when using newly added pk col
+ try (Connection tenantConn = getTenantConnection(tenant1)) {
+ String upsert = "UPSERT INTO " + view2 + " (K1, K2, K3, V1, V2, V3) VALUES ('key1', 'key2', 'key3', 'value1', 'value2', 'value3')";
+ tenantConn.createStatement().executeUpdate(upsert);
+ tenantConn.commit();
+ Statement stmt = tenantConn.createStatement();
+ String sql = "SELECT V2 FROM " + view2 + " WHERE V1 = 'value1' AND K3 = 'key3'";
+ QueryPlan plan = stmt.unwrap(PhoenixStatement.class).optimizeQuery(sql);
+ assertTrue(plan.getTableRef().getTable().getName().getString().equals(SchemaUtil.normalizeIdentifier(view2Index)));
+ ResultSet rs = tenantConn.createStatement().executeQuery(sql);
+ verifyNewColumns(rs, "value2");
+ }
+
+ }
+
+ private static long getTableSequenceNumber(PhoenixConnection conn, String tableName) throws SQLException {
+ PTable table = conn.getMetaDataCache().getTable(new PTableKey(conn.getTenantId(), SchemaUtil.normalizeIdentifier(tableName)));
+ return table.getSequenceNumber();
+ }
+
+ private static short getMaxKeySequenceNumber(PhoenixConnection conn, String tableName) throws SQLException {
+ PTable table = conn.getMetaDataCache().getTable(new PTableKey(conn.getTenantId(), SchemaUtil.normalizeIdentifier(tableName)));
+ return SchemaUtil.getMaxKeySeq(table);
+ }
+
+ private static void verifyNewColumns(ResultSet rs, String ... values) throws SQLException {
+ assertTrue(rs.next());
+ int i = 1;
+ for (String value : values) {
+ assertEquals(value, rs.getString(i++));
+ }
+ assertFalse(rs.next());
+ }
+
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7918a3d9/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index b848565..43dc07a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -63,7 +63,9 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_BYTE
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT_BYTES;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE_BYTES;
import static org.apache.phoenix.query.QueryConstants.DIVORCED_VIEW_BASE_COLUMN_COUNT;
+import static org.apache.phoenix.query.QueryConstants.SEPARATOR_BYTE_ARRAY;
import static org.apache.phoenix.schema.PTableType.INDEX;
+import static org.apache.phoenix.util.ByteUtil.EMPTY_BYTE_ARRAY;
import static org.apache.phoenix.util.SchemaUtil.getVarCharLength;
import static org.apache.phoenix.util.SchemaUtil.getVarChars;
@@ -167,11 +169,13 @@ import org.apache.phoenix.schema.types.PBoolean;
import org.apache.phoenix.schema.types.PDataType;
import org.apache.phoenix.schema.types.PInteger;
import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.schema.types.PSmallint;
import org.apache.phoenix.schema.types.PVarbinary;
import org.apache.phoenix.schema.types.PVarchar;
import org.apache.phoenix.trace.util.Tracing;
import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.IndexUtil;
import org.apache.phoenix.util.KeyValueUtil;
import org.apache.phoenix.util.MetaDataUtil;
import org.apache.phoenix.util.QueryUtil;
@@ -1584,13 +1588,13 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
// lock the rows corresponding to views so that no other thread can modify the view meta-data
RowLock viewRowLock = acquireLock(region, viewKey, locks);
PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock);
-
if (view.getBaseColumnCount() == QueryConstants.DIVORCED_VIEW_BASE_COLUMN_COUNT) {
// if a view has divorced itself from the base table, we don't allow schema changes
// to be propagated to it.
return;
}
int deltaNumberOfColumns = 0;
+ short deltaNumPkColsSoFar = 0;
for (Mutation m : tableMetadata) {
byte[][] rkmd = new byte[5][];
int pkCount = getVarChars(m.getRow(), rkmd);
@@ -1599,16 +1603,133 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
&& Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
Put p = (Put)m;
- byte[] k = ByteUtil.concat(viewKey, QueryConstants.SEPARATOR_BYTE_ARRAY, rkmd[COLUMN_NAME_INDEX],
- QueryConstants.SEPARATOR_BYTE_ARRAY, rkmd[FAMILY_NAME_INDEX]);
- Put viewColumnDefinitionPut = new Put(k, clientTimeStamp);
+ byte[] columnKey = ByteUtil.concat(viewKey, QueryConstants.SEPARATOR_BYTE_ARRAY, rkmd[COLUMN_NAME_INDEX]);
+ if (rkmd[FAMILY_NAME_INDEX] != null) {
+ columnKey = ByteUtil.concat(columnKey, QueryConstants.SEPARATOR_BYTE_ARRAY, rkmd[FAMILY_NAME_INDEX]);
+ }
+ Put viewColumnDefinitionPut = new Put(columnKey, clientTimeStamp);
for (Cell cell : p.getFamilyCellMap().values().iterator().next()) {
- viewColumnDefinitionPut.add(CellUtil.createCell(k, CellUtil.cloneFamily(cell),
+ viewColumnDefinitionPut.add(CellUtil.createCell(columnKey, CellUtil.cloneFamily(cell),
CellUtil.cloneQualifier(cell), cell.getTimestamp(), cell.getTypeByte(),
CellUtil.cloneValue(cell)));
}
deltaNumberOfColumns++;
mutationsForAddingColumnsToViews.add(viewColumnDefinitionPut);
+ if (rkmd[FAMILY_NAME_INDEX] == null && rkmd[COLUMN_NAME_INDEX] != null) {
+ /*
+ * If adding a pk column to the base table (and hence the view), see if there are any indexes on
+ * the view. If yes, then generate puts for the index header row and column rows.
+ */
+ deltaNumPkColsSoFar++;
+ for (PTable index : view.getIndexes()) {
+ int oldNumberOfColsInIndex = index.getColumns().size();
+
+ byte[] indexColumnKey = ByteUtil.concat(getViewIndexHeaderRowKey(index),
+ QueryConstants.SEPARATOR_BYTE_ARRAY,
+ IndexUtil.getIndexColumnName(rkmd[FAMILY_NAME_INDEX], rkmd[COLUMN_NAME_INDEX]));
+ Put indexColumnDefinitionPut = new Put(indexColumnKey, clientTimeStamp);
+
+ // Set the index specific data type for the column
+ List<Cell> dataTypes = viewColumnDefinitionPut
+ .get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.DATA_TYPE_BYTES);
+ if (dataTypes != null && dataTypes.size() > 0) {
+ Cell dataType = dataTypes.get(0);
+ int dataColumnDataType = PInteger.INSTANCE.getCodec().decodeInt(
+ dataType.getValueArray(), dataType.getValueOffset(), SortOrder.ASC);
+ int indexColumnDataType = IndexUtil.getIndexColumnDataType(true,
+ PDataType.fromTypeId(dataColumnDataType)).getSqlType();
+ byte[] indexColumnDataTypeBytes = new byte[PInteger.INSTANCE.getByteSize()];
+ PInteger.INSTANCE.getCodec().encodeInt(indexColumnDataType, indexColumnDataTypeBytes, 0);
+ indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.DATA_TYPE_BYTES, indexColumnDataTypeBytes);
+ }
+
+ // Set precision
+ List<Cell> decimalDigits = viewColumnDefinitionPut.get(
+ PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.DECIMAL_DIGITS_BYTES);
+ if (decimalDigits != null && decimalDigits.size() > 0) {
+ Cell decimalDigit = decimalDigits.get(0);
+ indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.DECIMAL_DIGITS_BYTES, decimalDigit.getValueArray());
+ }
+
+ // Set size
+ List<Cell> columnSizes = viewColumnDefinitionPut.get(
+ PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.COLUMN_SIZE_BYTES);
+ if (columnSizes != null && columnSizes.size() > 0) {
+ Cell columnSize = columnSizes.get(0);
+ indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.COLUMN_SIZE_BYTES, columnSize.getValueArray());
+ }
+
+ // Set sort order
+ List<Cell> sortOrders = viewColumnDefinitionPut.get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.SORT_ORDER_BYTES);
+ if (sortOrders != null && sortOrders.size() > 0) {
+ Cell sortOrder = sortOrders.get(0);
+ indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.SORT_ORDER_BYTES, sortOrder.getValueArray());
+ }
+
+ // Set data table name
+ List<Cell> dataTableNames = viewColumnDefinitionPut.get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES);
+ if (dataTableNames != null && dataTableNames.size() > 0) {
+ Cell dataTableName = dataTableNames.get(0);
+ indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES, dataTableName.getValueArray());
+ }
+
+ // Set the ordinal position of the new column.
+ byte[] ordinalPositionBytes = new byte[PInteger.INSTANCE.getByteSize()];
+ int ordinalPositionOfNewCol = oldNumberOfColsInIndex + deltaNumPkColsSoFar;
+ PInteger.INSTANCE.getCodec().encodeInt(ordinalPositionOfNewCol, ordinalPositionBytes, 0);
+ indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.ORDINAL_POSITION_BYTES, ordinalPositionBytes);
+
+ // New PK columns have to be nullable after the first DDL
+ byte[] isNullableBytes = PBoolean.INSTANCE.toBytes(true);
+ indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.NULLABLE_BYTES, isNullableBytes);
+
+ // Set the key sequence for the pk column to be added
+ short currentKeySeq = SchemaUtil.getMaxKeySeq(index);
+ short newKeySeq = (short)(currentKeySeq + deltaNumPkColsSoFar);
+ byte[] keySeqBytes = new byte[PSmallint.INSTANCE.getByteSize()];
+ PSmallint.INSTANCE.getCodec().encodeShort(newKeySeq, keySeqBytes, 0);
+ indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.KEY_SEQ_BYTES, keySeqBytes);
+
+ mutationsForAddingColumnsToViews.add(indexColumnDefinitionPut);
+ }
+ }
+ }
+ }
+ if (deltaNumPkColsSoFar > 0) {
+ for (PTable index : view.getIndexes()) {
+ byte[] indexHeaderRowKey = getViewIndexHeaderRowKey(index);
+ Put indexHeaderRowMutation = new Put(indexHeaderRowKey);
+
+ // increment sequence number
+ long newSequenceNumber = index.getSequenceNumber() + 1;
+ byte[] newSequenceNumberPtr = new byte[PLong.INSTANCE.getByteSize()];
+ PLong.INSTANCE.getCodec().encodeLong(newSequenceNumber, newSequenceNumberPtr, 0);
+ indexHeaderRowMutation.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, newSequenceNumberPtr);
+
+ // increase the column count
+ int newColumnCount = index.getColumns().size() + deltaNumPkColsSoFar;
+ byte[] newColumnCountPtr = new byte[PInteger.INSTANCE.getByteSize()];
+ PInteger.INSTANCE.getCodec().encodeInt(newColumnCount, newColumnCountPtr, 0);
+ indexHeaderRowMutation.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.COLUMN_COUNT_BYTES, newColumnCountPtr);
+
+ // add index row header key to the invalidate list to force clients to fetch the latest meta-data
+ invalidateList.add(new ImmutableBytesPtr(indexHeaderRowKey));
+ mutationsForAddingColumnsToViews.add(indexHeaderRowMutation);
}
}
@@ -1635,7 +1756,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
int newPosition = column.getPosition() + deltaNumberOfColumns + 1;
byte[] k = ByteUtil.concat(viewKey, QueryConstants.SEPARATOR_BYTE_ARRAY, column.getName()
- .getBytes(), QueryConstants.SEPARATOR_BYTE_ARRAY, column.getFamilyName() != null ? column.getFamilyName().getBytes() : null);
+ .getBytes());
+ if (column.getFamilyName() != null) {
+ k = ByteUtil.concat(k, QueryConstants.SEPARATOR_BYTE_ARRAY, column.getFamilyName().getBytes());
+ }
Put positionUpdatePut = new Put(k, clientTimeStamp);
byte[] ptr = new byte[PInteger.INSTANCE.getByteSize()];
@@ -1648,7 +1772,14 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
invalidateList.add(new ImmutableBytesPtr(viewKey));
}
}
-
+
+ private byte[] getViewIndexHeaderRowKey(PTable index) {
+ byte[] tenantIdBytes = index.getKey().getTenantId() != null ? index.getKey().getTenantId().getBytes() : EMPTY_BYTE_ARRAY;
+ byte[] schemaNameBytes = index.getSchemaName() != null ? index.getSchemaName().getBytes() : EMPTY_BYTE_ARRAY;
+ byte[] tableNameBytes = index.getTableName().getBytes();
+ return ByteUtil.concat(tenantIdBytes, SEPARATOR_BYTE_ARRAY, schemaNameBytes, SEPARATOR_BYTE_ARRAY, tableNameBytes);
+ }
+
@Override
public void addColumn(RpcController controller, AddColumnRequest request,
RpcCallback<MetaDataResponse> done) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7918a3d9/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java
index 1f4a285..1e3516d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java
@@ -253,17 +253,13 @@ public class ByteUtil {
public static byte[] concat(byte[] first, byte[]... rest) {
int totalLength = first.length;
for (byte[] array : rest) {
- if (array != null) {
- totalLength += array.length;
- }
+ totalLength += array.length;
}
byte[] result = Arrays.copyOf(first, totalLength);
int offset = first.length;
for (byte[] array : rest) {
- if (array != null) {
- System.arraycopy(array, 0, result, offset, array.length);
- offset += array.length;
- }
+ System.arraycopy(array, 0, result, offset, array.length);
+ offset += array.length;
}
return result;
}
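Note the knock-on effect of this simplification: concat no longer tolerates null arrays, which is precisely why the row-key construction in MetaDataEndpointImpl above was changed to append the family-name component only when it is present. The pattern now required of callers, in sketch form (identifiers borrowed from the patch):

    import org.apache.phoenix.query.QueryConstants;
    import org.apache.phoenix.util.ByteUtil;

    public class ConcatNullSketch {
        // Append optional key components conditionally instead of relying
        // on concat to skip nulls (passing null now throws an NPE).
        static byte[] columnRowKey(byte[] viewKey, byte[] columnName, byte[] familyName) {
            byte[] k = ByteUtil.concat(viewKey, QueryConstants.SEPARATOR_BYTE_ARRAY, columnName);
            if (familyName != null) {
                k = ByteUtil.concat(k, QueryConstants.SEPARATOR_BYTE_ARRAY, familyName);
            }
            return k;
        }
    }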
[31/47] phoenix git commit: PHOENIX-1819 Build a framework to capture
and report phoenix client side request level metrics
Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
index 5270277..bb4054b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
@@ -57,6 +57,7 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.jdbc.PhoenixDriver;
import org.apache.phoenix.job.JobManager;
+import org.apache.phoenix.monitoring.GlobalClientMetrics;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
@@ -255,12 +256,9 @@ public class CsvBulkLoadTool extends Configured implements Tool {
}
List<Future<Boolean>> runningJobs = new ArrayList<Future<Boolean>>();
- boolean useInstrumentedPool = conn
- .unwrap(PhoenixConnection.class)
- .getQueryServices()
- .getProps()
- .getBoolean(QueryServices.METRICS_ENABLED,
- QueryServicesOptions.DEFAULT_IS_METRICS_ENABLED);
+ boolean useInstrumentedPool = GlobalClientMetrics.isMetricsEnabled()
+ || conn.unwrap(PhoenixConnection.class).isRequestLevelMetricsEnabled();
+
ExecutorService executor =
JobManager.createThreadPoolExec(Integer.MAX_VALUE, 5, 20, useInstrumentedPool);
try{
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
index eb6dc3d..b500a25 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
@@ -17,6 +17,8 @@
*/
package org.apache.phoenix.mapreduce;
+import static org.apache.phoenix.monitoring.MetricType.SCAN_BYTES;
+
import java.io.IOException;
import java.sql.SQLException;
import java.util.List;
@@ -32,6 +34,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.db.DBWritable;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.iterate.ConcatResultIterator;
import org.apache.phoenix.iterate.LookAheadResultIterator;
import org.apache.phoenix.iterate.PeekingResultIterator;
@@ -40,6 +43,7 @@ import org.apache.phoenix.iterate.RoundRobinResultIterator;
import org.apache.phoenix.iterate.SequenceResultIterator;
import org.apache.phoenix.iterate.TableResultIterator;
import org.apache.phoenix.jdbc.PhoenixResultSet;
+import org.apache.phoenix.monitoring.ReadMetricQueue;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
@@ -100,8 +104,12 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
final List<Scan> scans = pSplit.getScans();
try {
List<PeekingResultIterator> iterators = Lists.newArrayListWithExpectedSize(scans.size());
+ StatementContext ctx = queryPlan.getContext();
+ ReadMetricQueue readMetrics = ctx.getReadMetricsQueue();
+ String tableName = queryPlan.getTableRef().getTable().getPhysicalName().getString();
for (Scan scan : scans) {
- final TableResultIterator tableResultIterator = new TableResultIterator(queryPlan.getContext(), queryPlan.getTableRef(), scan);
+ final TableResultIterator tableResultIterator = new TableResultIterator(queryPlan.getContext(),
+ queryPlan.getTableRef(), scan, readMetrics.allotMetric(SCAN_BYTES, tableName));
PeekingResultIterator peekingResultIterator = LookAheadResultIterator.wrap(tableResultIterator);
iterators.add(peekingResultIterator);
}
@@ -112,7 +120,7 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
this.resultIterator = iterator;
// Clone the row projector as it's not thread safe and would be used simultaneously by
// multiple threads otherwise.
- this.resultSet = new PhoenixResultSet(this.resultIterator, queryPlan.getProjector().cloneIfNecessary(),queryPlan.getContext().getStatement());
+ this.resultSet = new PhoenixResultSet(this.resultIterator, queryPlan.getProjector().cloneIfNecessary(), queryPlan.getContext());
} catch (SQLException e) {
LOG.error(String.format(" Error [%s] initializing PhoenixRecordReader. ",e.getMessage()));
Throwables.propagate(e);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java b/phoenix-core/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java
index 02c1dea..79b49c5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java
@@ -17,9 +17,6 @@
*/
package org.apache.phoenix.memory;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.SizeMetric.MEMORY_MANAGER_BYTES;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.SizeMetric.MEMORY_WAIT_TIME;
-
import org.apache.http.annotation.GuardedBy;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -92,8 +89,6 @@ public class GlobalMemoryManager implements MemoryManager {
}
usedMemoryBytes += nBytes;
}
- MEMORY_WAIT_TIME.update(System.currentTimeMillis() - startTimeMs);
- MEMORY_MANAGER_BYTES.update(nBytes);
return nBytes;
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/monitoring/AtomicMetric.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/AtomicMetric.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/AtomicMetric.java
new file mode 100644
index 0000000..796e8ba
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/AtomicMetric.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Version of {@link Metric} that can be used when the metric is being concurrently accessed or modified by multiple
+ * threads.
+ */
+public class AtomicMetric implements Metric {
+
+ private final MetricType type;
+ private final AtomicLong value = new AtomicLong();
+
+ public AtomicMetric(MetricType type) {
+ this.type = type;
+ }
+
+ @Override
+ public String getName() {
+ return type.name();
+ }
+
+ @Override
+ public String getDescription() {
+ return type.description();
+ }
+
+ @Override
+ public long getValue() {
+ return value.get();
+ }
+
+ @Override
+ public void change(long delta) {
+ value.addAndGet(delta);
+ }
+
+ @Override
+ public void increment() {
+ value.incrementAndGet();
+ }
+
+ @Override
+ public String getCurrentMetricState() {
+ return getName() + ": " + value.get();
+ }
+
+ @Override
+ public void reset() {
+ value.set(0);
+ }
+
+}
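A short usage sketch for the class above (MetricType.SCAN_BYTES is referenced elsewhere in this patch; the byte count is made up):

    import org.apache.phoenix.monitoring.AtomicMetric;
    import org.apache.phoenix.monitoring.Metric;
    import org.apache.phoenix.monitoring.MetricType;

    public class AtomicMetricSketch {
        public static void main(String[] args) {
            // AtomicMetric wraps an AtomicLong, so concurrent scanner threads
            // can update the same metric without external synchronization.
            Metric scanBytes = new AtomicMetric(MetricType.SCAN_BYTES);
            scanBytes.change(1024);  // one thread adds the bytes it read
            scanBytes.increment();   // another bumps the value by one
            System.out.println(scanBytes.getCurrentMetricState()); // SCAN_BYTES: 1025
        }
    }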
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/monitoring/CombinableMetric.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/CombinableMetric.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/CombinableMetric.java
new file mode 100644
index 0000000..7ebb0c1
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/CombinableMetric.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+
+
+/**
+ * Interface for representing a metric that could be published and possibly combined with a metric of the same
+ * type.
+ */
+public interface CombinableMetric extends Metric {
+
+ String getPublishString();
+
+ CombinableMetric combine(CombinableMetric metric);
+
+ public class NoOpRequestMetric implements CombinableMetric {
+
+ public static NoOpRequestMetric INSTANCE = new NoOpRequestMetric();
+ private static final String EMPTY_STRING = "";
+
+ @Override
+ public String getName() {
+ return EMPTY_STRING;
+ }
+
+ @Override
+ public String getDescription() {
+ return EMPTY_STRING;
+ }
+
+ @Override
+ public long getValue() {
+ return 0;
+ }
+
+ @Override
+ public void change(long delta) {}
+
+ @Override
+ public void increment() {}
+
+ @Override
+ public String getCurrentMetricState() {
+ return EMPTY_STRING;
+ }
+
+ @Override
+ public void reset() {}
+
+ @Override
+ public String getPublishString() {
+ return EMPTY_STRING;
+ }
+
+ @Override
+ public CombinableMetric combine(CombinableMetric metric) {
+ return INSTANCE;
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/monitoring/CombinableMetricImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/CombinableMetricImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/CombinableMetricImpl.java
new file mode 100644
index 0000000..fa6f7d3
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/CombinableMetricImpl.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+public class CombinableMetricImpl implements CombinableMetric {
+
+ private final Metric metric;
+
+ public CombinableMetricImpl(MetricType type) {
+ metric = new NonAtomicMetric(type);
+ }
+
+ @Override
+ public String getName() {
+ return metric.getName();
+ }
+
+ @Override
+ public String getDescription() {
+ return metric.getDescription();
+ }
+
+ @Override
+ public long getValue() {
+ return metric.getValue();
+ }
+
+ @Override
+ public void change(long delta) {
+ metric.change(delta);
+ }
+
+ @Override
+ public void increment() {
+ metric.increment();
+ }
+
+ @Override
+ public String getCurrentMetricState() {
+ return metric.getCurrentMetricState();
+ }
+
+ @Override
+ public void reset() {
+ metric.reset();
+ }
+
+ @Override
+ public String getPublishString() {
+ return getCurrentMetricState();
+ }
+
+ @Override
+ public CombinableMetric combine(CombinableMetric metric) {
+ checkArgument(this.getClass().equals(metric.getClass()));
+ this.metric.change(metric.getValue());
+ return this;
+ }
+
+}
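A short sketch of the combine contract: two metrics of the same type are folded into a single value for publishing, with combine() asserting that both sides are the same implementation class. CombinableMetricImpl and MetricType come from this commit; the driver class and values are illustrative.

    import org.apache.phoenix.monitoring.CombinableMetric;
    import org.apache.phoenix.monitoring.CombinableMetricImpl;
    import org.apache.phoenix.monitoring.MetricType;

    public class CombineSketch {
        public static void main(String[] args) {
            CombinableMetric perThreadA = new CombinableMetricImpl(MetricType.SCAN_BYTES);
            CombinableMetric perThreadB = new CombinableMetricImpl(MetricType.SCAN_BYTES);
            perThreadA.change(100);
            perThreadB.change(250);
            // Fold B into A; combine() checks both are CombinableMetricImpl instances.
            CombinableMetric total = perThreadA.combine(perThreadB);
            System.out.println(total.getPublishString()); // SCAN_BYTES: 350
        }
    }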
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/monitoring/Counter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/Counter.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/Counter.java
deleted file mode 100644
index 141294d..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/Counter.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.monitoring;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-import javax.annotation.concurrent.ThreadSafe;
-
-/**
- * Incrementing only counter that keeps track of the
- * number of occurrences of something.
- *
- */
-@ThreadSafe
-class Counter implements Metric {
-
- private final AtomicLong counter;
- private final String name;
- private final String description;
-
- public Counter(String name, String description) {
- this.name = name;
- this.description = description;
- this.counter = new AtomicLong(0);
- }
-
- public long increment() {
- return counter.incrementAndGet();
- }
-
- public long getCurrentCount() {
- return counter.get();
- }
-
- @Override
- public String getName() {
- return name;
- }
-
- @Override
- public String getDescription() {
- return description;
- }
-
- @Override
- public void reset() {
- counter.set(0);
- }
-
- @Override
- public String toString() {
- return "Name: " + name + ", Current count: " + counter.get();
- }
-
- @Override
- public String getCurrentMetricState() {
- return toString();
- }
-
- @Override
- public long getNumberOfSamples() {
- return getCurrentCount();
- }
-
- @Override
- public long getTotalSum() {
- return getCurrentCount();
- }
-
-}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
new file mode 100644
index 0000000..a8f3bb4
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import static org.apache.phoenix.monitoring.MetricType.QUERY_FAILED_COUNTER;
+import static org.apache.phoenix.monitoring.MetricType.MEMORY_CHUNK_BYTES;
+import static org.apache.phoenix.monitoring.MetricType.MEMORY_WAIT_TIME;
+import static org.apache.phoenix.monitoring.MetricType.MUTATION_BATCH_SIZE;
+import static org.apache.phoenix.monitoring.MetricType.MUTATION_BYTES;
+import static org.apache.phoenix.monitoring.MetricType.MUTATION_COMMIT_TIME;
+import static org.apache.phoenix.monitoring.MetricType.MUTATION_SQL_COUNTER;
+import static org.apache.phoenix.monitoring.MetricType.NUM_PARALLEL_SCANS;
+import static org.apache.phoenix.monitoring.MetricType.QUERY_TIME;
+import static org.apache.phoenix.monitoring.MetricType.QUERY_TIMEOUT_COUNTER;
+import static org.apache.phoenix.monitoring.MetricType.TASK_REJECTED_COUNTER;
+import static org.apache.phoenix.monitoring.MetricType.SCAN_BYTES;
+import static org.apache.phoenix.monitoring.MetricType.SELECT_SQL_COUNTER;
+import static org.apache.phoenix.monitoring.MetricType.SPOOL_FILE_COUNTER;
+import static org.apache.phoenix.monitoring.MetricType.SPOOL_FILE_SIZE;
+import static org.apache.phoenix.monitoring.MetricType.TASK_END_TO_END_TIME;
+import static org.apache.phoenix.monitoring.MetricType.TASK_EXECUTED_COUNTER;
+import static org.apache.phoenix.monitoring.MetricType.TASK_EXECUTION_TIME;
+import static org.apache.phoenix.monitoring.MetricType.TASK_QUEUE_WAIT_TIME;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.phoenix.query.QueryServicesOptions;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Central place where we keep track of all the global client phoenix metrics. These metrics are different from
+ * {@link ReadMetricQueue} or {@link MutationMetricQueue}: they are collected at the client JVM level, as opposed
+ * to the above two, which are collected for every phoenix request.
+ */
+
+public enum GlobalClientMetrics {
+
+ GLOBAL_MUTATION_BATCH_SIZE(MUTATION_BATCH_SIZE),
+ GLOBAL_MUTATION_BYTES(MUTATION_BYTES),
+ GLOBAL_MUTATION_COMMIT_TIME(MUTATION_COMMIT_TIME),
+ GLOBAL_QUERY_TIME(QUERY_TIME),
+ GLOBAL_NUM_PARALLEL_SCANS(NUM_PARALLEL_SCANS),
+ GLOBAL_SCAN_BYTES(SCAN_BYTES),
+ GLOBAL_SPOOL_FILE_SIZE(SPOOL_FILE_SIZE),
+ GLOBAL_MEMORY_CHUNK_BYTES(MEMORY_CHUNK_BYTES),
+ GLOBAL_MEMORY_WAIT_TIME(MEMORY_WAIT_TIME),
+ GLOBAL_TASK_QUEUE_WAIT_TIME(TASK_QUEUE_WAIT_TIME),
+ GLOBAL_TASK_END_TO_END_TIME(TASK_END_TO_END_TIME),
+ GLOBAL_TASK_EXECUTION_TIME(TASK_EXECUTION_TIME),
+ GLOBAL_MUTATION_SQL_COUNTER(MUTATION_SQL_COUNTER),
+ GLOBAL_SELECT_SQL_COUNTER(SELECT_SQL_COUNTER),
+ GLOBAL_TASK_EXECUTED_COUNTER(TASK_EXECUTED_COUNTER),
+ GLOBAL_REJECTED_TASK_COUNTER(TASK_REJECTED_COUNTER),
+ GLOBAL_QUERY_TIMEOUT_COUNTER(QUERY_TIMEOUT_COUNTER),
+ GLOBAL_FAILED_QUERY_COUNTER(QUERY_FAILED_COUNTER),
+ GLOBAL_SPOOL_FILE_COUNTER(SPOOL_FILE_COUNTER);
+
+ private static final boolean isGlobalMetricsEnabled = QueryServicesOptions.withDefaults().isGlobalMetricsEnabled();
+ private GlobalMetric metric;
+
+ public void update(long value) {
+ if (isGlobalMetricsEnabled) {
+ metric.change(value);
+ }
+ }
+
+ @VisibleForTesting
+ public GlobalMetric getMetric() {
+ return metric;
+ }
+
+ @Override
+ public String toString() {
+ return metric.toString();
+ }
+
+ private GlobalClientMetrics(MetricType metricType) {
+ this.metric = new GlobalMetricImpl(metricType);
+ }
+
+ public void increment() {
+ if (isGlobalMetricsEnabled) {
+ metric.increment();
+ }
+ }
+
+ public static Collection<GlobalMetric> getMetrics() {
+ List<GlobalMetric> metrics = new ArrayList<>();
+ for (GlobalClientMetrics m : GlobalClientMetrics.values()) {
+ metrics.add(m.metric);
+ }
+ return metrics;
+ }
+
+ public static boolean isMetricsEnabled() {
+ return isGlobalMetricsEnabled;
+ }
+
+}
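A sketch of how client code is expected to use the new enum: update or increment a global metric (both calls are no-ops when global metrics are disabled via QueryServicesOptions) and then dump all metrics for reporting. Only the driver class and values are hypothetical.

    import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MUTATION_BATCH_SIZE;
    import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_SELECT_SQL_COUNTER;

    import org.apache.phoenix.monitoring.GlobalClientMetrics;
    import org.apache.phoenix.monitoring.GlobalMetric;

    public class GlobalMetricsSketch {
        public static void main(String[] args) {
            GLOBAL_SELECT_SQL_COUNTER.increment();  // one more SELECT issued
            GLOBAL_MUTATION_BATCH_SIZE.update(500); // a batch of 500 mutations
            for (GlobalMetric m : GlobalClientMetrics.getMetrics()) {
                System.out.println(m.getCurrentMetricState());
            }
        }
    }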
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetric.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetric.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetric.java
new file mode 100644
index 0000000..f3b562f
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetric.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+/**
+ * Interface that exposes the various internal phoenix metrics collected
+ * at the JVM level. Because metrics are dynamic in nature, it is not guaranteed that
+ * their exposed states will always be in sync with one another. Use these
+ * metrics primarily for monitoring and debugging purposes.
+ */
+public interface GlobalMetric extends Metric {
+
+ /**
+ * @return Number of samples collected since the last {@link #reset()} call.
+ */
+ public long getNumberOfSamples();
+
+ /**
+ * @return Sum of the values of the metric sampled since the last {@link #reset()} call.
+ */
+ public long getTotalSum();
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricImpl.java
new file mode 100644
index 0000000..26a16e1
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricImpl.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
+ * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
+ * applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+public class GlobalMetricImpl implements GlobalMetric {
+
+ private AtomicLong numberOfSamples = new AtomicLong(0);
+ private Metric metric;
+
+ public GlobalMetricImpl(MetricType type) {
+ this.metric = new AtomicMetric(type);
+ }
+
+ /**
+ * Reset the internal state. Typically called after metric information has been collected and a new phase of
+ * collection is being requested for the next interval.
+ */
+ @Override
+ public void reset() {
+ metric.reset();
+ numberOfSamples.set(0);
+ }
+
+ @Override
+ public long getNumberOfSamples() {
+ return numberOfSamples.get();
+ }
+
+ @Override
+ public long getTotalSum() {
+ return metric.getValue();
+ }
+
+ @Override
+ public void change(long delta) {
+ metric.change(delta);
+ numberOfSamples.incrementAndGet();
+ }
+
+ @Override
+ public void increment() {
+ metric.increment();
+ numberOfSamples.incrementAndGet();
+ }
+
+ @Override
+ public String getName() {
+ return metric.getName();
+ }
+
+ @Override
+ public String getDescription() {
+ return metric.getDescription();
+ }
+
+ @Override
+ public long getValue() {
+ return metric.getValue();
+ }
+
+ @Override
+ public String getCurrentMetricState() {
+ return metric.getCurrentMetricState() + ", Number of samples: " + numberOfSamples.get();
+ }
+}
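GlobalMetricImpl tracks a sample count alongside the running sum, which the following illustrative snippet makes explicit (classes are from this commit; the values are made up):

    import org.apache.phoenix.monitoring.GlobalMetricImpl;
    import org.apache.phoenix.monitoring.MetricType;

    public class GlobalMetricImplSketch {
        public static void main(String[] args) {
            GlobalMetricImpl queryTime = new GlobalMetricImpl(MetricType.QUERY_TIME);
            queryTime.change(120); // first query took 120 ms
            queryTime.change(80);  // second query took 80 ms
            System.out.println(queryTime.getNumberOfSamples()); // 2
            System.out.println(queryTime.getTotalSum());        // 200
            queryTime.reset(); // start a fresh collection interval
        }
    }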
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MemoryMetricsHolder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MemoryMetricsHolder.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MemoryMetricsHolder.java
new file mode 100644
index 0000000..0e82ce4
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MemoryMetricsHolder.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import static org.apache.phoenix.monitoring.MetricType.MEMORY_CHUNK_BYTES;
+import static org.apache.phoenix.monitoring.MetricType.MEMORY_WAIT_TIME;
+
+/**
+ * Class that encapsulates the metrics regarding memory resources needed for servicing a request.
+ */
+public class MemoryMetricsHolder {
+ private final CombinableMetric memoryChunkSizeMetric;
+ private final CombinableMetric memoryWaitTimeMetric;
+ public static final MemoryMetricsHolder NO_OP_INSTANCE = new MemoryMetricsHolder(new ReadMetricQueue(false), null);
+
+ public MemoryMetricsHolder(ReadMetricQueue readMetrics, String tableName) {
+ this.memoryChunkSizeMetric = readMetrics.allotMetric(MEMORY_CHUNK_BYTES, tableName);
+ this.memoryWaitTimeMetric = readMetrics.allotMetric(MEMORY_WAIT_TIME, tableName);
+ }
+
+ public CombinableMetric getMemoryChunkSizeMetric() {
+ return memoryChunkSizeMetric;
+ }
+
+ public CombinableMetric getMemoryWaitTimeMetric() {
+ return memoryWaitTimeMetric;
+ }
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/monitoring/Metric.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/Metric.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/Metric.java
index aef792c..1ad1c7a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/Metric.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/Metric.java
@@ -18,47 +18,46 @@
package org.apache.phoenix.monitoring;
/**
- * Interface that exposes the various internal phoenix metrics collected.
- * Because metrics are dynamic in nature, it is not guaranteed that the
- * state exposed will always be in sync with each other. One should use
- * these metrics primarily for monitoring and debugging purposes.
+ * Interface that represents a phoenix-internal metric.
*/
public interface Metric {
-
/**
- *
* @return Name of the metric
*/
public String getName();
-
+
/**
- *
* @return Description of the metric
*/
public String getDescription();
-
+
/**
- * Reset the internal state. Typically called after
- * metric information has been collected and a new
- * phase of collection is being requested for the next
- * interval.
+ * @return Current value of the metric
*/
- public void reset();
-
+ public long getValue();
+
/**
+ * Change the metric by the specified amount
*
- * @return String that represents the current state of the metric.
- * Typically used to log the current state.
+ * @param delta
+ * amount by which the metric value should be changed
*/
- public String getCurrentMetricState();
-
+ public void change(long delta);
+
+ /**
+ * Change the value of the metric by 1
+ */
+ public void increment();
+
/**
- * @return Number of samples collected since the last {@link #reset()} call.
+ * @return String that represents the current state of the metric. Typically used for logging or reporting purposes.
*/
- public long getNumberOfSamples();
+ public String getCurrentMetricState();
/**
- * @return Sum of the values of the metric sampled since the last {@link #reset()} call.
+ * Reset the metric
*/
- public long getTotalSum();
+ public void reset();
+
}
+
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricType.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricType.java
new file mode 100644
index 0000000..a0c2a4a
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricType.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+public enum MetricType {
+
+ MUTATION_BATCH_SIZE("Batch sizes of mutations"),
+ MUTATION_BYTES("Size of mutations in bytes"),
+ MUTATION_COMMIT_TIME("Time it took to commit mutations"),
+ QUERY_TIME("Query times"),
+ NUM_PARALLEL_SCANS("Number of scans that were executed in parallel"),
+ SCAN_BYTES("Number of bytes read by scans"),
+ MEMORY_CHUNK_BYTES("Number of bytes allocated by the memory manager"),
+ MEMORY_WAIT_TIME("Number of milliseconds threads needed to wait for memory to be allocated through memory manager"),
+ MUTATION_SQL_COUNTER("Counter for number of mutation sql statements"),
+ SELECT_SQL_COUNTER("Counter for number of sql queries"),
+ TASK_QUEUE_WAIT_TIME("Time in milliseconds tasks had to wait in the queue of the thread pool executor"),
+ TASK_END_TO_END_TIME("Time in milliseconds spent by tasks from creation to completion"),
+ TASK_EXECUTION_TIME("Time in milliseconds tasks took to execute"),
+ TASK_EXECUTED_COUNTER("Counter for number of tasks submitted to the thread pool executor"),
+ TASK_REJECTED_COUNTER("Counter for number of tasks that were rejected by the thread pool executor"),
+ QUERY_TIMEOUT_COUNTER("Number of times query timed out"),
+ QUERY_FAILED_COUNTER("Number of times query failed"),
+ SPOOL_FILE_SIZE("Size of spool files created in bytes"),
+ SPOOL_FILE_COUNTER("Number of spool files created"),
+ CACHE_REFRESH_SPLITS_COUNTER("Number of times cache was refreshed because of splits"),
+ WALL_CLOCK_TIME_MS("Wall clock time elapsed for the overall query execution"),
+ RESULT_SET_TIME_MS("Wall clock time elapsed for reading all records using resultSet.next()");
+
+ private final String description;
+
+ private MetricType(String description) {
+ this.description = description;
+ }
+
+ public String description() {
+ return description;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
new file mode 100644
index 0000000..bffb9ad
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import com.google.common.base.Stopwatch;
+
+/**
+ *
+ * Stop watch that is cognizant of the fact whether or not metrics is enabled.
+ * If metrics isn't enabled it doesn't do anything. Otherwise, it delegates
+ * calls to a {@code Stopwatch}.
+ *
+ */
+final class MetricsStopWatch {
+
+ private final boolean isMetricsEnabled;
+ private final Stopwatch stopwatch;
+
+ MetricsStopWatch(boolean isMetricsEnabled) {
+ this.isMetricsEnabled = isMetricsEnabled;
+ this.stopwatch = new Stopwatch();
+ }
+
+ void start() {
+ if (isMetricsEnabled) {
+ stopwatch.start();
+ }
+ }
+
+ void stop() {
+ if (isMetricsEnabled) {
+ if (stopwatch.isRunning()) {
+ stopwatch.stop();
+ }
+ }
+ }
+
+ long getElapsedTimeInMs() {
+ if (isMetricsEnabled) {
+ return stopwatch.elapsedMillis();
+ }
+ return 0;
+ }
+}
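Since MetricsStopWatch is package-private, the following illustrative sketch assumes it lives in the same org.apache.phoenix.monitoring package; it shows the enabled/disabled contract described in the javadoc above.

    package org.apache.phoenix.monitoring;

    // Hypothetical caller; it must live in this package because MetricsStopWatch is package-private.
    public class StopWatchSketch {
        public static void main(String[] args) throws InterruptedException {
            MetricsStopWatch watch = new MetricsStopWatch(true); // passing false makes every call a no-op
            watch.start();
            Thread.sleep(25); // stand-in for the real work being timed
            watch.stop();
            System.out.println(watch.getElapsedTimeInMs()); // roughly 25; always 0 when disabled
        }
    }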
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MutationMetricQueue.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MutationMetricQueue.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MutationMetricQueue.java
new file mode 100644
index 0000000..e90da46
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MutationMetricQueue.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import static org.apache.phoenix.monitoring.MetricType.MUTATION_BATCH_SIZE;
+import static org.apache.phoenix.monitoring.MetricType.MUTATION_BYTES;
+import static org.apache.phoenix.monitoring.MetricType.MUTATION_COMMIT_TIME;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+/**
+ * Queue that tracks the various write/mutation-related phoenix request metrics.
+ */
+public class MutationMetricQueue {
+
+ // Map of table name -> mutation metric
+ private Map<String, MutationMetric> tableMutationMetric = new HashMap<>();
+
+ public void addMetricsForTable(String tableName, MutationMetric metric) {
+ MutationMetric tableMetric = tableMutationMetric.get(tableName);
+ if (tableMetric == null) {
+ tableMutationMetric.put(tableName, metric);
+ } else {
+ tableMetric.combineMetric(metric);
+ }
+ }
+
+ public void combineMetricQueues(MutationMetricQueue other) {
+ Map<String, MutationMetric> tableMetricMap = other.tableMutationMetric;
+ for (Entry<String, MutationMetric> entry : tableMetricMap.entrySet()) {
+ addMetricsForTable(entry.getKey(), entry.getValue());
+ }
+ }
+
+ /**
+ * Aggregate the metrics this queue is tracking so they can be published; call {@link #clearMetrics()} once done.
+ * @return map of table name -> map of (metric name, metric value)
+ */
+ public Map<String, Map<String, Long>> aggregate() {
+ Map<String, Map<String, Long>> publishedMetrics = new HashMap<>();
+ for (Entry<String, MutationMetric> entry : tableMutationMetric.entrySet()) {
+ String tableName = entry.getKey();
+ MutationMetric metric = entry.getValue();
+ Map<String, Long> publishedMetricsForTable = publishedMetrics.get(tableName);
+ if (publishedMetricsForTable == null) {
+ publishedMetricsForTable = new HashMap<>();
+ publishedMetrics.put(tableName, publishedMetricsForTable);
+ }
+ publishedMetricsForTable.put(metric.getNumMutations().getName(), metric.getNumMutations().getValue());
+ publishedMetricsForTable.put(metric.getMutationsSizeBytes().getName(), metric.getMutationsSizeBytes().getValue());
+ publishedMetricsForTable.put(metric.getCommitTimeForMutations().getName(), metric.getCommitTimeForMutations().getValue());
+ }
+ return publishedMetrics;
+ }
+
+ public void clearMetrics() {
+ tableMutationMetric.clear(); // help gc
+ }
+
+ /**
+ * Class that holds together the various metrics associated with mutations.
+ */
+ public static class MutationMetric {
+ private final CombinableMetric numMutations = new CombinableMetricImpl(MUTATION_BATCH_SIZE);
+ private final CombinableMetric mutationsSizeBytes = new CombinableMetricImpl(MUTATION_BYTES);
+ private final CombinableMetric totalCommitTimeForMutations = new CombinableMetricImpl(MUTATION_COMMIT_TIME);
+
+ public MutationMetric(long numMutations, long mutationsSizeBytes, long commitTimeForMutations) {
+ this.numMutations.change(numMutations);
+ this.mutationsSizeBytes.change(mutationsSizeBytes);
+ this.totalCommitTimeForMutations.change(commitTimeForMutations);
+ }
+
+ public CombinableMetric getCommitTimeForMutations() {
+ return totalCommitTimeForMutations;
+ }
+
+ public CombinableMetric getNumMutations() {
+ return numMutations;
+ }
+
+ public CombinableMetric getMutationsSizeBytes() {
+ return mutationsSizeBytes;
+ }
+
+ public void combineMetric(MutationMetric other) {
+ this.numMutations.combine(other.numMutations);
+ this.mutationsSizeBytes.combine(other.mutationsSizeBytes);
+ this.totalCommitTimeForMutations.combine(other.totalCommitTimeForMutations);
+ }
+
+ }
+
+ /**
+ * Class to represent a no-op mutation metric queue. Used in places where request-level metric tracking for mutations is not
+ * needed or desired.
+ */
+ public static class NoOpMutationMetricsQueue extends MutationMetricQueue {
+
+ public static final NoOpMutationMetricsQueue NO_OP_MUTATION_METRICS_QUEUE = new NoOpMutationMetricsQueue();
+
+ private NoOpMutationMetricsQueue() {}
+
+ @Override
+ public void addMetricsForTable(String tableName, MutationMetric metric) {}
+
+ @Override
+ public Map<String, Map<String, Long>> aggregate() { return Collections.emptyMap(); }
+
+
+ }
+
+}
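A sketch of the intended flow for the mutation queue: metrics for repeated batches against the same table are combined, then aggregated per table. The classes are from this commit; the table name and values are made up.

    import java.util.Map;

    import org.apache.phoenix.monitoring.MutationMetricQueue;
    import org.apache.phoenix.monitoring.MutationMetricQueue.MutationMetric;

    public class MutationMetricsSketch {
        public static void main(String[] args) {
            MutationMetricQueue queue = new MutationMetricQueue();
            // 10 mutations, 2048 bytes, committed in 35 ms.
            queue.addMetricsForTable("T1", new MutationMetric(10, 2048, 35));
            // A second batch for the same table is folded into the existing entry.
            queue.addMetricsForTable("T1", new MutationMetric(5, 1024, 20));
            Map<String, Map<String, Long>> byTable = queue.aggregate();
            // Entry order may vary: {T1={MUTATION_BATCH_SIZE=15, MUTATION_BYTES=3072, MUTATION_COMMIT_TIME=55}}
            System.out.println(byTable);
            queue.clearMetrics();
        }
    }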
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/monitoring/NonAtomicMetric.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/NonAtomicMetric.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/NonAtomicMetric.java
new file mode 100644
index 0000000..2d92116
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/NonAtomicMetric.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+/**
+ * Version of {@link Metric} that can be used when the metric isn't getting concurrently modified/accessed by multiple
+ * threads and the memory consistency effects of happen-before can be established. For example - phoenix client side
+ * metrics are modified/accessed by only one thread at a time. Further, the actions of threads in the phoenix client
+ * thread pool happen-before the actions of the thread that performs the aggregation of metrics. This makes
+ * {@link NonAtomicMetric} a good fit for storing Phoenix's client side request level metrics.
+ */
+class NonAtomicMetric implements Metric {
+
+ private final MetricType type;
+ private long value;
+
+ public NonAtomicMetric(MetricType type) {
+ this.type = type;
+ }
+
+ @Override
+ public String getName() {
+ return type.name();
+ }
+
+ @Override
+ public String getDescription() {
+ return type.description();
+ }
+
+ @Override
+ public long getValue() {
+ return value;
+ }
+
+ @Override
+ public void change(long delta) {
+ value += delta;
+ }
+
+ @Override
+ public void increment() {
+ value++;
+ }
+
+ @Override
+ public String getCurrentMetricState() {
+ return getName() + ": " + value;
+ }
+
+ @Override
+ public void reset() {
+ value = 0;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
new file mode 100644
index 0000000..1f71542
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import static org.apache.phoenix.monitoring.MetricType.CACHE_REFRESH_SPLITS_COUNTER;
+import static org.apache.phoenix.monitoring.MetricType.NUM_PARALLEL_SCANS;
+import static org.apache.phoenix.monitoring.MetricType.QUERY_FAILED_COUNTER;
+import static org.apache.phoenix.monitoring.MetricType.QUERY_TIMEOUT_COUNTER;
+import static org.apache.phoenix.monitoring.MetricType.RESULT_SET_TIME_MS;
+import static org.apache.phoenix.monitoring.MetricType.WALL_CLOCK_TIME_MS;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.phoenix.monitoring.CombinableMetric.NoOpRequestMetric;
+
+/**
+ * Class that represents the overall metrics associated with a query being executed by Phoenix.
+ */
+public class OverAllQueryMetrics {
+ private final MetricsStopWatch queryWatch;
+ private final MetricsStopWatch resultSetWatch;
+ private final CombinableMetric numParallelScans;
+ private final CombinableMetric wallClockTimeMS;
+ private final CombinableMetric resultSetTimeMS;
+ private final CombinableMetric queryTimedOut;
+ private final CombinableMetric queryFailed;
+ private final CombinableMetric cacheRefreshedDueToSplits;
+
+ public OverAllQueryMetrics(boolean isMetricsEnabled) {
+ queryWatch = new MetricsStopWatch(isMetricsEnabled);
+ resultSetWatch = new MetricsStopWatch(isMetricsEnabled);
+ numParallelScans = isMetricsEnabled ? new CombinableMetricImpl(NUM_PARALLEL_SCANS) : NoOpRequestMetric.INSTANCE;
+ wallClockTimeMS = isMetricsEnabled ? new CombinableMetricImpl(WALL_CLOCK_TIME_MS) : NoOpRequestMetric.INSTANCE;
+ resultSetTimeMS = isMetricsEnabled ? new CombinableMetricImpl(RESULT_SET_TIME_MS) : NoOpRequestMetric.INSTANCE;
+ queryTimedOut = isMetricsEnabled ? new CombinableMetricImpl(QUERY_TIMEOUT_COUNTER) : NoOpRequestMetric.INSTANCE;
+ queryFailed = isMetricsEnabled ? new CombinableMetricImpl(QUERY_FAILED_COUNTER) : NoOpRequestMetric.INSTANCE;
+ cacheRefreshedDueToSplits = isMetricsEnabled ? new CombinableMetricImpl(CACHE_REFRESH_SPLITS_COUNTER)
+ : NoOpRequestMetric.INSTANCE;
+ }
+
+ public void updateNumParallelScans(long numParallelScans) {
+ this.numParallelScans.change(numParallelScans);
+ }
+
+ public void queryTimedOut() {
+ queryTimedOut.increment();
+ }
+
+ public void queryFailed() {
+ queryFailed.increment();
+ }
+
+ public void cacheRefreshedDueToSplits() {
+ cacheRefreshedDueToSplits.increment();
+ }
+
+ public void startQuery() {
+ queryWatch.start();
+ }
+
+ public void endQuery() {
+ queryWatch.stop();
+ wallClockTimeMS.change(queryWatch.getElapsedTimeInMs());
+ }
+
+ public void startResultSetWatch() {
+ resultSetWatch.start();
+ }
+
+ public void stopResultSetWatch() {
+ resultSetWatch.stop();
+ resultSetTimeMS.change(resultSetWatch.getElapsedTimeInMs());
+ }
+
+ public Map<String, Long> publish() {
+ Map<String, Long> metricsForPublish = new HashMap<>();
+ metricsForPublish.put(numParallelScans.getName(), numParallelScans.getValue());
+ metricsForPublish.put(wallClockTimeMS.getName(), wallClockTimeMS.getValue());
+ metricsForPublish.put(resultSetTimeMS.getName(), resultSetTimeMS.getValue());
+ metricsForPublish.put(queryTimedOut.getName(), queryTimedOut.getValue());
+ metricsForPublish.put(queryFailed.getName(), queryFailed.getValue());
+ metricsForPublish.put(cacheRefreshedDueToSplits.getName(), cacheRefreshedDueToSplits.getValue());
+ return metricsForPublish;
+ }
+
+ public void reset() {
+ numParallelScans.reset();
+ wallClockTimeMS.reset();
+ resultSetTimeMS.reset();
+ queryTimedOut.reset();
+ queryFailed.reset();
+ cacheRefreshedDueToSplits.reset();
+ queryWatch.stop();
+ resultSetWatch.stop();
+ }
+
+ public OverAllQueryMetrics combine(OverAllQueryMetrics metric) {
+ cacheRefreshedDueToSplits.combine(metric.cacheRefreshedDueToSplits);
+ queryFailed.combine(metric.queryFailed);
+ queryTimedOut.combine(metric.queryTimedOut);
+ numParallelScans.combine(metric.numParallelScans);
+ return this;
+ }
+
+}
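A sketch of the expected lifecycle for OverAllQueryMetrics: start the watch, record per-query facts, stop, publish, reset. Only the driver class and the sleep stand-in are hypothetical.

    import java.util.Map;

    import org.apache.phoenix.monitoring.OverAllQueryMetrics;

    public class OverAllMetricsSketch {
        public static void main(String[] args) throws InterruptedException {
            OverAllQueryMetrics metrics = new OverAllQueryMetrics(true);
            metrics.startQuery();
            metrics.updateNumParallelScans(4);
            Thread.sleep(5);    // stand-in for real query execution
            metrics.endQuery(); // stops the watch and records the wall clock time
            Map<String, Long> published = metrics.publish();
            System.out.println(published);
            metrics.reset();    // ready for the next collection interval
        }
    }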
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/monitoring/PhoenixMetrics.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/PhoenixMetrics.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/PhoenixMetrics.java
deleted file mode 100644
index 28e2f2e..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/PhoenixMetrics.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.monitoring;
-
-/**
- * Central place where we keep track of all the internal
- * phoenix metrics that we track.
- *
- */
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
-import org.apache.phoenix.query.QueryServicesOptions;
-
-public class PhoenixMetrics {
- private static final boolean isMetricsEnabled = QueryServicesOptions.withDefaults().isMetricsEnabled();
-
- public static boolean isMetricsEnabled() {
- return isMetricsEnabled;
- }
-
- public enum SizeMetric {
- MUTATION_BATCH_SIZE("CumulativeBatchSizesOfMutations", "Cumulative batch sizes of mutations"),
- MUTATION_BYTES("CumulativeMutationSize", "Cumulative size of mutations in bytes"),
- MUTATION_COMMIT_TIME("CumulativeMutationTime", "Cumulative time it took to send mutations"),
- QUERY_TIME("QueryTime", "Cumulative query times"),
- PARALLEL_SCANS("CumulativeNumberOfParallelScans", "Cumulative number of scans executed that were executed in parallel"),
- SCAN_BYTES("CumulativeScanBytesSize", "Cumulative number of bytes read by scans"),
- SPOOL_FILE_SIZE("CumulativeSpoolFilesSize", "Cumulative size of spool files created in bytes"),
- MEMORY_MANAGER_BYTES("CumulativeBytesAllocated", "Cumulative number of bytes allocated by the memory manager"),
- MEMORY_WAIT_TIME("CumulativeMemoryWaitTime", "Cumulative number of milliseconds threads needed to wait for memory to be allocated through memory manager"),
- TASK_QUEUE_WAIT_TIME("CumulativeTaskQueueWaitTime", "Cumulative time in milliseconds tasks had to wait in the queue of the thread pool executor"),
- TASK_END_TO_END_TIME("CumulativeTaskEndToEndTime", "Cumulative time in milliseconds spent by tasks from creation to completion"),
- TASK_EXECUTION_TIME("CumulativeTaskExecutionTime", "Cumulative time in milliseconds tasks took to execute");
-
- private final SizeStatistic metric;
-
- private SizeMetric(String metricName, String metricDescription) {
- metric = new SizeStatistic(metricName, metricDescription);
- }
-
- public void update(long value) {
- if (isMetricsEnabled) {
- metric.add(value);
- }
- }
-
- // exposed for testing.
- public Metric getMetric() {
- return metric;
- }
-
- @Override
- public String toString() {
- return metric.toString();
- }
- }
-
- public enum CountMetric {
- MUTATION_COUNT("NumMutationCounter", "Counter for number of mutation statements"),
- QUERY_COUNT("NumQueryCounter", "Counter for number of queries"),
- TASK_COUNT("NumberOfTasksCounter", "Counter for number of tasks submitted to the thread pool executor"),
- REJECTED_TASK_COUNT("RejectedTasksCounter", "Counter for number of tasks that were rejected by the thread pool executor"),
- QUERY_TIMEOUT("QueryTimeoutCounter", "Number of times query timed out"),
- FAILED_QUERY("QueryFailureCounter", "Number of times query failed"),
- NUM_SPOOL_FILE("NumSpoolFilesCounter", "Number of spool files created");
-
- private final Counter metric;
-
- private CountMetric(String metricName, String metricDescription) {
- metric = new Counter(metricName, metricDescription);
- }
-
- public void increment() {
- if (isMetricsEnabled) {
- metric.increment();
- }
- }
-
- // exposed for testing.
- public Metric getMetric() {
- return metric;
- }
-
- @Override
- public String toString() {
- return metric.toString();
- }
- }
-
- public static Collection<Metric> getMetrics() {
- List<Metric> metrics = new ArrayList<>();
- for (SizeMetric s : SizeMetric.values()) {
- metrics.add(s.metric);
- }
- for (CountMetric s : CountMetric.values()) {
- metrics.add(s.metric);
- }
- return metrics;
- }
-
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/monitoring/ReadMetricQueue.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/ReadMetricQueue.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/ReadMetricQueue.java
new file mode 100644
index 0000000..e6c6be2
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/ReadMetricQueue.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import javax.annotation.Nonnull;
+
+import org.apache.phoenix.monitoring.CombinableMetric.NoOpRequestMetric;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Queue of all metrics associated with performing reads from the cluster.
+ */
+public class ReadMetricQueue {
+
+ private static final int MAX_QUEUE_SIZE = 20000; // TODO: should this be configurable?
+
+ private final ConcurrentMap<MetricKey, Queue<CombinableMetric>> metricsMap = new ConcurrentHashMap<>();
+
+ private final boolean isRequestMetricsEnabled;
+
+ public ReadMetricQueue(boolean isRequestMetricsEnabled) {
+ this.isRequestMetricsEnabled = isRequestMetricsEnabled;
+ }
+
+ public CombinableMetric allotMetric(MetricType type, String tableName) {
+ if (!isRequestMetricsEnabled) { return NoOpRequestMetric.INSTANCE; }
+ MetricKey key = new MetricKey(type, tableName);
+ Queue<CombinableMetric> q = getMetricQueue(key);
+ CombinableMetric metric = getMetric(type);
+ q.offer(metric);
+ return metric;
+ }
+
+ @VisibleForTesting
+ public CombinableMetric getMetric(MetricType type) {
+ CombinableMetric metric = new CombinableMetricImpl(type);
+ return metric;
+ }
+
+ /**
+ * @return map of table name -> map of (metric name, metric value)
+ */
+ public Map<String, Map<String, Long>> aggregate() {
+ Map<String, Map<String, Long>> publishedMetrics = new HashMap<>();
+ for (Entry<MetricKey, Queue<CombinableMetric>> entry : metricsMap.entrySet()) {
+ String tableNameToPublish = entry.getKey().tableName;
+ Collection<CombinableMetric> metrics = entry.getValue();
+ if (metrics.size() > 0) {
+ CombinableMetric m = combine(metrics);
+ Map<String, Long> map = publishedMetrics.get(tableNameToPublish);
+ if (map == null) {
+ map = new HashMap<>();
+ publishedMetrics.put(tableNameToPublish, map);
+ }
+ map.put(m.getName(), m.getValue());
+ }
+ }
+ return publishedMetrics;
+ }
+
+ public void clearMetrics() {
+ metricsMap.clear(); // help gc
+ }
+
+ private static CombinableMetric combine(Collection<CombinableMetric> metrics) {
+ int size = metrics.size();
+ if (size == 0) { throw new IllegalArgumentException("Metrics collection needs to have at least one element"); }
+ Iterator<CombinableMetric> itr = metrics.iterator();
+ CombinableMetric combinedMetric = itr.next();
+ while (itr.hasNext()) {
+ combinedMetric = combinedMetric.combine(itr.next());
+ }
+ return combinedMetric;
+ }
+
+ /**
+ * Combine the metrics from the other queue into this one. This method should only be called in a
+ * single-threaded manner, while neither metric queue is being modified.
+ */
+ public ReadMetricQueue combineReadMetrics(ReadMetricQueue other) {
+ ConcurrentMap<MetricKey, Queue<CombinableMetric>> otherMetricsMap = other.metricsMap;
+ for (Entry<MetricKey, Queue<CombinableMetric>> entry : otherMetricsMap.entrySet()) {
+ MetricKey key = entry.getKey();
+ Queue<CombinableMetric> otherQueue = entry.getValue();
+ CombinableMetric combinedMetric = null;
+ // combine the metrics corresponding to this metric key before putting it in the queue.
+ for (CombinableMetric m : otherQueue) {
+ if (combinedMetric == null) {
+ combinedMetric = m;
+ } else {
+ combinedMetric.combine(m);
+ }
+ }
+ if (combinedMetric != null) {
+ Queue<CombinableMetric> thisQueue = getMetricQueue(key);
+ thisQueue.offer(combinedMetric);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Inner class whose instances are used as keys in the metrics map.
+ */
+ private static class MetricKey {
+ @Nonnull
+ private final MetricType type;
+
+ @Nonnull
+ private final String tableName;
+
+ MetricKey(MetricType type, String tableName) {
+ checkNotNull(type);
+ checkNotNull(tableName);
+ this.type = type;
+ this.tableName = tableName;
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + tableName.hashCode();
+ result = prime * result + type.hashCode();
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) return true;
+ if (obj == null) return false;
+ if (getClass() != obj.getClass()) return false;
+ MetricKey other = (MetricKey)obj;
+ if (tableName.equals(other.tableName) && type == other.type) return true;
+ return false;
+ }
+
+ }
+
+ private Queue<CombinableMetric> getMetricQueue(MetricKey key) {
+ Queue<CombinableMetric> q = metricsMap.get(key);
+ if (q == null) {
+ q = new LinkedBlockingQueue<CombinableMetric>(MAX_QUEUE_SIZE);
+ Queue<CombinableMetric> curQ = metricsMap.putIfAbsent(key, q);
+ if (curQ != null) {
+ q = curQ;
+ }
+ }
+ return q;
+ }
+
+}
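A sketch of the allot-then-aggregate flow for read metrics: each scan asks the queue for its own metric instance, and aggregate() later combines all instances that share a (metric type, table) key. The table name and values are illustrative.

    import java.util.Map;

    import org.apache.phoenix.monitoring.CombinableMetric;
    import org.apache.phoenix.monitoring.MetricType;
    import org.apache.phoenix.monitoring.ReadMetricQueue;

    public class ReadMetricsSketch {
        public static void main(String[] args) {
            ReadMetricQueue readMetrics = new ReadMetricQueue(true);
            // Each scan gets its own metric instance; with metrics disabled this
            // returns the shared NoOpRequestMetric instead.
            CombinableMetric scanBytes = readMetrics.allotMetric(MetricType.SCAN_BYTES, "T1");
            scanBytes.change(4096);
            Map<String, Map<String, Long>> byTable = readMetrics.aggregate();
            System.out.println(byTable); // {T1={SCAN_BYTES=4096}}
        }
    }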
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/monitoring/SizeStatistic.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/SizeStatistic.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/SizeStatistic.java
deleted file mode 100644
index 9eca754..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/SizeStatistic.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.monitoring;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- *
- * Statistic that keeps track of the sum of long values that
- * could be used to represent a phoenix metric. For performance
- * reasons the internal state in this metric is not strictly covariant
- * and hence should only be used for monitoring and debugging purposes.
- */
-class SizeStatistic implements Metric {
-
- private final AtomicLong total = new AtomicLong(0);
- private final AtomicLong numSamples = new AtomicLong(0);
- private final String name;
- private final String description;
-
- public SizeStatistic(String name, String description) {
- this.name = name;
- this.description = description;
- }
-
- @Override
- public String getName() {
- return name;
- }
-
- @Override
- public String getDescription() {
- return description;
- }
-
- @Override
- public void reset() {
- total.set(0);
- numSamples.set(0);
- }
-
- @Override
- public String getCurrentMetricState() {
- return "Name:" + description + ", Total: " + total.get() + ", Number of samples: " + numSamples.get();
- }
-
- @Override
- public long getNumberOfSamples() {
- return numSamples.get();
- }
-
- @Override
- public long getTotalSum() {
- return total.get();
- }
-
- public long add(long value) {
- // there is a race condition here but what the heck.
- numSamples.incrementAndGet();
- return total.addAndGet(value);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/monitoring/SpoolingMetricsHolder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/SpoolingMetricsHolder.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/SpoolingMetricsHolder.java
new file mode 100644
index 0000000..4373887
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/SpoolingMetricsHolder.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+
+/**
+ * Class that encapsulates the various metrics associated with the spooling done by phoenix as part of servicing a
+ * request.
+ */
+public class SpoolingMetricsHolder {
+
+ private final CombinableMetric spoolFileSizeMetric;
+ private final CombinableMetric numSpoolFileMetric;
+ public static final SpoolingMetricsHolder NO_OP_INSTANCE = new SpoolingMetricsHolder(new ReadMetricQueue(false), "");
+
+ public SpoolingMetricsHolder(ReadMetricQueue readMetrics, String tableName) {
+ this.spoolFileSizeMetric = readMetrics.allotMetric(MetricType.SPOOL_FILE_SIZE, tableName);
+ this.numSpoolFileMetric = readMetrics.allotMetric(MetricType.SPOOL_FILE_COUNTER, tableName);
+ }
+
+ public CombinableMetric getSpoolFileSizeMetric() {
+ return spoolFileSizeMetric;
+ }
+
+ public CombinableMetric getNumSpoolFileMetric() {
+ return numSpoolFileMetric;
+ }
+}
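Typical wiring for the holder above, as a hypothetical sketch: the ReadMetricQueue(boolean) constructor appears in this patch, but the increment() and change(long) mutators on CombinableMetric are assumed from the rest of Phoenix's monitoring package rather than shown here.

    ReadMetricQueue readMetrics = new ReadMetricQueue(true); // request-level metrics on
    SpoolingMetricsHolder spoolMetrics = new SpoolingMetricsHolder(readMetrics, "MY_TABLE");

    // After spilling a chunk of results to disk:
    long bytesSpooled = 4096; // size of the spool file just written (example value)
    spoolMetrics.getNumSpoolFileMetric().increment();
    spoolMetrics.getSpoolFileSizeMetric().change(bytesSpooled);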
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/monitoring/TaskExecutionMetricsHolder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/TaskExecutionMetricsHolder.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/TaskExecutionMetricsHolder.java
new file mode 100644
index 0000000..98ff57c
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/TaskExecutionMetricsHolder.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import static org.apache.phoenix.monitoring.MetricType.TASK_END_TO_END_TIME;
+import static org.apache.phoenix.monitoring.MetricType.TASK_EXECUTED_COUNTER;
+import static org.apache.phoenix.monitoring.MetricType.TASK_EXECUTION_TIME;
+import static org.apache.phoenix.monitoring.MetricType.TASK_QUEUE_WAIT_TIME;
+import static org.apache.phoenix.monitoring.MetricType.TASK_REJECTED_COUNTER;
+
+
+/**
+ * Class to encapsulate the various metrics associated with submitting and executing a task to the phoenix client
+ * thread pool.
+ */
+public class TaskExecutionMetricsHolder {
+
+ private final CombinableMetric taskQueueWaitTime;
+ private final CombinableMetric taskEndToEndTime;
+ private final CombinableMetric taskExecutionTime;
+ private final CombinableMetric numTasks;
+ private final CombinableMetric numRejectedTasks;
+ public static final TaskExecutionMetricsHolder NO_OP_INSTANCE = new TaskExecutionMetricsHolder(new ReadMetricQueue(false), "");
+
+ public TaskExecutionMetricsHolder(ReadMetricQueue readMetrics, String tableName) {
+ taskQueueWaitTime = readMetrics.allotMetric(TASK_QUEUE_WAIT_TIME, tableName);
+ taskEndToEndTime = readMetrics.allotMetric(TASK_END_TO_END_TIME, tableName);
+ taskExecutionTime = readMetrics.allotMetric(TASK_EXECUTION_TIME, tableName);
+ numTasks = readMetrics.allotMetric(TASK_EXECUTED_COUNTER, tableName);
+ numRejectedTasks = readMetrics.allotMetric(TASK_REJECTED_COUNTER, tableName);
+ }
+
+ public CombinableMetric getTaskQueueWaitTime() {
+ return taskQueueWaitTime;
+ }
+
+ public CombinableMetric getTaskEndToEndTime() {
+ return taskEndToEndTime;
+ }
+
+ public CombinableMetric getTaskExecutionTime() {
+ return taskExecutionTime;
+ }
+
+ public CombinableMetric getNumTasks() {
+ return numTasks;
+ }
+
+ public CombinableMetric getNumRejectedTasks() {
+ return numRejectedTasks;
+ }
+
+}
\ No newline at end of file
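The task holder follows the same pattern, one metric per phase of a pooled task's life. Again a sketch, under the same assumption about CombinableMetric's mutators:

    TaskExecutionMetricsHolder taskMetrics = new TaskExecutionMetricsHolder(readMetrics, "MY_TABLE");
    long submitTimeMs = System.currentTimeMillis();
    try {
        // ... submit to the client thread pool; inside the task body:
        long startTimeMs = System.currentTimeMillis();
        taskMetrics.getTaskQueueWaitTime().change(startTimeMs - submitTimeMs);
        // ... do the actual work ...
        long endTimeMs = System.currentTimeMillis();
        taskMetrics.getTaskExecutionTime().change(endTimeMs - startTimeMs);
        taskMetrics.getTaskEndToEndTime().change(endTimeMs - submitTimeMs);
        taskMetrics.getNumTasks().increment();
    } catch (java.util.concurrent.RejectedExecutionException e) {
        taskMetrics.getNumRejectedTasks().increment(); // pool refused the task
    }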
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/query/BaseQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/BaseQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/BaseQueryServicesImpl.java
index 898a919..c16b86d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/BaseQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/BaseQueryServicesImpl.java
@@ -45,7 +45,7 @@ public abstract class BaseQueryServicesImpl implements QueryServices {
options.getKeepAliveMs(),
options.getThreadPoolSize(),
options.getQueueSize(),
- options.isMetricsEnabled());
+ options.isGlobalMetricsEnabled());
this.memoryManager = new GlobalMemoryManager(
Runtime.getRuntime().maxMemory() * options.getMaxMemoryPerc() / 100,
options.getMaxMemoryWaitMs());
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 9183a70..62b080c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -159,7 +159,7 @@ public interface QueryServices extends SQLCloseable {
public static final String DELAY_FOR_SCHEMA_UPDATE_CHECK = "phoenix.schema.change.delay";
public static final String DEFAULT_KEEP_DELETED_CELLS_ATTRIB = "phoenix.table.default.keep.deleted.cells";
public static final String DEFAULT_STORE_NULLS_ATTRIB = "phoenix.table.default.store.nulls";
- public static final String METRICS_ENABLED = "phoenix.query.metrics.enabled";
+ public static final String GLOBAL_METRICS_ENABLED = "phoenix.query.global.metrics.enabled";
// rpc queue configs
public static final String INDEX_HANDLER_COUNT_ATTRIB = "phoenix.rpc.index.handler.count";
@@ -167,6 +167,7 @@ public interface QueryServices extends SQLCloseable {
public static final String FORCE_ROW_KEY_ORDER_ATTRIB = "phoenix.query.force.rowkeyorder";
public static final String ALLOW_USER_DEFINED_FUNCTIONS_ATTRIB = "phoenix.functions.allowUserDefinedFunctions";
+ public static final String COLLECT_REQUEST_LEVEL_METRICS = "phoenix.query.request.metrics.enabled";
/**
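The net effect of the two keys above is to split the old phoenix.query.metrics.enabled switch in two: a JVM-wide flag for the global metrics (on by default) and an opt-in flag for per-request metrics (off by default). A minimal sketch of enabling both through the client-side HBase Configuration; wiring the same keys via hbase-site.xml works identically:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("phoenix.query.global.metrics.enabled", true);  // GLOBAL_METRICS_ENABLED
    conf.setBoolean("phoenix.query.request.metrics.enabled", true); // COLLECT_REQUEST_LEVEL_METRICS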
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 972bf26..3efd79f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -18,15 +18,16 @@
package org.apache.phoenix.query;
import static org.apache.phoenix.query.QueryServices.ALLOW_ONLINE_TABLE_SCHEMA_UPDATE;
-import static org.apache.phoenix.query.QueryServices.ALLOW_USER_DEFINED_FUNCTIONS_ATTRIB;
import static org.apache.phoenix.query.QueryServices.CALL_QUEUE_PRODUCER_ATTRIB_NAME;
import static org.apache.phoenix.query.QueryServices.CALL_QUEUE_ROUND_ROBIN_ATTRIB;
+import static org.apache.phoenix.query.QueryServices.COLLECT_REQUEST_LEVEL_METRICS;
import static org.apache.phoenix.query.QueryServices.DATE_FORMAT_ATTRIB;
import static org.apache.phoenix.query.QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB;
import static org.apache.phoenix.query.QueryServices.DELAY_FOR_SCHEMA_UPDATE_CHECK;
import static org.apache.phoenix.query.QueryServices.DROP_METADATA_ATTRIB;
import static org.apache.phoenix.query.QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB;
import static org.apache.phoenix.query.QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB;
+import static org.apache.phoenix.query.QueryServices.GLOBAL_METRICS_ENABLED;
import static org.apache.phoenix.query.QueryServices.GROUPBY_MAX_CACHE_SIZE_ATTRIB;
import static org.apache.phoenix.query.QueryServices.GROUPBY_SPILLABLE_ATTRIB;
import static org.apache.phoenix.query.QueryServices.GROUPBY_SPILL_FILES_ATTRIB;
@@ -43,7 +44,6 @@ import static org.apache.phoenix.query.QueryServices.MAX_SERVER_CACHE_TIME_TO_LI
import static org.apache.phoenix.query.QueryServices.MAX_SERVER_METADATA_CACHE_SIZE_ATTRIB;
import static org.apache.phoenix.query.QueryServices.MAX_SPOOL_TO_DISK_BYTES_ATTRIB;
import static org.apache.phoenix.query.QueryServices.MAX_TENANT_MEMORY_PERC_ATTRIB;
-import static org.apache.phoenix.query.QueryServices.METRICS_ENABLED;
import static org.apache.phoenix.query.QueryServices.MIN_STATS_UPDATE_FREQ_MS_ATTRIB;
import static org.apache.phoenix.query.QueryServices.MUTATE_BATCH_SIZE_ATTRIB;
import static org.apache.phoenix.query.QueryServices.NUM_RETRIES_FOR_SCHEMA_UPDATE_CHECK;
@@ -188,7 +188,7 @@ public class QueryServicesOptions {
// TODO Change this to true as part of PHOENIX-1543
public static final boolean DEFAULT_AUTO_COMMIT = false;
- public static final boolean DEFAULT_IS_METRICS_ENABLED = true;
+ public static final boolean DEFAULT_IS_GLOBAL_METRICS_ENABLED = true;
private static final String DEFAULT_CLIENT_RPC_CONTROLLER_FACTORY = ClientRpcControllerFactory.class.getName();
@@ -197,6 +197,7 @@ public class QueryServicesOptions {
public static final boolean DEFAULT_USE_BYTE_BASED_REGEX = false;
public static final boolean DEFAULT_FORCE_ROW_KEY_ORDER = false;
public static final boolean DEFAULT_ALLOW_USER_DEFINED_FUNCTIONS = false;
+ public static final boolean DEFAULT_REQUEST_LEVEL_METRICS_ENABLED = false;
private final Configuration config;
@@ -249,10 +250,11 @@ public class QueryServicesOptions {
.setIfUnset(ALLOW_ONLINE_TABLE_SCHEMA_UPDATE, DEFAULT_ALLOW_ONLINE_TABLE_SCHEMA_UPDATE)
.setIfUnset(NUM_RETRIES_FOR_SCHEMA_UPDATE_CHECK, DEFAULT_RETRIES_FOR_SCHEMA_UPDATE_CHECK)
.setIfUnset(DELAY_FOR_SCHEMA_UPDATE_CHECK, DEFAULT_DELAY_FOR_SCHEMA_UPDATE_CHECK)
- .setIfUnset(METRICS_ENABLED, DEFAULT_IS_METRICS_ENABLED)
+ .setIfUnset(GLOBAL_METRICS_ENABLED, DEFAULT_IS_GLOBAL_METRICS_ENABLED)
.setIfUnset(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, DEFAULT_CLIENT_RPC_CONTROLLER_FACTORY)
.setIfUnset(USE_BYTE_BASED_REGEX_ATTRIB, DEFAULT_USE_BYTE_BASED_REGEX)
- .setIfUnset(FORCE_ROW_KEY_ORDER_ATTRIB, DEFAULT_FORCE_ROW_KEY_ORDER);
+ .setIfUnset(FORCE_ROW_KEY_ORDER_ATTRIB, DEFAULT_FORCE_ROW_KEY_ORDER)
+ .setIfUnset(COLLECT_REQUEST_LEVEL_METRICS, DEFAULT_REQUEST_LEVEL_METRICS_ENABLED)
;
// HBase sets this to 1, so we reset it to something more appropriate.
// Hopefully HBase will change this, because we can't know if a user set
@@ -448,10 +450,10 @@ public class QueryServicesOptions {
return config.getInt(GROUPBY_SPILL_FILES_ATTRIB, DEFAULT_GROUPBY_SPILL_FILES);
}
- public boolean isMetricsEnabled() {
- return config.getBoolean(METRICS_ENABLED, DEFAULT_IS_METRICS_ENABLED);
+ public boolean isGlobalMetricsEnabled() {
+ return config.getBoolean(GLOBAL_METRICS_ENABLED, DEFAULT_IS_GLOBAL_METRICS_ENABLED);
}
-
+
public boolean isUseByteBasedRegex() {
return config.getBoolean(USE_BYTE_BASED_REGEX_ATTRIB, DEFAULT_USE_BYTE_BASED_REGEX);
}
@@ -530,11 +532,7 @@ public class QueryServicesOptions {
return this;
}
- public QueryServicesOptions setMetricsEnabled(boolean flag) {
- config.setBoolean(METRICS_ENABLED, flag);
- return this;
- }
-
+
public QueryServicesOptions setUseByteBasedRegex(boolean flag) {
config.setBoolean(USE_BYTE_BASED_REGEX_ATTRIB, flag);
return this;
@@ -544,5 +542,4 @@ public class QueryServicesOptions {
config.setBoolean(FORCE_ROW_KEY_ORDER_ATTRIB, forceRowKeyOrder);
return this;
}
-
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
index 265fc78..159e0c9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
@@ -17,12 +17,23 @@
*/
package org.apache.phoenix.trace;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Function;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Iterators;
-import org.apache.commons.configuration.Configuration;
+import static org.apache.phoenix.metrics.MetricInfo.ANNOTATION;
+import static org.apache.phoenix.metrics.MetricInfo.DESCRIPTION;
+import static org.apache.phoenix.metrics.MetricInfo.END;
+import static org.apache.phoenix.metrics.MetricInfo.HOSTNAME;
+import static org.apache.phoenix.metrics.MetricInfo.PARENT;
+import static org.apache.phoenix.metrics.MetricInfo.SPAN;
+import static org.apache.phoenix.metrics.MetricInfo.START;
+import static org.apache.phoenix.metrics.MetricInfo.TAG;
+import static org.apache.phoenix.metrics.MetricInfo.TRACE;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+
import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -31,20 +42,15 @@ import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.metrics2.MetricsTag;
-import org.apache.phoenix.metrics.*;
+import org.apache.phoenix.metrics.MetricInfo;
+import org.apache.phoenix.metrics.Metrics;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
import org.apache.phoenix.trace.util.Tracing;
import org.apache.phoenix.util.QueryUtil;
-import javax.annotation.Nullable;
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.util.*;
-
-import static org.apache.phoenix.metrics.MetricInfo.*;
-import static org.apache.phoenix.metrics.MetricInfo.HOSTNAME;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
/**
* Write the metrics to a phoenix table.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/util/JDBCUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/JDBCUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/JDBCUtil.java
index ddd9753..aede947 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/JDBCUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/JDBCUtil.java
@@ -161,7 +161,6 @@ public class JDBCUtil {
}
return Boolean.valueOf(autoCommit);
}
-
/**
* Retrieve the value of the optional consistency read setting from JDBC url or connection
* properties.
@@ -182,4 +181,9 @@ public class JDBCUtil {
return Consistency.STRONG;
}
+
+ public static boolean isCollectingRequestLevelMetricsEnabled(String url, Properties overrideProps, ReadOnlyProps queryServicesProps) throws SQLException {
+ String requestMetricsStr = findProperty(url, overrideProps, PhoenixRuntime.REQUEST_METRIC_ATTRIB);
+ return (requestMetricsStr == null ? queryServicesProps.getBoolean(QueryServices.COLLECT_REQUEST_LEVEL_METRICS, QueryServicesOptions.DEFAULT_REQUEST_LEVEL_METRICS_ENABLED) : Boolean.parseBoolean(requestMetricsStr));
+ }
}
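For the record, the helper resolves its answer in this order: an explicit PhoenixRuntime.REQUEST_METRIC_ATTRIB in the JDBC url or connection Properties wins; otherwise the phoenix.query.request.metrics.enabled config key; otherwise the compiled-in default of false. A caller-side sketch, where services is assumed to be a ConnectionQueryServices exposing its ReadOnlyProps:

    boolean collectRequestMetrics = JDBCUtil.isCollectingRequestLevelMetricsEnabled(
            url, connectionProps, services.getProps());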
[36/47] phoenix git commit: PHOENIX-978: allow views to extend parent's PK
Posted by ma...@apache.org.
PHOENIX-978: allow views to extend parent's PK
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d6044944
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d6044944
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d6044944
Branch: refs/heads/calcite
Commit: d6044944b7b1f4981a897c1c7626614659a972c7
Parents: d02b361
Author: Eli Levine <el...@apache.org>
Authored: Mon Jun 29 10:37:48 2015 -0700
Committer: Eli Levine <el...@apache.org>
Committed: Mon Jun 29 10:39:17 2015 -0700
----------------------------------------------------------------------
.../end2end/TenantSpecificTablesDDLIT.java | 22 +----
.../java/org/apache/phoenix/end2end/ViewIT.java | 96 ++++++++++++++++++++
.../phoenix/exception/SQLExceptionCode.java | 2 -
.../apache/phoenix/schema/MetaDataClient.java | 21 +----
4 files changed, 101 insertions(+), 40 deletions(-)
----------------------------------------------------------------------
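In practice the change admits DDL like the following, mirroring the new ViewIT coverage further down: a view may append its own columns to the tail of the parent's primary key. Connection URL is illustrative, and the enclosing method is assumed to declare throws SQLException:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
        Statement stmt = conn.createStatement();
        stmt.execute("CREATE TABLE base (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 DECIMAL,"
                + " CONSTRAINT pk PRIMARY KEY (k1, k2))");
        // The view's PK becomes (K1, K2, K3): parent PK first, view PK columns appended.
        stmt.execute("CREATE VIEW v (v2 VARCHAR, k3 VARCHAR PRIMARY KEY)"
                + " AS SELECT * FROM base WHERE k1 = 1");
    }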
http://git-wip-us.apache.org/repos/asf/phoenix/blob/d6044944/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
index e1a1970..bf86818 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
@@ -17,9 +17,7 @@
*/
package org.apache.phoenix.end2end;
-import static org.apache.phoenix.exception.SQLExceptionCode.CANNOT_DEFINE_PK_FOR_VIEW;
import static org.apache.phoenix.exception.SQLExceptionCode.CANNOT_DROP_PK;
-import static org.apache.phoenix.exception.SQLExceptionCode.CANNOT_MODIFY_VIEW_PK;
import static org.apache.phoenix.exception.SQLExceptionCode.CANNOT_MUTATE_TABLE;
import static org.apache.phoenix.exception.SQLExceptionCode.TABLE_UNDEFINED;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.KEY_SEQ;
@@ -158,16 +156,10 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
}
@Test
- public void testTenantSpecificTableCannotDeclarePK() throws SQLException {
- try {
+ public void testTenantSpecificTableCanDeclarePK() throws SQLException {
createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW TENANT_TABLE2 ( \n" +
" tenant_col VARCHAR PRIMARY KEY) AS SELECT *\n" +
" FROM PARENT_TABLE", null, nextTimestamp());
- fail();
- }
- catch (SQLException expected) {
- assertEquals(CANNOT_DEFINE_PK_FOR_VIEW.getErrorCode(), expected.getErrorCode());
- }
}
@Test(expected=ColumnAlreadyExistsException.class)
@@ -259,20 +251,12 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
}
@Test
- public void testMutationOfPKInTenantTablesNotAllowed() throws Exception {
+ public void testDropOfPKInTenantTablesNotAllowed() throws Exception {
Properties props = new Properties();
props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(nextTimestamp()));
Connection conn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
try {
- try {
- conn.createStatement().execute("alter table " + TENANT_TABLE_NAME + " add new_tenant_pk char(1) primary key");
- fail();
- }
- catch (SQLException expected) {
- assertEquals(CANNOT_MODIFY_VIEW_PK.getErrorCode(), expected.getErrorCode());
- }
-
- // try removing a non-PK col
+ // try removing a PK col
try {
conn.createStatement().execute("alter table " + TENANT_TABLE_NAME + " drop column id");
fail();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/d6044944/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index fb58a8f..db38ab3 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -17,8 +17,11 @@
*/
package org.apache.phoenix.end2end;
+import static com.google.common.collect.Lists.newArrayListWithExpectedSize;
+import static org.apache.phoenix.exception.SQLExceptionCode.NOT_NULLABLE_COLUMN_IN_ROW_KEY;
import static org.apache.phoenix.util.TestUtil.analyzeTable;
import static org.apache.phoenix.util.TestUtil.getAllSplits;
+import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@@ -32,6 +35,7 @@ import java.util.List;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.query.KeyRange;
+import org.apache.phoenix.schema.ColumnAlreadyExistsException;
import org.apache.phoenix.schema.ReadOnlyTableException;
import org.apache.phoenix.schema.TableNotFoundException;
import org.apache.phoenix.util.QueryUtil;
@@ -461,4 +465,96 @@ public class ViewIT extends BaseViewIT {
"CLIENT PARALLEL 1-WAY SKIP SCAN ON 4 KEYS OVER I1 [1,100] - [2,109]\n" +
" SERVER FILTER BY (\"S2\" = 'bas' AND \"S1\" = 'foo')", queryPlan);
}
+
+ @Test
+ public void testCreateViewDefinesPKColumn() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ String ddl = "CREATE TABLE tp (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 DECIMAL, CONSTRAINT pk PRIMARY KEY (k1, k2))";
+ conn.createStatement().execute(ddl);
+ ddl = "CREATE VIEW v1(v2 VARCHAR, k3 VARCHAR PRIMARY KEY) AS SELECT * FROM tp WHERE K1 = 1";
+ conn.createStatement().execute(ddl);
+
+ // assert PK metadata
+ ResultSet rs = conn.getMetaData().getPrimaryKeys(null, null, "V1");
+ assertPKs(rs, new String[] {"K1", "K2", "K3"});
+
+ // sanity check upserts into base table and view
+ conn.createStatement().executeUpdate("upsert into tp (k1, k2, v1) values (1, 1, 1)");
+ conn.createStatement().executeUpdate("upsert into v1 (k1, k2, k3, v2) values (1, 1, 'abc', 'def')");
+ conn.commit();
+
+ // expect 2 rows in the base table
+ rs = conn.createStatement().executeQuery("select count(*) from tp");
+ assertTrue(rs.next());
+ assertEquals(2, rs.getInt(1));
+
+ // expect 2 rows in the view
+ rs = conn.createStatement().executeQuery("select count(*) from v1");
+ assertTrue(rs.next());
+ assertEquals(2, rs.getInt(1));
+ }
+
+ @Test
+ public void testCreateViewDefinesPKConstraint() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ String ddl = "CREATE TABLE tp (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 DECIMAL, CONSTRAINT pk PRIMARY KEY (k1, k2))";
+ conn.createStatement().execute(ddl);
+ ddl = "CREATE VIEW v1(v2 VARCHAR, k3 VARCHAR, k4 INTEGER NOT NULL, CONSTRAINT PKVEW PRIMARY KEY (k3, k4)) AS SELECT * FROM tp WHERE K1 = 1";
+ conn.createStatement().execute(ddl);
+
+ // assert PK metadata
+ ResultSet rs = conn.getMetaData().getPrimaryKeys(null, null, "V1");
+ assertPKs(rs, new String[] {"K1", "K2", "K3", "K4"});
+ }
+
+ @Test
+ public void testViewAddsPKColumn() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ String ddl = "CREATE TABLE tp (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 DECIMAL, CONSTRAINT pk PRIMARY KEY (k1, k2))";
+ conn.createStatement().execute(ddl);
+ ddl = "CREATE VIEW v1 AS SELECT * FROM tp WHERE v1 = 1.0";
+ conn.createStatement().execute(ddl);
+ ddl = "ALTER VIEW V1 ADD k3 VARCHAR PRIMARY KEY, k4 VARCHAR PRIMARY KEY, v2 INTEGER";
+ conn.createStatement().execute(ddl);
+
+ // assert PK metadata
+ ResultSet rs = conn.getMetaData().getPrimaryKeys(null, null, "V1");
+ assertPKs(rs, new String[] {"K1", "K2", "K3", "K4"});
+ }
+
+ @Test(expected=ColumnAlreadyExistsException.class)
+ public void testViewAddsClashingPKColumn() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ String ddl = "CREATE TABLE tp (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 DECIMAL, CONSTRAINT pk PRIMARY KEY (k1, k2))";
+ conn.createStatement().execute(ddl);
+ ddl = "CREATE VIEW v1 AS SELECT * FROM tp WHERE v1 = 1.0";
+ conn.createStatement().execute(ddl);
+ ddl = "ALTER VIEW V1 ADD k3 VARCHAR PRIMARY KEY, k2 VARCHAR PRIMARY KEY, v2 INTEGER";
+ conn.createStatement().execute(ddl);
+ }
+
+ @Test
+ public void testViewAddsNotNullPKColumn() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ String ddl = "CREATE TABLE tp (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 DECIMAL, CONSTRAINT pk PRIMARY KEY (k1, k2))";
+ conn.createStatement().execute(ddl);
+ ddl = "CREATE VIEW v1 AS SELECT * FROM tp WHERE v1 = 1.0";
+ conn.createStatement().execute(ddl);
+ try {
+ ddl = "ALTER VIEW V1 ADD k3 VARCHAR NOT NULL PRIMARY KEY";
+ conn.createStatement().execute(ddl);
+ fail("can only add nullable PKs via ALTER VIEW/TABLE");
+ } catch (SQLException e) {
+ assertEquals(NOT_NULLABLE_COLUMN_IN_ROW_KEY.getErrorCode(), e.getErrorCode());
+ }
+ }
+
+ private void assertPKs(ResultSet rs, String[] expectedPKs) throws SQLException {
+ List<String> pkCols = newArrayListWithExpectedSize(expectedPKs.length);
+ while (rs.next()) {
+ pkCols.add(rs.getString("COLUMN_NAME"));
+ }
+ String[] actualPKs = pkCols.toArray(new String[0]);
+ assertArrayEquals(expectedPKs, actualPKs);
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/d6044944/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index cf72384..cc8b02a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -189,7 +189,6 @@ public enum SQLExceptionCode {
NOT_NULLABLE_COLUMN_IN_ROW_KEY(1006, "42J04", "Only nullable columns may be added to a multi-part row key."),
VARBINARY_LAST_PK(1015, "42J04", "Cannot add column to table when the last PK column is of type VARBINARY or ARRAY."),
NULLABLE_FIXED_WIDTH_LAST_PK(1023, "42J04", "Cannot add column to table when the last PK column is nullable and fixed width."),
- CANNOT_MODIFY_VIEW_PK(1036, "42J04", "Cannot modify the primary key of a VIEW."),
BASE_TABLE_COLUMN(1037, "42J04", "Cannot modify columns of base table used by tenant-specific tables."),
// Key/value column related errors
KEY_VALUE_NOT_NULL(1007, "42K01", "A key/value column may not be declared as not null."),
@@ -234,7 +233,6 @@ public enum SQLExceptionCode {
CANNOT_CREATE_TENANT_SPECIFIC_TABLE(1030, "42Y89", "Cannot create table for tenant-specific connection"),
- CANNOT_DEFINE_PK_FOR_VIEW(1031, "42Y90", "Defining PK columns for a VIEW is not allowed."),
DEFAULT_COLUMN_FAMILY_ONLY_ON_CREATE_TABLE(1034, "42Y93", "Default column family may only be specified when creating a table."),
INSUFFICIENT_MULTI_TENANT_COLUMNS(1040, "42Y96", "A MULTI_TENANT table must have two or more PK columns with the first column being NOT NULL and of type VARCHAR or CHAR."),
VIEW_WHERE_IS_CONSTANT(1045, "43A02", "WHERE clause in VIEW should not evaluate to a constant."),
http://git-wip-us.apache.org/repos/asf/phoenix/blob/d6044944/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index e7c3cd5..d77ded8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -1636,6 +1636,7 @@ public class MetaDataClient {
pkColumns.add(SaltingUtil.SALTING_COLUMN);
}
}
+ int pkPositionOffset = pkColumns.size();
int position = positionOffset;
for (ColumnDef colDef : colDefs) {
@@ -1666,13 +1667,6 @@ public class MetaDataClient {
.setColumnName(column.getName().getString())
.build().buildException();
}
- if (tableType == PTableType.VIEW && viewType != ViewType.MAPPED) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DEFINE_PK_FOR_VIEW)
- .setSchemaName(schemaName)
- .setTableName(tableName)
- .setColumnName(colDef.getColumnDefName().getColumnName())
- .build().buildException();
- }
if (!pkColumns.add(column)) {
throw new ColumnAlreadyExistsException(schemaName, tableName, column.getName().getString());
}
@@ -1702,7 +1696,7 @@ public class MetaDataClient {
.setTableName(tableName)
.build().buildException();
}
- if (!pkColumnsNames.isEmpty() && pkColumnsNames.size() != pkColumns.size() - positionOffset) { // Then a column name in the primary key constraint wasn't resolved
+ if (!pkColumnsNames.isEmpty() && pkColumnsNames.size() != pkColumns.size() - pkPositionOffset) { // Then a column name in the primary key constraint wasn't resolved
Iterator<Pair<ColumnName,SortOrder>> pkColumnNamesIterator = pkColumnsNames.iterator();
while (pkColumnNamesIterator.hasNext()) {
ColumnName colName = pkColumnNamesIterator.next().getFirst();
@@ -2414,7 +2408,6 @@ public class MetaDataClient {
.setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException();
}
}
- throwIfAlteringViewPK(colDef, table);
PColumn column = newColumn(position++, colDef, PrimaryKeyConstraint.EMPTY, table.getDefaultFamilyName() == null ? null : table.getDefaultFamilyName().getString(), true);
columns.add(column);
String pkName = null;
@@ -2914,16 +2907,6 @@ public class MetaDataClient {
return result.getFunctions();
}
- private void throwIfAlteringViewPK(ColumnDef col, PTable table) throws SQLException {
- if (col != null && col.isPK() && table.getType() == PTableType.VIEW) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MODIFY_VIEW_PK)
- .setSchemaName(table.getSchemaName().getString())
- .setTableName(table.getTableName().getString())
- .setColumnName(col.getColumnDefName().getColumnName())
- .build().buildException();
- }
- }
-
public PTableStats getTableStats(PTable table) throws SQLException {
/*
* The shared view index case is tricky, because we don't have
[40/47] phoenix git commit: PHOENIX-2060 - ARRAY_FILL Push the new files
Posted by ma...@apache.org.
PHOENIX-2060 - ARRAY_FILL Push the new files
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fb8c9413
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fb8c9413
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fb8c9413
Branch: refs/heads/calcite
Commit: fb8c9413f6583798059741fb7c03c8c04a2c3336
Parents: c0ad8cf
Author: ramkrishna <ra...@gmail.com>
Authored: Tue Jun 30 22:52:40 2015 +0530
Committer: ramkrishna <ra...@gmail.com>
Committed: Tue Jun 30 22:52:40 2015 +0530
----------------------------------------------------------------------
.../phoenix/end2end/ArrayFillFunctionIT.java | 531 +++++++++++++++++++
.../expression/function/ArrayFillFunction.java | 79 +++
.../expression/ArrayFillFunctionTest.java | 221 ++++++++
3 files changed, 831 insertions(+)
----------------------------------------------------------------------
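ARRAY_FILL(element, length) returns an array of the given length with every slot set to element; the element's type drives the array's type, and a non-positive length fails at evaluation time with an IllegalArgumentException. A minimal client-side sketch (connection URL illustrative, enclosing method assumed to declare throws SQLException):

    import java.sql.Array;
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
        conn.createStatement().execute("CREATE TABLE t (id INTEGER PRIMARY KEY, vals INTEGER[])");
        conn.createStatement().execute("UPSERT INTO t VALUES (1, ARRAY_FILL(7, 3))");
        conn.commit();
        ResultSet rs = conn.createStatement().executeQuery("SELECT vals FROM t WHERE id = 1");
        rs.next();
        Array vals = rs.getArray(1); // INTEGER[3]: [7, 7, 7]
    }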
http://git-wip-us.apache.org/repos/asf/phoenix/blob/fb8c9413/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayFillFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayFillFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayFillFunctionIT.java
new file mode 100644
index 0000000..f9ce88d
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayFillFunctionIT.java
@@ -0,0 +1,531 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.math.BigDecimal;
+import java.sql.Array;
+import java.sql.Connection;
+import java.sql.Date;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.Time;
+import java.sql.Timestamp;
+
+import org.junit.Test;
+
+public class ArrayFillFunctionIT extends BaseHBaseManagedTimeIT {
+
+ private void initTables(Connection conn) throws Exception {
+ String ddl = "CREATE TABLE regions (region_name VARCHAR PRIMARY KEY,length1 INTEGER, length2 INTEGER,date DATE,time TIME,timestamp TIMESTAMP,varchar VARCHAR,integer INTEGER,double DOUBLE,bigint BIGINT,char CHAR(15),double1 DOUBLE,char1 CHAR(17),nullcheck INTEGER,chars2 CHAR(15)[], varchars2 VARCHAR[])";
+ conn.createStatement().execute(ddl);
+ String dml = "UPSERT INTO regions(region_name,length1,length2,date,time,timestamp,varchar,integer,double,bigint,char,double1,char1,nullcheck,chars2,varchars2) VALUES('SF Bay Area'," +
+ "0," +
+ "-3," +
+ "to_date('2015-05-20 06:12:14.184')," +
+ "to_time('2015-05-20 06:12:14.184')," +
+ "to_timestamp('2015-05-20 06:12:14.184')," +
+ "'foo'," +
+ "34," +
+ "23.45," +
+ "34567," +
+ "'foo'," +
+ "23.45," +
+ "'wert'," +
+ "NULL," +
+ "ARRAY['hello','hello','hello']," +
+ "ARRAY['hello','hello','hello']" +
+ ")";
+ PreparedStatement stmt = conn.prepareStatement(dml);
+ stmt.execute();
+ conn.commit();
+ }
+
+ @Test
+ public void testArrayFillFunctionVarchar() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_FILL(varchar,5) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ String[] strings = new String[]{"foo", "foo", "foo", "foo", "foo"};
+
+ Array array = conn.createArrayOf("VARCHAR", strings);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionInteger() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_FILL(integer,4) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Object[] objects = new Object[]{34, 34, 34, 34};
+
+ Array array = conn.createArrayOf("INTEGER", objects);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionDouble() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_FILL(double,4) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Object[] objects = new Object[]{23.45, 23.45, 23.45, 23.45};
+
+ Array array = conn.createArrayOf("DOUBLE", objects);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionBigint() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_FILL(bigint,4) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Object[] objects = new Object[]{34567L, 34567L, 34567L, 34567L};
+
+ Array array = conn.createArrayOf("BIGINT", objects);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionChar() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_FILL(char,4) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Object[] objects = new Object[]{"foo", "foo", "foo", "foo"};
+
+ Array array = conn.createArrayOf("CHAR", objects);
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionVarChar() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_FILL(varchar,4) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Object[] objects = new Object[]{"foo", "foo", "foo", "foo"};
+
+ Array array = conn.createArrayOf("VARCHAR", objects);
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionDate() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_FILL(date,3) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Object[] objects = new Object[]{new Date(1432102334184L), new Date(1432102334184L), new Date(1432102334184L)};
+
+ Array array = conn.createArrayOf("DATE", objects);
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionTime() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_FILL(time,3) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Object[] objects = new Object[]{new Time(1432102334184L), new Time(1432102334184L), new Time(1432102334184L)};
+
+ Array array = conn.createArrayOf("TIME", objects);
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionTimestamp() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_FILL(timestamp,3) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Object[] objects = new Object[]{new Timestamp(1432102334184L), new Timestamp(1432102334184L), new Timestamp(1432102334184L)};
+
+ Array array = conn.createArrayOf("TIMESTAMP", objects);
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testArrayFillFunctionInvalidLength1() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_FILL(timestamp,length2) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Object[] objects = new Object[]{new Timestamp(1432102334184L), new Timestamp(1432102334184L), new Timestamp(1432102334184L)};
+
+ Array array = conn.createArrayOf("TIMESTAMP", objects);
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testArrayFillFunctionInvalidLength2() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_FILL(timestamp,length1) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Object[] objects = new Object[]{new Timestamp(1432102334184L), new Timestamp(1432102334184L), new Timestamp(1432102334184L)};
+
+ Array array = conn.createArrayOf("TIMESTAMP", objects);
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionWithNestedFunctions1() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_FILL(ARRAY_ELEM(ARRAY[23,45],1),3) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Integer[] integers = new Integer[]{23, 23, 23};
+
+ Array array = conn.createArrayOf("INTEGER", integers);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionWithNestedFunctions2() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_FILL('hello', ARRAY_LENGTH(ARRAY[34, 45])) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Object[] objects = new Object[]{"hello", "hello"};
+
+ Array array = conn.createArrayOf("VARCHAR", objects);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionWithNestedFunctions3() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT ARRAY_FILL(3.4, ARRAY_LENGTH(ARRAY[34, 45])) FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Object[] objects = new Object[]{BigDecimal.valueOf(3.4), BigDecimal.valueOf(3.4)};
+
+ Array array = conn.createArrayOf("DECIMAL", objects);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionWithUpsert1() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+
+ String ddl = "CREATE TABLE regions (region_name VARCHAR PRIMARY KEY,varchars VARCHAR[])";
+ conn.createStatement().execute(ddl);
+
+ String dml = "UPSERT INTO regions(region_name,varchars) VALUES('SF Bay Area',ARRAY_FILL('hello',3))";
+ conn.createStatement().execute(dml);
+ conn.commit();
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT varchars FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ String[] strings = new String[]{"hello", "hello", "hello"};
+
+ Array array = conn.createArrayOf("VARCHAR", strings);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionWithUpsert2() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+
+ String ddl = "CREATE TABLE regions (region_name VARCHAR PRIMARY KEY,integers INTEGER[])";
+ conn.createStatement().execute(ddl);
+
+ String dml = "UPSERT INTO regions(region_name,integers) VALUES('SF Bay Area',ARRAY_FILL(3456,3))";
+ conn.createStatement().execute(dml);
+ conn.commit();
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT integers FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Integer[] integers = new Integer[]{3456, 3456, 3456};
+
+ Array array = conn.createArrayOf("INTEGER", integers);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionWithUpsert3() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+
+ String ddl = "CREATE TABLE regions (region_name VARCHAR PRIMARY KEY,doubles DOUBLE[])";
+ conn.createStatement().execute(ddl);
+
+ String dml = "UPSERT INTO regions(region_name,doubles) VALUES('SF Bay Area',ARRAY_FILL(2.5,3))";
+ conn.createStatement().execute(dml);
+ conn.commit();
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT doubles FROM regions WHERE region_name = 'SF Bay Area'");
+ assertTrue(rs.next());
+
+ Double[] doubles = new Double[]{2.5, 2.5, 2.5};
+
+ Array array = conn.createArrayOf("DOUBLE", doubles);
+
+ assertEquals(array, rs.getArray(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionWithUpsertSelect1() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+
+ String ddl = "CREATE TABLE source (region_name VARCHAR PRIMARY KEY,doubles DOUBLE[])";
+ conn.createStatement().execute(ddl);
+
+ ddl = "CREATE TABLE target (region_name VARCHAR PRIMARY KEY,doubles DOUBLE[],doubles2 DOUBLE[])";
+ conn.createStatement().execute(ddl);
+
+ String dml = "UPSERT INTO source(region_name,doubles) VALUES('SF Bay Area',ARRAY_FILL(3.4,3))";
+ conn.createStatement().execute(dml);
+
+ dml = "UPSERT INTO source(region_name,doubles) VALUES('SF Bay Area2',ARRAY_FILL(2.3,3))";
+ conn.createStatement().execute(dml);
+ conn.commit();
+
+ dml = "UPSERT INTO target(region_name, doubles, doubles2) SELECT region_name, doubles,ARRAY_FILL(4.5,5) FROM source";
+ conn.createStatement().execute(dml);
+ conn.commit();
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT doubles, doubles2 FROM target");
+ assertTrue(rs.next());
+
+ Double[] doubles = new Double[]{3.4, 3.4, 3.4};
+ Double[] doubles2 = new Double[]{4.5, 4.5, 4.5, 4.5, 4.5};
+ Array array = conn.createArrayOf("DOUBLE", doubles);
+ Array array2 = conn.createArrayOf("DOUBLE", doubles2);
+
+ assertEquals(array, rs.getArray(1));
+ assertEquals(array2, rs.getArray(2));
+ assertTrue(rs.next());
+
+ doubles = new Double[]{2.3, 2.3, 2.3};
+ array = conn.createArrayOf("DOUBLE", doubles);
+
+ assertEquals(array, rs.getArray(1));
+ assertEquals(array2, rs.getArray(2));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionWithUpsertSelect2() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+
+ String ddl = "CREATE TABLE source (region_name VARCHAR PRIMARY KEY,varchars VARCHAR[])";
+ conn.createStatement().execute(ddl);
+
+ ddl = "CREATE TABLE target (region_name VARCHAR PRIMARY KEY,varchars VARCHAR[],varchars2 VARCHAR[])";
+ conn.createStatement().execute(ddl);
+
+ String dml = "UPSERT INTO source(region_name,varchars) VALUES('SF Bay Area',ARRAY_FILL('foo',3))";
+ conn.createStatement().execute(dml);
+
+ dml = "UPSERT INTO source(region_name,varchars) VALUES('SF Bay Area2',ARRAY_FILL('hello',3))";
+ conn.createStatement().execute(dml);
+ conn.commit();
+
+ dml = "UPSERT INTO target(region_name, varchars, varchars2) SELECT region_name, varchars,ARRAY_FILL(':-)',5) FROM source";
+ conn.createStatement().execute(dml);
+ conn.commit();
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT varchars, varchars2 FROM target");
+ assertTrue(rs.next());
+
+ String[] strings = new String[]{"foo", "foo", "foo"};
+ String[] strings2 = new String[]{":-)", ":-)", ":-)", ":-)", ":-)"};
+ Array array = conn.createArrayOf("VARCHAR", strings);
+ Array array2 = conn.createArrayOf("VARCHAR", strings2);
+
+ assertEquals(array, rs.getArray(1));
+ assertEquals(array2, rs.getArray(2));
+ assertTrue(rs.next());
+
+ strings = new String[]{"hello", "hello", "hello"};
+ array = conn.createArrayOf("VARCHAR", strings);
+
+ assertEquals(array, rs.getArray(1));
+ assertEquals(array2, rs.getArray(2));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionInWhere1() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE ARRAY[12, 12, 12, 12]=ARRAY_FILL(12,4)");
+ assertTrue(rs.next());
+
+ assertEquals("SF Bay Area", rs.getString(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionInWhere2() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE varchar=ANY(ARRAY_FILL('foo',3))");
+ assertTrue(rs.next());
+
+ assertEquals("SF Bay Area", rs.getString(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionInWhere3() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE ARRAY['2345', '2345', '2345', '2345']=ARRAY_FILL('2345', 4)");
+ assertTrue(rs.next());
+
+ assertEquals("SF Bay Area", rs.getString(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionInWhere4() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE ARRAY[23.45, 23.45, 23.45]=ARRAY_FILL(23.45, 3)");
+ assertTrue(rs.next());
+
+ assertEquals("SF Bay Area", rs.getString(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionInWhere5() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE ARRAY['foo','foo','foo','foo','foo']=ARRAY_FILL(varchar,5)");
+ assertTrue(rs.next());
+
+ assertEquals("SF Bay Area", rs.getString(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionInWhere6() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE varchars2=ARRAY_FILL('hello',3)");
+ assertTrue(rs.next());
+
+ assertEquals("SF Bay Area", rs.getString(1));
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testArrayFillFunctionInWhere7() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ initTables(conn);
+
+ ResultSet rs;
+ rs = conn.createStatement().executeQuery("SELECT region_name FROM regions WHERE ARRAY[2,2,2,2]=ARRAY_FILL(2,4)");
+ assertTrue(rs.next());
+
+ assertEquals("SF Bay Area", rs.getString(1));
+ assertFalse(rs.next());
+ }
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/fb8c9413/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayFillFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayFillFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayFillFunction.java
new file mode 100644
index 0000000..5c3a2e5
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayFillFunction.java
@@ -0,0 +1,79 @@
+package org.apache.phoenix.expression.function;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.FunctionParseNode;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.TypeMismatchException;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PArrayDataType;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PVarbinary;
+import org.apache.phoenix.schema.types.PhoenixArray;
+
+@FunctionParseNode.BuiltInFunction(name = ArrayFillFunction.NAME, args = {
+ @FunctionParseNode.Argument(allowedTypes = {PVarbinary.class}),
+ @FunctionParseNode.Argument(allowedTypes = {PInteger.class})})
+public class ArrayFillFunction extends ScalarFunction {
+
+ public static final String NAME = "ARRAY_FILL";
+
+ public ArrayFillFunction() {
+ }
+
+ public ArrayFillFunction(List<Expression> children) throws TypeMismatchException {
+ super(children);
+ }
+
+ @Override
+ public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
+ if (!getElementExpr().evaluate(tuple, ptr)) {
+ return false;
+ }
+ Object element = getElementExpr().getDataType().toObject(ptr, getElementExpr().getSortOrder(), getElementExpr().getMaxLength(), getElementExpr().getScale());
+ if (!getLengthExpr().evaluate(tuple, ptr) || ptr.getLength() == 0) {
+ return false;
+ }
+ int length = (Integer) getLengthExpr().getDataType().toObject(ptr, getLengthExpr().getSortOrder(), getLengthExpr().getMaxLength(), getLengthExpr().getScale());
+ if (length <= 0) {
+ throw new IllegalArgumentException("Array length should be greater than 0");
+ }
+ Object[] elements = new Object[length];
+ Arrays.fill(elements, element);
+ PhoenixArray array = PDataType.instantiatePhoenixArray(getElementExpr().getDataType(), elements);
+ // For fixed-width element types (e.g. CHAR), coerce the array when the declared
+ // max length differs from the max length of the element passed in.
+ if (getElementExpr().getDataType().isFixedWidth() && getMaxLength() != null && getMaxLength() != array.getMaxLength()) {
+ array = new PhoenixArray(array, getMaxLength());
+ }
+ ptr.set(((PArrayDataType) getDataType()).toBytes(array, getElementExpr().getDataType(), getElementExpr().getSortOrder()));
+ return true;
+ }
+
+ @Override
+ public String getName() {
+ return NAME;
+ }
+
+ @Override
+ public PDataType getDataType() {
+ return PArrayDataType.fromTypeId(PDataType.ARRAY_TYPE_BASE + getElementExpr().getDataType().getSqlType());
+ }
+
+ @Override
+ public Integer getMaxLength() {
+ return getElementExpr().getDataType().getByteSize() == null ? getElementExpr().getMaxLength() : null;
+ }
+
+ @Override
+ public SortOrder getSortOrder() {
+ return children.get(0).getSortOrder();
+ }
+
+ public Expression getElementExpr() {
+ return children.get(0);
+ }
+
+ public Expression getLengthExpr() {
+ return children.get(1);
+ }
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/fb8c9413/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayFillFunctionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayFillFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayFillFunctionTest.java
new file mode 100644
index 0000000..be8cc6f
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/ArrayFillFunctionTest.java
@@ -0,0 +1,221 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.expression;
+
+import static org.junit.Assert.assertEquals;
+
+import java.math.BigDecimal;
+import java.sql.Date;
+import java.sql.SQLException;
+import java.sql.Time;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.function.ArrayFillFunction;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.*;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public class ArrayFillFunctionTest {
+
+ private static void testExpression(LiteralExpression element, LiteralExpression length, PhoenixArray expected)
+ throws SQLException {
+ List<Expression> expressions = Lists.newArrayList((Expression) element);
+ expressions.add(length);
+
+ Expression arrayFillFunction = new ArrayFillFunction(expressions);
+ ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+ arrayFillFunction.evaluate(null, ptr);
+ PhoenixArray result = (PhoenixArray) arrayFillFunction.getDataType().toObject(ptr, arrayFillFunction.getSortOrder(), arrayFillFunction.getMaxLength(), arrayFillFunction.getScale());
+ assertEquals(expected, result);
+ }
+
+ private static void test(Object element, Object length, PDataType elementDataType, Integer elementMaxLen, Integer elementScale, PDataType lengthDataType, Integer lengthMaxlen, Integer lengthScale, PhoenixArray expected, SortOrder elementSortOrder, SortOrder lengthSortOrder) throws SQLException {
+ LiteralExpression elementLiteral, lengthLiteral;
+ elementLiteral = LiteralExpression.newConstant(element, elementDataType, elementMaxLen, elementScale, elementSortOrder, Determinism.ALWAYS);
+ lengthLiteral = LiteralExpression.newConstant(length, lengthDataType, lengthMaxlen, lengthScale, lengthSortOrder, Determinism.ALWAYS);
+ testExpression(elementLiteral, lengthLiteral, expected);
+ }
+
+ @Test
+ public void testForInt() throws SQLException {
+ Object element = 5;
+ Object length = 3;
+ PDataType baseType = PInteger.INSTANCE;
+ PhoenixArray e = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, new Object[]{5, 5, 5});
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testForBoolean() throws SQLException {
+ Object element = false;
+ Object length = 3;
+ PDataType baseType = PBoolean.INSTANCE;
+ PhoenixArray e = new PhoenixArray.PrimitiveBooleanPhoenixArray(baseType, new Object[]{false, false, false});
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testForVarchar() throws SQLException {
+ Object element = "foo";
+ Object length = 3;
+ PDataType baseType = PVarchar.INSTANCE;
+ PhoenixArray e = new PhoenixArray(baseType, new Object[]{"foo", "foo", "foo"});
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testForChar() throws SQLException {
+ Object element = "foo";
+ Object length = 3;
+ PDataType baseType = PChar.INSTANCE;
+ PhoenixArray e = new PhoenixArray(baseType, new Object[]{"foo", "foo", "foo"});
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testForDouble() throws SQLException {
+ Object element = 34.67;
+ Object length = 3;
+ PDataType baseType = PDouble.INSTANCE;
+ PhoenixArray e = new PhoenixArray.PrimitiveDoublePhoenixArray(baseType, new Object[]{34.67, 34.67, 34.67});
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC);
+ }
+
+ @Test
+ public void testForFloat() throws SQLException {
+ Object element = 5.6;
+ Object length = 3;
+ PDataType baseType = PFloat.INSTANCE;
+ PhoenixArray e = new PhoenixArray.PrimitiveFloatPhoenixArray(baseType, new Object[]{(float) 5.6, (float) 5.6, (float) 5.6});
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC);
+ }
+
+ @Test
+ public void testForSmallint() throws SQLException {
+ Object element = 5;
+ Object length = 3;
+ PDataType baseType = PSmallint.INSTANCE;
+ PhoenixArray e = new PhoenixArray.PrimitiveShortPhoenixArray(baseType, new Object[]{(short) 5, (short) 5, (short) 5});
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testForTinyint() throws SQLException {
+ Object element = 6;
+ Object length = 3;
+ PDataType baseType = PTinyint.INSTANCE;
+ PhoenixArray e = new PhoenixArray.PrimitiveBytePhoenixArray(baseType, new Object[]{(byte) 6, (byte) 6, (byte) 6});
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testForLong() throws SQLException {
+ Object element = 34567L;
+ Object length = 3;
+ PDataType baseType = PLong.INSTANCE;
+ PhoenixArray e = new PhoenixArray.PrimitiveLongPhoenixArray(baseType, new Object[]{34567L, 34567L, 34567L});
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testForDecimal() throws SQLException {
+ Object element = BigDecimal.valueOf(345.67);
+ Object length = 3;
+ PDataType baseType = PDecimal.INSTANCE;
+ PhoenixArray e = new PhoenixArray(baseType, new Object[]{BigDecimal.valueOf(345.67), BigDecimal.valueOf(345.67), BigDecimal.valueOf(345.67)});
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testForDate() throws SQLException {
+ Object element = new Date(23);
+ Object length = 3;
+ PDataType baseType = PDate.INSTANCE;
+ PhoenixArray e = new PhoenixArray(baseType, new Object[]{new Date(23), new Date(23), new Date(23)});
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testForTime() throws SQLException {
+ Object element = new Time(23);
+ Object length = 3;
+ PDataType baseType = PTime.INSTANCE;
+ PhoenixArray e = new PhoenixArray(baseType, new Object[]{new Time(23), new Time(23), new Time(23)});
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testForNulls1() throws SQLException {
+ Object element = null;
+ Object length = 3;
+ PDataType baseType = PInteger.INSTANCE;
+ PhoenixArray e = new PhoenixArray.PrimitiveIntPhoenixArray(baseType, new Object[]{0, 0, 0});
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC);
+ }
+
+ @Test
+ public void testForNulls2() throws SQLException {
+ Object element = null;
+ Object length = 3;
+ PDataType baseType = PVarchar.INSTANCE;
+ PhoenixArray e = new PhoenixArray(baseType, new Object[]{null, null, null});
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.ASC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.ASC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.DESC);
+ test(element, length, baseType, null, null, PInteger.INSTANCE, null, null, e, SortOrder.DESC, SortOrder.ASC);
+ }
+}
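For orientation before the next commit: the tests above exercise ArrayFillFunction at the expression level, and the same behavior can be checked end to end over JDBC. Below is a minimal sketch, with the connection URL and table name as placeholders and the SQL name ARRAY_FILL(element, length) assumed from the function under test:

    import java.sql.Array;
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class ArrayFillJdbcSketch {
        public static void main(String[] args) throws Exception {
            // Placeholder URL; point this at a real Phoenix quorum to run it.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                stmt.execute("CREATE TABLE IF NOT EXISTS T (ID INTEGER PRIMARY KEY)");
                stmt.execute("UPSERT INTO T VALUES (1)");
                conn.commit();
                // Mirrors testForInt above: expect [5, 5, 5].
                try (ResultSet rs = stmt.executeQuery("SELECT ARRAY_FILL(5, 3) FROM T")) {
                    while (rs.next()) {
                        Array arr = rs.getArray(1);
                        int[] vals = (int[]) arr.getArray(); // primitive array for INTEGER elements
                        System.out.println(java.util.Arrays.toString(vals));
                    }
                }
            }
        }
    }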
[13/47] phoenix git commit: PHOENIX-2049 Change ArraysWithNullsIT to
be derived from BaseHBaseManagedTimeIT
Posted by ma...@apache.org.
PHOENIX-2049 Change ArraysWithNullsIT to be derived from BaseHBaseManagedTimeIT
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2d70eff6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2d70eff6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2d70eff6
Branch: refs/heads/calcite
Commit: 2d70eff6594d0f46b10f2d9c4c8fa5d43d6ba5ab
Parents: fb44f35
Author: James Taylor <ja...@apache.org>
Authored: Wed Jun 17 17:09:33 2015 -0700
Committer: James Taylor <ja...@apache.org>
Committed: Wed Jun 17 17:13:31 2015 -0700
----------------------------------------------------------------------
.../src/it/java/org/apache/phoenix/end2end/ArraysWithNullsIT.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d70eff6/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArraysWithNullsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArraysWithNullsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArraysWithNullsIT.java
index b034193..e95a386 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArraysWithNullsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArraysWithNullsIT.java
@@ -26,7 +26,7 @@ import org.apache.phoenix.schema.types.PTimestamp;
import org.apache.phoenix.schema.types.PhoenixArray;
import org.junit.Test;
-public class ArraysWithNullsIT extends BaseClientManagedTimeIT {
+public class ArraysWithNullsIT extends BaseHBaseManagedTimeIT {
@Test
public void testArrayUpsertIntWithNulls() throws Exception {
[09/47] phoenix git commit: PHOENIX-1941 Phoenix tests are failing in
linux env with missing class: StaticMapping (Alicia Ying Shu)
Posted by ma...@apache.org.
PHOENIX-1941 Phoenix tests are failing in linux env with missing class: StaticMapping (Alicia Ying Shu)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/db7b5753
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/db7b5753
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/db7b5753
Branch: refs/heads/calcite
Commit: db7b5753bfecaefd4fb32e7e9b9b5223787d4c62
Parents: 03a6ac0
Author: Nick Dimiduk <nd...@apache.org>
Authored: Wed Jun 17 12:17:33 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Wed Jun 17 12:17:33 2015 -0700
----------------------------------------------------------------------
.../phoenix/end2end/End2EndTestDriver.java | 19 +++++++++++++++----
1 file changed, 15 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/db7b5753/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java
index 26d18cf..743f729 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java
@@ -21,6 +21,7 @@ package org.apache.phoenix.end2end;
import java.io.IOException;
import java.io.PrintStream;
+import java.lang.annotation.Annotation;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
@@ -79,10 +80,20 @@ public class End2EndTestDriver extends AbstractHBaseTool {
@Override
public boolean isCandidateClass(Class<?> c) {
- return testFilterRe.matcher(c.getName()).find() &&
- // Our pattern will match the below NON-IntegrationTest. Rather than
- // do exotic regex, just filter it out here
- super.isCandidateClass(c);
+ Annotation[] annotations = c.getAnnotations();
+ for (Annotation curAnnotation : annotations) {
+ if (curAnnotation.toString().contains("NeedsOwnMiniClusterTest")) {
+ /* Skip tests that aren't designed to run against a live cluster.
+ * For a live cluster, we cannot bring it up and down as required
+ * for these tests to run.
+ */
+ return false;
+ }
+ }
+ return testFilterRe.matcher(c.getName()).find() &&
+ // Our pattern will match the below NON-IntegrationTest. Rather than
+ // do exotic regex, just filter it out here
+ super.isCandidateClass(c);
}
}
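The committed check matches on each annotation's string form, which keeps End2EndTestDriver free of a compile-time dependency on the marker class. If that dependency were acceptable, a more direct variant could inspect JUnit categories instead; the sketch below assumes the marker is applied as @Category(NeedsOwnMiniClusterTest.class), which is what the toString() match relies on, and that testFilterRe and super.isCandidateClass come from the same enclosing filter class shown above:

    import org.junit.experimental.categories.Category;

    @Override
    public boolean isCandidateClass(Class<?> c) {
        Category category = c.getAnnotation(Category.class);
        if (category != null) {
            for (Class<?> group : category.value()) {
                if ("NeedsOwnMiniClusterTest".equals(group.getSimpleName())) {
                    return false; // cannot cycle a live cluster up and down for these tests
                }
            }
        }
        return testFilterRe.matcher(c.getName()).find() && super.isCandidateClass(c);
    }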
[35/47] phoenix git commit: PHOENIX-1659
PhoenixDatabaseMetaData.getColumns does not return REMARKS column
Posted by ma...@apache.org.
PHOENIX-1659 PhoenixDatabaseMetaData.getColumns does not return REMARKS column
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d02b3610
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d02b3610
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d02b3610
Branch: refs/heads/calcite
Commit: d02b3610991616ab3920d40d719c9000601d8722
Parents: 38ae6b7
Author: Josh Mahonin <jm...@interset.com>
Authored: Thu Jun 25 15:52:37 2015 -0400
Committer: Josh Mahonin <jm...@interset.com>
Committed: Mon Jun 29 12:27:09 2015 -0400
----------------------------------------------------------------------
.../end2end/QueryDatabaseMetaDataIT.java | 31 ++++++++++++++++++++
.../phoenix/jdbc/PhoenixDatabaseMetaData.java | 1 +
2 files changed, 32 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/d02b3610/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
index 61459a5..2fdccf6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
@@ -1118,4 +1118,35 @@ public class QueryDatabaseMetaDataIT extends BaseClientManagedTimeIT {
assertFalse(rs.next());
}
+ @Test
+ public void testRemarkColumn() throws SQLException {
+ long ts = nextTimestamp();
+ Properties props = new Properties();
+ props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 5));
+ Connection conn = DriverManager.getConnection(getUrl(), props);
+
+ // Retrieve the database metadata
+ DatabaseMetaData dbmd = conn.getMetaData();
+ ResultSet rs = dbmd.getColumns(null, null, null, null);
+ rs.next();
+
+ // Lookup column by name, this should return null but not throw an exception
+ String remarks = rs.getString("REMARKS");
+ assertNull(remarks);
+
+ // Same as above, but lookup by position
+ remarks = rs.getString(12);
+ assertNull(remarks);
+
+ // Iterate through metadata columns to find 'COLUMN_NAME' == 'REMARKS'
+ boolean foundRemarksColumn = false;
+ while(rs.next()) {
+ String colName = rs.getString("COLUMN_NAME");
+ if(PhoenixDatabaseMetaData.REMARKS.equals(colName)) {
+ foundRemarksColumn = true;
+ break;
+ }
+ }
+ assertTrue("Could not find REMARKS column", foundRemarksColumn);
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/d02b3610/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 2dd8af4..314af2e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -439,6 +439,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
DECIMAL_DIGITS + "," +
NUM_PREC_RADIX + "," +
NULLABLE + "," +
+ REMARKS + "," +
COLUMN_DEF + "," +
SQL_DATA_TYPE + "," +
SQL_DATETIME_SUB + "," +
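On the client side, the effect of the added REMARKS projection is that standard JDBC metadata consumers can read the column by name or by its JDBC-defined position without an error. A minimal sketch, with a placeholder connection URL:

    import java.sql.Connection;
    import java.sql.DatabaseMetaData;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class RemarksColumnSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                DatabaseMetaData dbmd = conn.getMetaData();
                try (ResultSet rs = dbmd.getColumns(null, null, null, null)) {
                    while (rs.next()) {
                        String byName = rs.getString("REMARKS");   // null, but no longer an error
                        String byPosition = rs.getString(12);      // REMARKS is column 12 per JDBC
                        System.out.println(rs.getString("COLUMN_NAME")
                                + " remarks=" + byName + "/" + byPosition);
                    }
                }
            }
        }
    }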
[23/47] phoenix git commit: PHOENIX-2068 UserDefinedFunctionsIT is
failing in windows with InvocationTargetException(Rajeshbabu)
Posted by ma...@apache.org.
PHOENIX-2068 UserDefinedFunctionsIT is failing in windows with InvocationTargetException(Rajeshbabu)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/50f3a041
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/50f3a041
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/50f3a041
Branch: refs/heads/calcite
Commit: 50f3a04126c4fea59dc9eb978cef1399892d9a4a
Parents: b58a62a
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Thu Jun 25 00:44:25 2015 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Thu Jun 25 00:44:25 2015 +0530
----------------------------------------------------------------------
.../phoenix/end2end/UserDefinedFunctionsIT.java | 22 ++++++++++++++++----
1 file changed, 18 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/50f3a041/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
index c6bd62f..cee1c85 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
@@ -36,8 +36,10 @@ import java.io.OutputStream;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;
+import java.util.HashSet;
import java.util.Map;
import java.util.Properties;
+import java.util.Set;
import java.util.jar.Attributes;
import java.util.jar.JarEntry;
import java.util.jar.JarOutputStream;
@@ -614,10 +616,22 @@ public class UserDefinedFunctionsIT extends BaseOwnClusterIT{
manifest.getMainAttributes().put(Attributes.Name.MANIFEST_VERSION, "1.0");
FileOutputStream jarFos = new FileOutputStream(jarPath);
JarOutputStream jarOutputStream = new JarOutputStream(jarFos, manifest);
- String pathToAdd =packageName.replace('.', File.separatorChar)
- + File.separator;
- jarOutputStream.putNextEntry(new JarEntry(pathToAdd));
- jarOutputStream.closeEntry();
+ String pathToAdd = packageName.replace('.', '/') + '/';
+ String jarPathStr = new String(pathToAdd);
+ Set<String> pathsInJar = new HashSet<String>();
+
+ while (pathsInJar.add(jarPathStr)) {
+ int ix = jarPathStr.lastIndexOf('/', jarPathStr.length() - 2);
+ if (ix < 0) {
+ break;
+ }
+ jarPathStr = jarPathStr.substring(0, ix);
+ }
+ for (String pathInJar : pathsInJar) {
+ jarOutputStream.putNextEntry(new JarEntry(pathInJar));
+ jarOutputStream.closeEntry();
+ }
+
jarOutputStream.putNextEntry(new JarEntry(pathToAdd + classFile.getName()));
byte[] allBytes = new byte[(int) classFile.length()];
FileInputStream fis = new FileInputStream(classFile);
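Two details carry the fix above: jar entry names always use '/' as the separator regardless of platform (File.separatorChar is what broke on Windows), and every ancestor directory needs its own entry. A standalone sketch of that logic, with trailing slashes kept on the entries so tools recognize them as directories (a small variation on the committed loop, which trims them):

    import java.io.IOException;
    import java.util.LinkedHashSet;
    import java.util.Set;
    import java.util.jar.JarEntry;
    import java.util.jar.JarOutputStream;

    public final class JarPackageDirs {
        // Writes "org/apache/phoenix/", "org/apache/", and "org/" entries for a package.
        static void addPackageDirEntries(JarOutputStream jar, String packageName) throws IOException {
            Set<String> dirs = new LinkedHashSet<String>();
            String path = packageName.replace('.', '/') + '/';
            while (dirs.add(path)) {
                int ix = path.lastIndexOf('/', path.length() - 2);
                if (ix < 0) {
                    break;
                }
                path = path.substring(0, ix + 1); // keep the trailing '/'
            }
            for (String dir : dirs) {
                jar.putNextEntry(new JarEntry(dir));
                jar.closeEntry();
            }
        }
    }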
[11/47] phoenix git commit: PHOENIX-2014 WHERE search condition
ignored when also using row value constructor in view
Posted by ma...@apache.org.
PHOENIX-2014 WHERE search condition ignored when also using row value constructor in view
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/14d11b13
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/14d11b13
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/14d11b13
Branch: refs/heads/calcite
Commit: 14d11b130ca0b3726e7724a1f4a9770bc1cb2453
Parents: 8060048
Author: James Taylor <ja...@apache.org>
Authored: Wed Jun 17 16:58:51 2015 -0700
Committer: James Taylor <ja...@apache.org>
Committed: Wed Jun 17 16:58:51 2015 -0700
----------------------------------------------------------------------
.../phoenix/end2end/RowValueConstructorIT.java | 28 ++++++++++++++++++++
.../apache/phoenix/compile/WhereOptimizer.java | 25 ++++++++++-------
.../phoenix/compile/WhereOptimizerTest.java | 20 ++++++++++++++
3 files changed, 64 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/14d11b13/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
index 3859785..e227eb0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
@@ -1395,4 +1395,32 @@ public class RowValueConstructorIT extends BaseClientManagedTimeIT {
assertEquals(1, numRecords);
}
+ @Test
+ public void testRVCInView() throws Exception {
+ Connection conn = nextConnection(getUrl());
+ conn.createStatement().execute("CREATE TABLE TEST_TABLE.TEST1 (\n" +
+ "PK1 CHAR(3) NOT NULL, \n" +
+ "PK2 CHAR(3) NOT NULL,\n" +
+ "DATA1 CHAR(10)\n" +
+ "CONSTRAINT PK PRIMARY KEY (PK1, PK2))");
+ conn.close();
+ conn = nextConnection(getUrl());
+ conn.createStatement().execute("CREATE VIEW TEST_TABLE.FOO AS SELECT * FROM TEST_TABLE.TEST1 WHERE PK1 = 'FOO'");
+ conn.close();
+ conn = nextConnection(getUrl());
+ conn.createStatement().execute("UPSERT INTO TEST_TABLE.TEST1 VALUES('FOO','001','SOMEDATA')");
+ conn.createStatement().execute("UPSERT INTO TEST_TABLE.TEST1 VALUES('FOO','002','SOMEDATA')");
+ conn.createStatement().execute("UPSERT INTO TEST_TABLE.TEST1 VALUES('FOO','003','SOMEDATA')");
+ conn.createStatement().execute("UPSERT INTO TEST_TABLE.TEST1 VALUES('FOO','004','SOMEDATA')");
+ conn.createStatement().execute("UPSERT INTO TEST_TABLE.TEST1 VALUES('FOO','005','SOMEDATA')");
+ conn.commit();
+ conn.close();
+
+ conn = nextConnection(getUrl());
+ ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM TEST_TABLE.FOO WHERE PK2 < '004' AND (PK1,PK2) > ('FOO','002') LIMIT 2");
+ assertTrue(rs.next());
+ assertEquals("003", rs.getString("PK2"));
+ assertFalse(rs.next());
+ conn.close();
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/14d11b13/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
index a5aef02..b7f04e0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
@@ -647,23 +647,30 @@ public class WhereOptimizer {
if (childSlot == EMPTY_KEY_SLOTS) {
return EMPTY_KEY_SLOTS;
}
- // FIXME: get rid of this min/max range BS now that a key range can span multiple columns
+ // FIXME: get rid of this special-cased min/max range now that a key range can span multiple columns
if (childSlot.getMinMaxRange() != null) { // Only set if in initial pk position
- // TODO: potentially use KeySlot.intersect here. However, we can't intersect the key ranges in the slot
- // with our minMaxRange, since it spans columns and this would mess up our skip scan.
+ // TODO: fix intersectSlots so that it works with RVCs. We'd just need to fill in the leading parts
+ // of the key with the minMaxRange and then intersect the key parts that overlap.
minMaxRange = minMaxRange.intersect(childSlot.getMinMaxRange());
for (KeySlot slot : childSlot) {
if (slot != null) {
- minMaxExtractNodes.addAll(slot.getKeyPart().getExtractNodes());
+ // We can only definitely extract the expression nodes that start from the
+ // leading PK column. They may get extracted at the end if we end up having
+ // expressions matching the leading PK columns, but otherwise we'll be forced
+ // to execute the expression in a filter.
+ if (slot.getPKPosition() == initPosition) {
+ minMaxExtractNodes.addAll(slot.getKeyPart().getExtractNodes());
+ } else {
+ if (!intersectSlots(keySlot, slot)) {
+ return EMPTY_KEY_SLOTS;
+ }
+ }
}
}
} else {
for (KeySlot slot : childSlot) {
- // We have a nested AND with nothing for this slot, so continue
- if (slot == null) {
- continue;
- }
- if (!intersectSlots(keySlot, slot)) {
+ // The slot will be null if we have no condition for this slot
+ if (slot != null && !intersectSlots(keySlot, slot)) {
return EMPTY_KEY_SLOTS;
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/14d11b13/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
index f40afc3..adbd9a2 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
@@ -1780,6 +1780,26 @@ public class WhereOptimizerTest extends BaseConnectionlessQueryTest {
PChar.INSTANCE.toBytes(entityId2), 15)), k2.getLowerRange());
}
+
+ @Test
+ public void testRVCInView() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ conn.createStatement().execute("CREATE TABLE TEST_TABLE.TEST1 (\n" +
+ "PK1 CHAR(3) NOT NULL, \n" +
+ "PK2 CHAR(3) NOT NULL,\n" +
+ "DATA1 CHAR(10)\n" +
+ "CONSTRAINT PK PRIMARY KEY (PK1, PK2))");
+ conn.createStatement().execute("CREATE VIEW TEST_TABLE.FOO AS SELECT * FROM TEST_TABLE.TEST1 WHERE PK1 = 'FOO'");
+ String query = "SELECT * FROM TEST_TABLE.FOO WHERE PK2 < '004' AND (PK1,PK2) > ('FOO','002') LIMIT 2";
+ Scan scan = compileStatement(query, Collections.emptyList(), 2).getScan();
+ byte[] startRow = ByteUtil.nextKey(ByteUtil.concat(PChar.INSTANCE.toBytes("FOO"),
+ PVarchar.INSTANCE.toBytes("002")));
+ assertArrayEquals(startRow, scan.getStartRow());
+ byte[] stopRow = ByteUtil.concat(PChar.INSTANCE.toBytes("FOO"),
+ PChar.INSTANCE.toBytes("004"));
+ assertArrayEquals(stopRow, scan.getStopRow());
+ }
+
private static StatementContext compileStatementTenantSpecific(String tenantId, String query, List<Object> binds) throws Exception {
PhoenixConnection pconn = getTenantSpecificConnection("tenantId").unwrap(PhoenixConnection.class);
PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
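Why the asserted bounds look the way they do: the RVC predicate (PK1,PK2) > ('FOO','002') is an exclusive lower bound, so the scan must start at the next key after the concatenated 'FOO' + '002' bytes, while PK2 < '004' maps directly to the stop row because HBase stop rows are themselves exclusive. Spelled out with the same utilities the test imports:

    // Exclusive lower bound: advance one key past 'FOO'+'002'.
    byte[] startRow = ByteUtil.nextKey(ByteUtil.concat(
            PChar.INSTANCE.toBytes("FOO"), PVarchar.INSTANCE.toBytes("002")));
    // Exclusive upper bound: HBase stop rows are already exclusive, so no nextKey.
    byte[] stopRow = ByteUtil.concat(
            PChar.INSTANCE.toBytes("FOO"), PChar.INSTANCE.toBytes("004"));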
[05/47] phoenix git commit: minor changes based on Jesse's feedback
Posted by ma...@apache.org.
minor changes based on Jesse's feedback
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d1f7dede
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d1f7dede
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d1f7dede
Branch: refs/heads/calcite
Commit: d1f7dedeccbb0befce071cb87efd38290271039a
Parents: a4aa780
Author: Prashant Kommireddi <pk...@pkommireddi-ltm.internal.salesforce.com>
Authored: Mon Jun 15 16:18:47 2015 -0700
Committer: Eli Levine <el...@apache.org>
Committed: Mon Jun 15 18:17:45 2015 -0700
----------------------------------------------------------------------
.../src/main/java/org/apache/phoenix/pig/util/TypeUtil.java | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/d1f7dede/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java b/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
index 6e32fb5..5820ec6 100644
--- a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
+++ b/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
@@ -76,9 +76,7 @@ public final class TypeUtil {
private TypeUtil() {}
/**
- * A map of Phoenix to Pig data types.
- *
- * @return
+ * @return map of Phoenix to Pig data types.
*/
private static ImmutableMap<PDataType, Byte> init() {
final ImmutableMap.Builder<PDataType, Byte> builder = new Builder<PDataType, Byte>();
@@ -160,7 +158,8 @@ public final class TypeUtil {
/**
* This method encodes a value with Phoenix data type. It begins with checking whether an object is BINARY and makes
- * a call to {@link #castBytes(Object, PDataType)} to convery bytes to targetPhoenixType
+ * a call to {@link #castBytes(Object, PDataType)} to convert bytes to targetPhoenixType. It throws a {@link RuntimeException}
+ * when the object cannot be coerced.
*
* @param o
* @param targetPhoenixType
[44/47] phoenix git commit: PHOENIX-2075 MR integration uses single
mapper unless table is salted
Posted by ma...@apache.org.
PHOENIX-2075 MR integration uses single mapper unless table is salted
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6a07d45a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6a07d45a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6a07d45a
Branch: refs/heads/calcite
Commit: 6a07d45a76d6c07666b777676627ebc313d0d7b5
Parents: d2392be
Author: Thomas D'Silva <td...@salesforce.com>
Authored: Fri Jun 26 14:54:31 2015 -0700
Committer: Thomas D'Silva <td...@salesforce.com>
Committed: Tue Jun 30 22:13:59 2015 -0700
----------------------------------------------------------------------
.../org/apache/phoenix/compile/QueryPlan.java | 3 +
.../apache/phoenix/compile/TraceQueryPlan.java | 9 ++-
.../apache/phoenix/execute/AggregatePlan.java | 3 +-
.../apache/phoenix/execute/BaseQueryPlan.java | 14 +++--
.../phoenix/execute/ClientAggregatePlan.java | 5 +-
.../phoenix/execute/ClientProcessingPlan.java | 9 +++
.../apache/phoenix/execute/ClientScanPlan.java | 5 +-
.../phoenix/execute/DegenerateQueryPlan.java | 3 +-
.../apache/phoenix/execute/HashJoinPlan.java | 11 +++-
.../org/apache/phoenix/execute/ScanPlan.java | 7 ++-
.../phoenix/execute/SortMergeJoinPlan.java | 13 +++-
.../phoenix/execute/TupleProjectionPlan.java | 11 +++-
.../org/apache/phoenix/execute/UnionPlan.java | 6 ++
.../phoenix/iterate/BaseResultIterators.java | 24 ++------
.../iterate/DefaultParallelScanGrouper.java | 62 ++++++++++++++++++++
.../iterate/MapReduceParallelScanGrouper.java | 45 ++++++++++++++
.../phoenix/iterate/ParallelIterators.java | 9 ++-
.../phoenix/iterate/ParallelScanGrouper.java | 41 +++++++++++++
.../apache/phoenix/iterate/SerialIterators.java | 4 +-
.../apache/phoenix/jdbc/PhoenixStatement.java | 7 +++
.../phoenix/mapreduce/PhoenixInputFormat.java | 6 +-
.../phoenix/mapreduce/PhoenixInputSplit.java | 1 +
.../query/ParallelIteratorsSplitTest.java | 6 ++
23 files changed, 259 insertions(+), 45 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryPlan.java
index d0c63fa..1c0c469 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryPlan.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
+import org.apache.phoenix.iterate.ParallelScanGrouper;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.parse.FilterableStatement;
import org.apache.phoenix.query.KeyRange;
@@ -46,6 +47,8 @@ public interface QueryPlan extends StatementPlan {
*/
public ResultIterator iterator() throws SQLException;
+ public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException;
+
public long getEstimatedSize();
// TODO: change once joins are supported
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
index 11377de..93a2da0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
@@ -38,6 +38,8 @@ import org.apache.phoenix.expression.Determinism;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.LiteralExpression;
import org.apache.phoenix.expression.RowKeyColumnExpression;
+import org.apache.phoenix.iterate.DefaultParallelScanGrouper;
+import org.apache.phoenix.iterate.ParallelScanGrouper;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixParameterMetaData;
@@ -105,9 +107,14 @@ public class TraceQueryPlan implements QueryPlan {
public ExplainPlan getExplainPlan() throws SQLException {
return ExplainPlan.EMPTY_PLAN;
}
-
+
@Override
public ResultIterator iterator() throws SQLException {
+ return iterator(DefaultParallelScanGrouper.getInstance());
+ }
+
+ @Override
+ public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
final PhoenixConnection conn = stmt.getConnection();
if (conn.getTraceScope() == null && !traceStatement.isTraceOn()) {
return ResultIterator.EMPTY_ITERATOR;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
index 00e843d..67222d3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
@@ -45,6 +45,7 @@ import org.apache.phoenix.iterate.OrderedAggregatingResultIterator;
import org.apache.phoenix.iterate.OrderedResultIterator;
import org.apache.phoenix.iterate.ParallelIteratorFactory;
import org.apache.phoenix.iterate.ParallelIterators;
+import org.apache.phoenix.iterate.ParallelScanGrouper;
import org.apache.phoenix.iterate.PeekingResultIterator;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.iterate.SequenceResultIterator;
@@ -141,7 +142,7 @@ public class AggregatePlan extends BaseQueryPlan {
}
@Override
- protected ResultIterator newIterator() throws SQLException {
+ protected ResultIterator newIterator(ParallelScanGrouper scanGrouper) throws SQLException {
if (groupBy.isEmpty()) {
UngroupedAggregateRegionObserver.serializeIntoScan(context.getScan());
} else {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index 8b6de1d..37b73c0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -46,8 +46,10 @@ import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.expression.ProjectedColumnExpression;
import org.apache.phoenix.index.IndexMaintainer;
+import org.apache.phoenix.iterate.DefaultParallelScanGrouper;
import org.apache.phoenix.iterate.DelegateResultIterator;
import org.apache.phoenix.iterate.ParallelIteratorFactory;
+import org.apache.phoenix.iterate.ParallelScanGrouper;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.parse.FilterableStatement;
@@ -155,11 +157,15 @@ public abstract class BaseQueryPlan implements QueryPlan {
// }
@Override
+ public final ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
+ return iterator(Collections.<SQLCloseable>emptyList(), scanGrouper);
+ }
+
public final ResultIterator iterator() throws SQLException {
- return iterator(Collections.<SQLCloseable>emptyList());
+ return iterator(Collections.<SQLCloseable>emptyList(), DefaultParallelScanGrouper.getInstance());
}
- public final ResultIterator iterator(final List<? extends SQLCloseable> dependencies) throws SQLException {
+ public final ResultIterator iterator(final List<? extends SQLCloseable> dependencies, ParallelScanGrouper scanGrouper) throws SQLException {
if (context.getScanRanges() == ScanRanges.NOTHING) {
return ResultIterator.EMPTY_ITERATOR;
}
@@ -235,7 +241,7 @@ public abstract class BaseQueryPlan implements QueryPlan {
LOG.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
}
- ResultIterator iterator = newIterator();
+ ResultIterator iterator = newIterator(scanGrouper);
iterator = dependencies.isEmpty() ?
iterator : new DelegateResultIterator(iterator) {
@Override
@@ -361,7 +367,7 @@ public abstract class BaseQueryPlan implements QueryPlan {
}
}
- abstract protected ResultIterator newIterator() throws SQLException;
+ abstract protected ResultIterator newIterator(ParallelScanGrouper scanGrouper) throws SQLException;
@Override
public long getEstimatedSize() {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java
index 30adbe9..3df0447 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java
@@ -48,6 +48,7 @@ import org.apache.phoenix.iterate.LimitingResultIterator;
import org.apache.phoenix.iterate.LookAheadResultIterator;
import org.apache.phoenix.iterate.OrderedAggregatingResultIterator;
import org.apache.phoenix.iterate.OrderedResultIterator;
+import org.apache.phoenix.iterate.ParallelScanGrouper;
import org.apache.phoenix.iterate.PeekingResultIterator;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.iterate.SequenceResultIterator;
@@ -80,8 +81,8 @@ public class ClientAggregatePlan extends ClientProcessingPlan {
}
@Override
- public ResultIterator iterator() throws SQLException {
- ResultIterator iterator = delegate.iterator();
+ public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
+ ResultIterator iterator = delegate.iterator(scanGrouper);
if (where != null) {
iterator = new FilterResultIterator(iterator, where);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientProcessingPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientProcessingPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientProcessingPlan.java
index 8e787b4..b189933 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientProcessingPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientProcessingPlan.java
@@ -17,11 +17,15 @@
*/
package org.apache.phoenix.execute;
+import java.sql.SQLException;
+
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.compile.RowProjector;
import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.iterate.DefaultParallelScanGrouper;
+import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.parse.FilterableStatement;
import org.apache.phoenix.schema.TableRef;
@@ -79,4 +83,9 @@ public abstract class ClientProcessingPlan extends DelegateQueryPlan {
public FilterableStatement getStatement() {
return statement;
}
+
+ @Override
+ public ResultIterator iterator() throws SQLException {
+ return iterator(DefaultParallelScanGrouper.getInstance());
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientScanPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientScanPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientScanPlan.java
index 01fbd11..4bf1889 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientScanPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientScanPlan.java
@@ -29,6 +29,7 @@ import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.iterate.FilterResultIterator;
import org.apache.phoenix.iterate.LimitingResultIterator;
import org.apache.phoenix.iterate.OrderedResultIterator;
+import org.apache.phoenix.iterate.ParallelScanGrouper;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.iterate.SequenceResultIterator;
import org.apache.phoenix.parse.FilterableStatement;
@@ -49,8 +50,8 @@ public class ClientScanPlan extends ClientProcessingPlan {
}
@Override
- public ResultIterator iterator() throws SQLException {
- ResultIterator iterator = delegate.iterator();
+ public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
+ ResultIterator iterator = delegate.iterator(scanGrouper);
if (where != null) {
iterator = new FilterResultIterator(iterator, where);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/execute/DegenerateQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/DegenerateQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/DegenerateQueryPlan.java
index fda53ea..98eb2dd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/DegenerateQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/DegenerateQueryPlan.java
@@ -27,6 +27,7 @@ import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
import org.apache.phoenix.compile.RowProjector;
import org.apache.phoenix.compile.ScanRanges;
import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.iterate.ParallelScanGrouper;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.jdbc.PhoenixParameterMetaData;
import org.apache.phoenix.parse.FilterableStatement;
@@ -51,7 +52,7 @@ public class DegenerateQueryPlan extends BaseQueryPlan {
}
@Override
- protected ResultIterator newIterator() throws SQLException {
+ protected ResultIterator newIterator(ParallelScanGrouper scanGrouper) throws SQLException {
return null;
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
index 57fa25a..05ef1ec 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
@@ -49,7 +49,9 @@ import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.InListExpression;
import org.apache.phoenix.expression.LiteralExpression;
import org.apache.phoenix.expression.RowValueConstructorExpression;
+import org.apache.phoenix.iterate.DefaultParallelScanGrouper;
import org.apache.phoenix.iterate.FilterResultIterator;
+import org.apache.phoenix.iterate.ParallelScanGrouper;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.job.JobManager.JobCallable;
@@ -113,9 +115,14 @@ public class HashJoinPlan extends DelegateQueryPlan {
this.subPlans = subPlans;
this.recompileWhereClause = recompileWhereClause;
}
-
+
@Override
public ResultIterator iterator() throws SQLException {
+ return iterator(DefaultParallelScanGrouper.getInstance());
+ }
+
+ @Override
+ public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
int count = subPlans.length;
PhoenixConnection connection = getContext().getConnection();
ConnectionQueryServices services = connection.getQueryServices();
@@ -191,7 +198,7 @@ public class HashJoinPlan extends DelegateQueryPlan {
HashJoinInfo.serializeHashJoinIntoScan(scan, joinInfo);
}
- ResultIterator iterator = joinInfo == null ? delegate.iterator() : ((BaseQueryPlan) delegate).iterator(dependencies);
+ ResultIterator iterator = joinInfo == null ? delegate.iterator(scanGrouper) : ((BaseQueryPlan) delegate).iterator(dependencies, scanGrouper);
if (statement.getInnerSelectStatement() != null && postFilter != null) {
iterator = new FilterResultIterator(iterator, postFilter);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
index 884d835..b9dd2f2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
@@ -38,6 +38,7 @@ import org.apache.phoenix.iterate.MergeSortRowKeyResultIterator;
import org.apache.phoenix.iterate.MergeSortTopNResultIterator;
import org.apache.phoenix.iterate.ParallelIteratorFactory;
import org.apache.phoenix.iterate.ParallelIterators;
+import org.apache.phoenix.iterate.ParallelScanGrouper;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.iterate.ResultIterators;
import org.apache.phoenix.iterate.RoundRobinResultIterator;
@@ -161,7 +162,7 @@ public class ScanPlan extends BaseQueryPlan {
}
@Override
- protected ResultIterator newIterator() throws SQLException {
+ protected ResultIterator newIterator(ParallelScanGrouper scanGrouper) throws SQLException {
// Set any scan attributes before creating the scanner, as it will be too late afterwards
Scan scan = context.getScan();
scan.setAttribute(BaseScannerRegionObserver.NON_AGGREGATE_QUERY, QueryConstants.TRUE);
@@ -177,9 +178,9 @@ public class ScanPlan extends BaseQueryPlan {
Integer perScanLimit = !allowPageFilter || isOrdered ? null : limit;
ResultIterators iterators;
if (isSerial) {
- iterators = new SerialIterators(this, perScanLimit, parallelIteratorFactory);
+ iterators = new SerialIterators(this, perScanLimit, parallelIteratorFactory, scanGrouper);
} else {
- iterators = new ParallelIterators(this, perScanLimit, parallelIteratorFactory);
+ iterators = new ParallelIterators(this, perScanLimit, parallelIteratorFactory, scanGrouper);
}
splits = iterators.getSplits();
scans = iterators.getScans();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
index 46ade33..1bbda07 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
@@ -44,7 +44,9 @@ import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.exception.SQLExceptionInfo;
import org.apache.phoenix.execute.TupleProjector.ProjectedValueTuple;
import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.iterate.DefaultParallelScanGrouper;
import org.apache.phoenix.iterate.MappedByteBufferQueue;
+import org.apache.phoenix.iterate.ParallelScanGrouper;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.jdbc.PhoenixParameterMetaData;
import org.apache.phoenix.parse.FilterableStatement;
@@ -114,10 +116,15 @@ public class SortMergeJoinPlan implements QueryPlan {
}
@Override
- public ResultIterator iterator() throws SQLException {
+ public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
return type == JoinType.Semi || type == JoinType.Anti ?
- new SemiAntiJoinIterator(lhsPlan.iterator(), rhsPlan.iterator()) :
- new BasicJoinIterator(lhsPlan.iterator(), rhsPlan.iterator());
+ new SemiAntiJoinIterator(lhsPlan.iterator(scanGrouper), rhsPlan.iterator(scanGrouper)) :
+ new BasicJoinIterator(lhsPlan.iterator(scanGrouper), rhsPlan.iterator(scanGrouper));
+ }
+
+ @Override
+ public ResultIterator iterator() throws SQLException {
+ return iterator(DefaultParallelScanGrouper.getInstance());
}
@Override
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjectionPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjectionPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjectionPlan.java
index c9cbd15..e8d9af0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjectionPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjectionPlan.java
@@ -23,8 +23,10 @@ import java.util.List;
import org.apache.phoenix.compile.ExplainPlan;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.iterate.DefaultParallelScanGrouper;
import org.apache.phoenix.iterate.DelegateResultIterator;
import org.apache.phoenix.iterate.FilterResultIterator;
+import org.apache.phoenix.iterate.ParallelScanGrouper;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.schema.tuple.Tuple;
@@ -50,10 +52,15 @@ public class TupleProjectionPlan extends DelegateQueryPlan {
return new ExplainPlan(planSteps);
}
-
+
@Override
public ResultIterator iterator() throws SQLException {
- ResultIterator iterator = new DelegateResultIterator(delegate.iterator()) {
+ return iterator(DefaultParallelScanGrouper.getInstance());
+ }
+
+ @Override
+ public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
+ ResultIterator iterator = new DelegateResultIterator(delegate.iterator(scanGrouper)) {
@Override
public Tuple next() throws SQLException {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java
index 2bed3a0..53745fe 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java
@@ -34,6 +34,7 @@ import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.iterate.ConcatResultIterator;
import org.apache.phoenix.iterate.LimitingResultIterator;
import org.apache.phoenix.iterate.MergeSortTopNResultIterator;
+import org.apache.phoenix.iterate.ParallelScanGrouper;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.iterate.UnionResultIterators;
import org.apache.phoenix.parse.FilterableStatement;
@@ -123,6 +124,11 @@ public class UnionPlan implements QueryPlan {
}
@Override
+ public final ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
+ return iterator(Collections.<SQLCloseable>emptyList());
+ }
+
+ @Override
public final ResultIterator iterator() throws SQLException {
return iterator(Collections.<SQLCloseable>emptyList());
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 43731cb..cf66d93 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -103,6 +103,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
private final byte[] physicalTableName;
private final QueryPlan plan;
protected final String scanId;
+ private final ParallelScanGrouper scanGrouper;
// TODO: too much nesting here - breakup into new classes.
private final List<List<List<Pair<Scan,Future<PeekingResultIterator>>>>> allFutures;
@@ -133,9 +134,10 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
return true;
}
- public BaseResultIterators(QueryPlan plan, Integer perScanLimit) throws SQLException {
+ public BaseResultIterators(QueryPlan plan, Integer perScanLimit, ParallelScanGrouper scanGrouper) throws SQLException {
super(plan.getContext(), plan.getTableRef(), plan.getGroupBy(), plan.getOrderBy(), plan.getStatement().getHint(), plan.getLimit());
this.plan = plan;
+ this.scanGrouper = scanGrouper;
StatementContext context = plan.getContext();
TableRef tableRef = plan.getTableRef();
PTable table = tableRef.getTable();
@@ -371,24 +373,11 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
}
private List<Scan> addNewScan(List<List<Scan>> parallelScans, List<Scan> scans, Scan scan, byte[] startKey, boolean crossedRegionBoundary) {
- PTable table = getTable();
- boolean startNewScanList = false;
- if (!plan.isRowKeyOrdered()) {
- startNewScanList = true;
- } else if (crossedRegionBoundary) {
- if (table.getIndexType() == IndexType.LOCAL) {
- startNewScanList = true;
- } else if (table.getBucketNum() != null) {
- startNewScanList = scans.isEmpty() ||
- ScanUtil.crossesPrefixBoundary(startKey,
- ScanUtil.getPrefix(scans.get(scans.size()-1).getStartRow(), SaltingUtil.NUM_SALTING_BYTES),
- SaltingUtil.NUM_SALTING_BYTES);
- }
- }
+ boolean startNewScan = scanGrouper.shouldStartNewScan(plan, scans, startKey, crossedRegionBoundary);
if (scan != null) {
- scans.add(scan);
+ scans.add(scan);
}
- if (startNewScanList && !scans.isEmpty()) {
+ if (startNewScan && !scans.isEmpty()) {
parallelScans.add(scans);
scans = Lists.newArrayListWithExpectedSize(1);
}
@@ -410,7 +399,6 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
Scan scan = context.getScan();
List<HRegionLocation> regionLocations = context.getConnection().getQueryServices()
.getAllTableRegions(physicalTableName);
-
List<byte[]> regionBoundaries = toBoundaries(regionLocations);
ScanRanges scanRanges = context.getScanRanges();
PTable table = getTable();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/iterate/DefaultParallelScanGrouper.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/DefaultParallelScanGrouper.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/DefaultParallelScanGrouper.java
new file mode 100644
index 0000000..5c7136f
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/DefaultParallelScanGrouper.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.iterate;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.IndexType;
+import org.apache.phoenix.schema.SaltingUtil;
+import org.apache.phoenix.util.ScanUtil;
+
+/**
+ * Default implementation that starts a new scan group if the plan is not row key ordered,
+ * or if a scan crosses a region boundary and the table is salted or is a local index.
+ */
+public class DefaultParallelScanGrouper implements ParallelScanGrouper {
+
+ private static final DefaultParallelScanGrouper INSTANCE = new DefaultParallelScanGrouper();
+
+ public static DefaultParallelScanGrouper getInstance() {
+ return INSTANCE;
+ }
+
+ private DefaultParallelScanGrouper() {}
+
+ @Override
+ public boolean shouldStartNewScan(QueryPlan plan, List<Scan> scans, byte[] startKey, boolean crossedRegionBoundary) {
+ PTable table = plan.getTableRef().getTable();
+ boolean startNewScanGroup = false;
+ if (!plan.isRowKeyOrdered()) {
+ startNewScanGroup = true;
+ } else if (crossedRegionBoundary) {
+ if (table.getIndexType() == IndexType.LOCAL) {
+ startNewScanGroup = true;
+ } else if (table.getBucketNum() != null) {
+ startNewScanGroup = scans.isEmpty() ||
+ ScanUtil.crossesPrefixBoundary(startKey,
+ ScanUtil.getPrefix(scans.get(scans.size()-1).getStartRow(), SaltingUtil.NUM_SALTING_BYTES),
+ SaltingUtil.NUM_SALTING_BYTES);
+ }
+ }
+ return startNewScanGroup;
+ }
+
+}
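For salted tables the grouper above starts a new group only when the next scan's start key moves to a different salt-byte prefix. A minimal standalone sketch of that prefix check, using HBase's Bytes utility directly rather than Phoenix's ScanUtil (the class and method names here are illustrative, not part of this commit):

import org.apache.hadoop.hbase.util.Bytes;

public class SaltPrefixCheck {
    // Phoenix prepends one salt byte to each row key (SaltingUtil.NUM_SALTING_BYTES).
    private static final int NUM_SALTING_BYTES = 1;

    // Returns true when startKey belongs to a different salt bucket than the
    // previous scan's start row, i.e. a new scan group should be started.
    static boolean crossesSaltPrefix(byte[] previousStartRow, byte[] startKey) {
        if (previousStartRow.length < NUM_SALTING_BYTES || startKey.length < NUM_SALTING_BYTES) {
            return true; // degenerate key: be conservative and start a new group
        }
        return Bytes.compareTo(previousStartRow, 0, NUM_SALTING_BYTES,
                startKey, 0, NUM_SALTING_BYTES) != 0;
    }

    public static void main(String[] args) {
        byte[] bucket0 = {0, 'a'};
        byte[] bucket1 = {1, 'a'};
        System.out.println(crossesSaltPrefix(bucket0, bucket0)); // false: same bucket
        System.out.println(crossesSaltPrefix(bucket0, bucket1)); // true: new bucket
    }
}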
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/iterate/MapReduceParallelScanGrouper.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/MapReduceParallelScanGrouper.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/MapReduceParallelScanGrouper.java
new file mode 100644
index 0000000..bf2666d
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/MapReduceParallelScanGrouper.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.iterate;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.phoenix.compile.QueryPlan;
+
+/**
+ * Scan grouper that starts a new scan group if the plan is not row key ordered
+ * or if a scan crosses a region boundary
+ */
+public class MapReduceParallelScanGrouper implements ParallelScanGrouper {
+
+ private static final MapReduceParallelScanGrouper INSTANCE = new MapReduceParallelScanGrouper();
+
+ public static MapReduceParallelScanGrouper getInstance() {
+ return INSTANCE;
+ }
+
+ private MapReduceParallelScanGrouper() {}
+
+ @Override
+ public boolean shouldStartNewScan(QueryPlan plan, List<Scan> scans,
+ byte[] startKey, boolean crossedRegionBoundary) {
+ return !plan.isRowKeyOrdered() || crossedRegionBoundary;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
index 2dfbfe3..87f8335 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
@@ -54,11 +54,16 @@ public class ParallelIterators extends BaseResultIterators {
private static final String NAME = "PARALLEL";
private final ParallelIteratorFactory iteratorFactory;
- public ParallelIterators(QueryPlan plan, Integer perScanLimit, ParallelIteratorFactory iteratorFactory)
+ public ParallelIterators(QueryPlan plan, Integer perScanLimit, ParallelIteratorFactory iteratorFactory, ParallelScanGrouper scanGrouper)
throws SQLException {
- super(plan, perScanLimit);
+ super(plan, perScanLimit, scanGrouper);
this.iteratorFactory = iteratorFactory;
}
+
+ public ParallelIterators(QueryPlan plan, Integer perScanLimit, ParallelIteratorFactory iteratorFactory)
+ throws SQLException {
+ this(plan, perScanLimit, iteratorFactory, DefaultParallelScanGrouper.getInstance());
+ }
@Override
protected void submitWork(List<List<Scan>> nestedScans, List<List<Pair<Scan,Future<PeekingResultIterator>>>> nestedFutures,
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelScanGrouper.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelScanGrouper.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelScanGrouper.java
new file mode 100644
index 0000000..0becf4f
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelScanGrouper.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.iterate;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.phoenix.compile.QueryPlan;
+
+/**
+ * Interface for a parallel scan grouper
+ */
+public interface ParallelScanGrouper {
+
+ /**
+ * Determines whether to create a new group of parallel scans.
+ *
+ * @param plan current query plan
+ * @param scans scans in the current scan group
+ * @param startKey start key of the scan being considered
+ * @param crossedRegionBoundary whether the scan crossed a region boundary
+ * @return true if we should create a new group of scans
+ */
+ boolean shouldStartNewScan(QueryPlan plan, List<Scan> scans, byte[] startKey, boolean crossedRegionBoundary);
+
+}
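With the grouping strategy behind an interface, a caller can control how scans are grouped by passing its own grouper to QueryPlan.iterator(ParallelScanGrouper), added in this same commit. As a hedged sketch, a grouper that isolates every scan in its own group might look like this (OneScanPerGroupGrouper is an illustrative name, not part of the commit):

import java.util.List;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.iterate.ParallelScanGrouper;

// Illustrative grouper: every scan becomes its own scan group.
public class OneScanPerGroupGrouper implements ParallelScanGrouper {
    @Override
    public boolean shouldStartNewScan(QueryPlan plan, List<Scan> scans,
            byte[] startKey, boolean crossedRegionBoundary) {
        return true;
    }
}

A caller would then initialize the plan with it, mirroring what PhoenixInputFormat does below:

// queryPlan obtained from PhoenixStatement.optimizeQuery(...)
// ResultIterator iter = queryPlan.iterator(new OneScanPerGroupGrouper());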
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SerialIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SerialIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SerialIterators.java
index 516d73e..fa18c83 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SerialIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SerialIterators.java
@@ -51,9 +51,9 @@ public class SerialIterators extends BaseResultIterators {
private static final String NAME = "SERIAL";
private final ParallelIteratorFactory iteratorFactory;
- public SerialIterators(QueryPlan plan, Integer perScanLimit, ParallelIteratorFactory iteratorFactory)
+ public SerialIterators(QueryPlan plan, Integer perScanLimit, ParallelIteratorFactory iteratorFactory, ParallelScanGrouper scanGrouper)
throws SQLException {
- super(plan, perScanLimit);
+ super(plan, perScanLimit, scanGrouper);
Preconditions.checkArgument(perScanLimit != null); // must be a limit specified
this.iteratorFactory = iteratorFactory;
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index c6c5b0c..2bb3b92 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -71,6 +71,7 @@ import org.apache.phoenix.exception.SQLExceptionInfo;
import org.apache.phoenix.execute.MutationState;
import org.apache.phoenix.expression.RowKeyColumnExpression;
import org.apache.phoenix.iterate.MaterializedResultIterator;
+import org.apache.phoenix.iterate.ParallelScanGrouper;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.parse.AddColumnStatement;
import org.apache.phoenix.parse.AliasedNode;
@@ -446,6 +447,11 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
public ResultIterator iterator() throws SQLException {
return iterator;
}
+
+ @Override
+ public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
+ return iterator;
+ }
@Override
public long getEstimatedSize() {
@@ -511,6 +517,7 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
public boolean useRoundRobinIterator() throws SQLException {
return false;
}
+
};
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
index 31759b4..8ee1634 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
@@ -28,6 +28,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
@@ -36,6 +37,7 @@ import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.db.DBWritable;
import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.iterate.MapReduceParallelScanGrouper;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.mapreduce.util.ConnectionUtil;
import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
@@ -112,10 +114,10 @@ public class PhoenixInputFormat<T extends DBWritable> extends InputFormat<NullWr
Preconditions.checkNotNull(selectStatement);
final Statement statement = connection.createStatement();
final PhoenixStatement pstmt = statement.unwrap(PhoenixStatement.class);
- // Optimize the query plan so that we potentially use secondary indexes
+ // Optimize the query plan so that we potentially use secondary indexes
final QueryPlan queryPlan = pstmt.optimizeQuery(selectStatement);
// Initialize the query plan so it sets up the parallel scans
- queryPlan.iterator();
+ queryPlan.iterator(MapReduceParallelScanGrouper.getInstance());
return queryPlan;
} catch (Exception exception) {
LOG.error(String.format("Failed to get the query plan with error [%s]",
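Grouping scans with MapReduceParallelScanGrouper means each scan group ends at a region boundary, so a group maps cleanly to one input split. A rough sketch of turning the grouped scans into splits, assuming QueryPlan.getScans() exposes the groups and PhoenixInputSplit keeps a List<Scan> constructor (both assumptions; neither appears in the hunk above):

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.mapreduce.PhoenixInputSplit;

public class SplitSketch {
    // One split per scan group; each group stays within a single region,
    // so each mapper reads from exactly one region's worth of data.
    static List<InputSplit> toSplits(QueryPlan queryPlan) {
        List<InputSplit> splits = new ArrayList<InputSplit>();
        for (List<Scan> scanGroup : queryPlan.getScans()) {
            splits.add(new PhoenixInputSplit(scanGroup));
        }
        return splits;
    }
}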
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputSplit.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputSplit.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputSplit.java
index b222fc9..caee3cd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputSplit.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputSplit.java
@@ -25,6 +25,7 @@ import java.util.List;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.InputSplit;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a07d45a/phoenix-core/src/test/java/org/apache/phoenix/query/ParallelIteratorsSplitTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/ParallelIteratorsSplitTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/ParallelIteratorsSplitTest.java
index ecb088a..ad65373 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/ParallelIteratorsSplitTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/ParallelIteratorsSplitTest.java
@@ -44,6 +44,7 @@ import org.apache.phoenix.compile.SequenceManager;
import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.filter.SkipScanFilter;
import org.apache.phoenix.iterate.ParallelIterators;
+import org.apache.phoenix.iterate.ParallelScanGrouper;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.iterate.SpoolingResultIterator;
import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -368,6 +369,11 @@ public class ParallelIteratorsSplitTest extends BaseConnectionlessQueryTest {
}
@Override
+ public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
+ return ResultIterator.EMPTY_ITERATOR;
+ }
+
+ @Override
public ResultIterator iterator() throws SQLException {
return ResultIterator.EMPTY_ITERATOR;
}
[39/47] phoenix git commit: PHOENIX-2060 - Implement ARRAY_FILL built-in
function (Dumindu Buddhika)
Posted by ma...@apache.org.
PHOENIX-2060 - Implement ARRAY_FILL built-in function (Dumindu Buddhika)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c0ad8cf6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c0ad8cf6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c0ad8cf6
Branch: refs/heads/calcite
Commit: c0ad8cf6772b59e0ee24d1a4e8bc935d35a26a13
Parents: bc2aef8
Author: ramkrishna <ra...@gmail.com>
Authored: Tue Jun 30 22:26:53 2015 +0530
Committer: ramkrishna <ra...@gmail.com>
Committed: Tue Jun 30 22:26:53 2015 +0530
----------------------------------------------------------------------
.../main/java/org/apache/phoenix/expression/ExpressionType.java | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c0ad8cf6/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
index 51f4089..ef14e6a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
@@ -25,6 +25,7 @@ import org.apache.phoenix.expression.function.ArrayAnyComparisonExpression;
import org.apache.phoenix.expression.function.ArrayAppendFunction;
import org.apache.phoenix.expression.function.ArrayConcatFunction;
import org.apache.phoenix.expression.function.ArrayElemRefExpression;
+import org.apache.phoenix.expression.function.ArrayFillFunction;
import org.apache.phoenix.expression.function.ArrayIndexFunction;
import org.apache.phoenix.expression.function.ArrayLengthFunction;
import org.apache.phoenix.expression.function.ArrayPrependFunction;
@@ -247,7 +248,8 @@ public enum ExpressionType {
LogFunction(LogFunction.class),
ExpFunction(ExpFunction.class),
PowerFunction(PowerFunction.class),
- ArrayConcatFunction(ArrayConcatFunction.class)
+ ArrayConcatFunction(ArrayConcatFunction.class),
+ ArrayFillFunction(ArrayFillFunction.class)
;
ExpressionType(Class<? extends Expression> clazz) {
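Registering the class in ExpressionType is what makes the function resolvable from SQL. Assuming ARRAY_FILL(value, length) builds an array of length copies of value, a small JDBC sketch of calling it looks like this (the connection URL is illustrative, and SYSTEM.CATALOG is used only as a convenient row source via LIMIT 1):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class ArrayFillExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             ResultSet rs = conn.createStatement().executeQuery(
                     "SELECT ARRAY_FILL(7, 3) FROM SYSTEM.CATALOG LIMIT 1")) {
            if (rs.next()) {
                // Use reflection so this works whether the driver hands back
                // a primitive int[] or a boxed Object[].
                Object raw = rs.getArray(1).getArray();
                int n = java.lang.reflect.Array.getLength(raw);
                for (int i = 0; i < n; i++) {
                    // Expected output: 7, 7, 7 (the value repeated 'length' times)
                    System.out.println(java.lang.reflect.Array.get(raw, i));
                }
            }
        }
    }
}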
[22/47] phoenix git commit: PHOENIX-2066 Existing client fails
initialization due to upgrade attempting to create column with no name
(Lukas Lalinsky)
Posted by ma...@apache.org.
PHOENIX-2066 Existing client fails initialization due to upgrade attempting to create column with no name (Lukas Lalinsky)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b58a62a5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b58a62a5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b58a62a5
Branch: refs/heads/calcite
Commit: b58a62a5e43dcbb37695a0ebf7a20ced13e99503
Parents: 7385899
Author: James Taylor <jt...@salesforce.com>
Authored: Wed Jun 24 08:11:12 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Jun 24 08:11:12 2015 -0700
----------------------------------------------------------------------
.../phoenix/query/ConnectionQueryServicesImpl.java | 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b58a62a5/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index c5dde10..ddebf9f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -1941,11 +1941,15 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
columnsToAdd += ", " + PhoenixDatabaseMetaData.INDEX_TYPE + " " + PUnsignedTinyint.INSTANCE.getSqlTypeName()
+ ", " + PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP + " " + PLong.INSTANCE.getSqlTypeName();
}
- // Ugh..need to assign to another local variable to keep eclipse happy.
- PhoenixConnection newMetaConnection = addColumnsIfNotExists(metaConnection,
- PhoenixDatabaseMetaData.SYSTEM_CATALOG,
- MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, columnsToAdd);
- metaConnection = newMetaConnection;
+
+ // If we have some new columns from 4.1-4.3 to add, add them now.
+ if (!columnsToAdd.isEmpty()) {
+ // Ugh..need to assign to another local variable to keep eclipse happy.
+ PhoenixConnection newMetaConnection = addColumnsIfNotExists(metaConnection,
+ PhoenixDatabaseMetaData.SYSTEM_CATALOG,
+ MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, columnsToAdd);
+ metaConnection = newMetaConnection;
+ }
if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_5_0) {
columnsToAdd = PhoenixDatabaseMetaData.BASE_COLUMN_COUNT + " "
[10/47] phoenix git commit: PHOENIX-1935
org.apache.phoenix.end2end.ArithmeticQueryIT tests are failing (Alicia Ying
Shu)
Posted by ma...@apache.org.
PHOENIX-1935 org.apache.phoenix.end2end.ArithmeticQueryIT tests are failing (Alicia Ying Shu)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/80600488
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/80600488
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/80600488
Branch: refs/heads/calcite
Commit: 80600488f50fd000d74155ee17abfaa19ec39c69
Parents: db7b575
Author: Nick Dimiduk <nd...@apache.org>
Authored: Wed Jun 17 12:28:35 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Wed Jun 17 12:28:35 2015 -0700
----------------------------------------------------------------------
.../src/it/java/org/apache/phoenix/end2end/BaseViewIT.java | 2 ++
phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java | 3 +++
.../src/test/java/org/apache/phoenix/query/BaseTest.java | 5 ++++-
3 files changed, 9 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/80600488/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
index b9d7180..3140077 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
@@ -98,6 +98,7 @@ public abstract class BaseViewIT extends BaseOwnClusterHBaseManagedTimeIT {
assertEquals(1, rs.getInt(1));
assertEquals(121, rs.getInt(2));
assertFalse(rs.next());
+ conn.close();
}
protected void testUpdatableViewIndex(Integer saltBuckets) throws Exception {
@@ -179,6 +180,7 @@ public abstract class BaseViewIT extends BaseOwnClusterHBaseManagedTimeIT {
+ "CLIENT MERGE SORT",
QueryUtil.getExplainPlan(rs));
}
+ conn.close();
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/80600488/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index 266438d..fb58a8f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -92,8 +92,11 @@ public class ViewIT extends BaseViewIT {
fail();
} catch (ReadOnlyTableException e) {
+ } finally {
+ conn.close();
}
+ conn = DriverManager.getConnection(getUrl());
int count = 0;
ResultSet rs = conn.createStatement().executeQuery("SELECT k FROM v2");
while (rs.next()) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/80600488/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index fa78656..3f09518 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -115,6 +115,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
+import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -1634,7 +1635,9 @@ public abstract class BaseTest {
for (HTableDescriptor table : tables) {
String schemaName = SchemaUtil.getSchemaNameFromFullName(table.getName());
if (!QueryConstants.SYSTEM_SCHEMA_NAME.equals(schemaName)) {
- admin.disableTable(table.getName());
+ try{
+ admin.disableTable(table.getName());
+ } catch (TableNotEnabledException ignored){}
admin.deleteTable(table.getName());
}
}
[43/47] phoenix git commit: PHOENIX-2085 Include joda-time in phoenix
server jar
Posted by ma...@apache.org.
PHOENIX-2085 Include joda-time in phoenix server jar
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d2392bea
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d2392bea
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d2392bea
Branch: refs/heads/calcite
Commit: d2392beae3318099685856ffd18825028f21a7d1
Parents: 72a7356
Author: Thomas D'Silva <td...@salesforce.com>
Authored: Mon Jun 29 13:21:30 2015 -0700
Committer: Thomas D'Silva <td...@salesforce.com>
Committed: Tue Jun 30 22:10:33 2015 -0700
----------------------------------------------------------------------
phoenix-assembly/src/build/server.xml | 1 +
1 file changed, 1 insertion(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/d2392bea/phoenix-assembly/src/build/server.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/src/build/server.xml b/phoenix-assembly/src/build/server.xml
index 12d3d81..78a4b1f 100644
--- a/phoenix-assembly/src/build/server.xml
+++ b/phoenix-assembly/src/build/server.xml
@@ -38,6 +38,7 @@
<include>org.iq80.snappy:snappy</include>
<include>org.jruby.joni:joni</include>
<include>org.jruby.jcodings:jcodings</include>
+ <include>joda-time:joda-time</include>
</includes>
</dependencySet>
<dependencySet>
[08/47] phoenix git commit: PHOENIX-2029 Queries are making two rpc
calls for getTable
Posted by ma...@apache.org.
PHOENIX-2029 Queries are making two rpc calls for getTable
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/03a6ac00
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/03a6ac00
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/03a6ac00
Branch: refs/heads/calcite
Commit: 03a6ac00286f9fbd0466b5739c4036ccb3ad6afb
Parents: d1f7ded
Author: Thomas D'Silva <tw...@gmail.com>
Authored: Mon Jun 8 15:30:40 2015 -0700
Committer: Thomas D'Silva <td...@salesforce.com>
Committed: Wed Jun 17 11:21:43 2015 -0700
----------------------------------------------------------------------
.../org/apache/phoenix/rpc/UpdateCacheIT.java | 139 +++++++++++++++++++
.../apache/phoenix/compile/QueryCompiler.java | 2 +-
.../coprocessor/MetaDataEndpointImpl.java | 6 +-
.../apache/phoenix/schema/MetaDataClient.java | 26 ++--
4 files changed, 156 insertions(+), 17 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/03a6ac00/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
new file mode 100644
index 0000000..c657e41
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
@@ -0,0 +1,139 @@
+package org.apache.phoenix.rpc;
+
+import static org.apache.phoenix.util.TestUtil.INDEX_DATA_SCHEMA;
+import static org.apache.phoenix.util.TestUtil.MUTABLE_INDEX_DATA_TABLE;
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Matchers.isNull;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import java.math.BigDecimal;
+import java.sql.Connection;
+import java.sql.Date;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT;
+import org.apache.phoenix.end2end.Shadower;
+import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.MetaDataClient;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.util.DateUtil;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import com.google.common.collect.Maps;
+
+/**
+ * Verifies the number of rpcs calls from {@link MetaDataClient} updateCache()
+ * for transactional and non-transactional tables.
+ */
+public class UpdateCacheIT extends BaseHBaseManagedTimeIT {
+
+ public static final int NUM_MILLIS_IN_DAY = 86400000;
+
+ @Before
+ public void setUp() throws SQLException {
+ ensureTableCreated(getUrl(), MUTABLE_INDEX_DATA_TABLE);
+ }
+
+ @BeforeClass
+ @Shadower(classBeingShadowed = BaseHBaseManagedTimeIT.class)
+ public static void doSetup() throws Exception {
+ Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
+ setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ }
+
+ public static void validateRowKeyColumns(ResultSet rs, int i) throws SQLException {
+ assertTrue(rs.next());
+ assertEquals(rs.getString(1), "varchar" + String.valueOf(i));
+ assertEquals(rs.getString(2), "char" + String.valueOf(i));
+ assertEquals(rs.getInt(3), i);
+ assertEquals(rs.getInt(4), i);
+ assertEquals(rs.getBigDecimal(5), new BigDecimal(i*0.5d));
+ Date date = new Date(DateUtil.parseDate("2015-01-01 00:00:00").getTime() + (i - 1) * NUM_MILLIS_IN_DAY);
+ assertEquals(rs.getDate(6), date);
+ }
+
+ public static void setRowKeyColumns(PreparedStatement stmt, int i) throws SQLException {
+ // insert row
+ stmt.setString(1, "varchar" + String.valueOf(i));
+ stmt.setString(2, "char" + String.valueOf(i));
+ stmt.setInt(3, i);
+ stmt.setLong(4, i);
+ stmt.setBigDecimal(5, new BigDecimal(i*0.5d));
+ Date date = new Date(DateUtil.parseDate("2015-01-01 00:00:00").getTime() + (i - 1) * NUM_MILLIS_IN_DAY);
+ stmt.setDate(6, date);
+ }
+
+ @Test
+ public void testUpdateCache() throws Exception {
+ String fullTableName = INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + MUTABLE_INDEX_DATA_TABLE;
+ String selectSql = "SELECT * FROM "+fullTableName;
+ // use a spyed ConnectionQueryServices so we can verify calls to getTable
+ ConnectionQueryServices connectionQueryServices = Mockito.spy(driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)));
+ Properties props = new Properties();
+ props.putAll(PhoenixEmbeddedDriver.DEFFAULT_PROPS.asMap());
+ Connection conn = connectionQueryServices.connect(getUrl(), props);
+ try {
+ conn.setAutoCommit(false);
+ ResultSet rs = conn.createStatement().executeQuery(selectSql);
+ assertFalse(rs.next());
+ reset(connectionQueryServices);
+
+ String upsert = "UPSERT INTO " + fullTableName + "(varchar_pk, char_pk, int_pk, long_pk, decimal_pk, date_pk) VALUES(?, ?, ?, ?, ?, ?)";
+ PreparedStatement stmt = conn.prepareStatement(upsert);
+ // upsert three rows
+ setRowKeyColumns(stmt, 1);
+ stmt.execute();
+ setRowKeyColumns(stmt, 2);
+ stmt.execute();
+ setRowKeyColumns(stmt, 3);
+ stmt.execute();
+ conn.commit();
+ // verify only one rpc to getTable occurs after commit is called
+ verify(connectionQueryServices, times(1)).getTable((PName)isNull(), eq(PVarchar.INSTANCE.toBytes(INDEX_DATA_SCHEMA)), eq(PVarchar.INSTANCE.toBytes(MUTABLE_INDEX_DATA_TABLE)), anyLong(), anyLong());
+ reset(connectionQueryServices);
+
+ rs = conn.createStatement().executeQuery(selectSql);
+ validateRowKeyColumns(rs, 1);
+ validateRowKeyColumns(rs, 2);
+ validateRowKeyColumns(rs, 3);
+ assertFalse(rs.next());
+
+ rs = conn.createStatement().executeQuery(selectSql);
+ validateRowKeyColumns(rs, 1);
+ validateRowKeyColumns(rs, 2);
+ validateRowKeyColumns(rs, 3);
+ assertFalse(rs.next());
+
+ rs = conn.createStatement().executeQuery(selectSql);
+ validateRowKeyColumns(rs, 1);
+ validateRowKeyColumns(rs, 2);
+ validateRowKeyColumns(rs, 3);
+ assertFalse(rs.next());
+ conn.commit();
+ // there should be one rpc to getTable per query
+ verify(connectionQueryServices, times(3)).getTable((PName)isNull(), eq(PVarchar.INSTANCE.toBytes(INDEX_DATA_SCHEMA)), eq(PVarchar.INSTANCE.toBytes(MUTABLE_INDEX_DATA_TABLE)), anyLong(), anyLong());
+ }
+ finally {
+ conn.close();
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/03a6ac00/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
index e877e03..94ff075 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
@@ -529,7 +529,7 @@ public class QueryCompiler {
// Don't pass groupBy when building where clause expression, because we do not want to wrap these
// expressions as group by key expressions since they're pre, not post filtered.
if (innerPlan == null && !tableRef.equals(resolver.getTables().get(0))) {
- context.setResolver(FromCompiler.getResolverForQuery(select, this.statement.getConnection()));
+ context.setResolver(FromCompiler.getResolver(context.getConnection(), tableRef, select.getUdfParseNodes()));
}
Set<SubqueryParseNode> subqueries = Sets.<SubqueryParseNode> newHashSet();
Expression where = WhereCompiler.compile(context, select, viewWhere, subqueries);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/03a6ac00/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 39a4956..1d578f5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -1794,10 +1794,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
if (columnToDelete.isViewReferenced()) { // Disallow deletion of column referenced in WHERE clause of view
return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), table, columnToDelete);
}
- // Look for columnToDelete in any indexes. If found as PK
- // column, get lock and drop the index. If found as covered
- // column, delete from index (do this client side?).
- // In either case, invalidate index if the column is in it
+ // Look for columnToDelete in any indexes. If found as PK column, get lock and drop the index and then invalidate it
+ // Covered columns are deleted from the index by the client
PhoenixConnection connection = table.getIndexes().isEmpty() ? null : QueryUtil.getConnection(env.getConfiguration()).unwrap(PhoenixConnection.class);
for (PTable index : table.getIndexes()) {
try {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/03a6ac00/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index fcdb651..75678fd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -436,9 +436,9 @@ public class MetaDataClient {
// timestamp, we can handle this such that we don't ask the
// server again.
if (table != null) {
- // Ensures that table in result is set to table found in our cache.
- result.setTable(table);
if (code == MutationCode.TABLE_ALREADY_EXISTS) {
+ // Ensures that table in result is set to table found in our cache.
+ result.setTable(table);
// Although this table is up-to-date, the parent table may not be.
// In this case, we update the parent table which may in turn pull
// in indexes to add to this table.
@@ -2692,18 +2692,20 @@ public class MetaDataClient {
dropColumnMutations(table, tableColumnsToDrop, tableMetaData);
for (PTable index : table.getIndexes()) {
+ IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection);
+ // get the columns required for the index pk
+ Set<ColumnReference> indexColumns = indexMaintainer.getIndexedColumns();
+ // get the covered columns
+ Set<ColumnReference> coveredColumns = indexMaintainer.getCoverededColumns();
List<PColumn> indexColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size());
for(PColumn columnToDrop : tableColumnsToDrop) {
- String indexColumnName = IndexUtil.getIndexColumnName(columnToDrop);
- try {
- PColumn indexColumn = index.getColumn(indexColumnName);
- if (SchemaUtil.isPKColumn(indexColumn)) {
- indexesToDrop.add(new TableRef(index));
- } else {
- indexColumnsToDrop.add(indexColumn);
- columnsToDrop.add(new ColumnRef(tableRef, columnToDrop.getPosition()));
- }
- } catch (ColumnNotFoundException e) {
+ ColumnReference columnToDropRef = new ColumnReference(columnToDrop.getFamilyName().getBytes(), columnToDrop.getName().getBytes());
+ if (indexColumns.contains(columnToDropRef)) {
+ indexesToDrop.add(new TableRef(index));
+ }
+ else if (coveredColumns.contains(columnToDropRef)) {
+ String indexColumnName = IndexUtil.getIndexColumnName(columnToDrop);
+ indexColumnsToDrop.add(index.getColumn(indexColumnName));
}
}
if(!indexColumnsToDrop.isEmpty()) {
[32/47] phoenix git commit: PHOENIX-1819 Build a framework to capture
and report phoenix client side request level metrics
Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
index 857a952..57fa25a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
@@ -17,6 +17,7 @@
*/
package org.apache.phoenix.execute;
+import static org.apache.phoenix.monitoring.TaskExecutionMetricsHolder.NO_OP_INSTANCE;
import static org.apache.phoenix.util.LogUtil.addCustomAnnotations;
import java.sql.SQLException;
@@ -54,6 +55,7 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.job.JobManager.JobCallable;
import org.apache.phoenix.join.HashCacheClient;
import org.apache.phoenix.join.HashJoinInfo;
+import org.apache.phoenix.monitoring.TaskExecutionMetricsHolder;
import org.apache.phoenix.parse.FilterableStatement;
import org.apache.phoenix.parse.ParseNode;
import org.apache.phoenix.parse.SQLParser;
@@ -140,6 +142,11 @@ public class HashJoinPlan extends DelegateQueryPlan {
public Object getJobId() {
return HashJoinPlan.this;
}
+
+ @Override
+ public TaskExecutionMetricsHolder getTaskExecutionMetric() {
+ return NO_OP_INSTANCE;
+ }
}));
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 99f41b2..af3bcf3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -17,6 +17,10 @@
*/
package org.apache.phoenix.execute;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MUTATION_BATCH_SIZE;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MUTATION_BYTES;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MUTATION_COMMIT_TIME;
+
import java.io.IOException;
import java.sql.SQLException;
import java.util.Arrays;
@@ -39,7 +43,11 @@ import org.apache.phoenix.index.IndexMaintainer;
import org.apache.phoenix.index.IndexMetaDataCacheClient;
import org.apache.phoenix.index.PhoenixIndexCodec;
import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.monitoring.PhoenixMetrics;
+import org.apache.phoenix.monitoring.GlobalClientMetrics;
+import org.apache.phoenix.monitoring.MutationMetricQueue;
+import org.apache.phoenix.monitoring.MutationMetricQueue.MutationMetric;
+import org.apache.phoenix.monitoring.MutationMetricQueue.NoOpMutationMetricsQueue;
+import org.apache.phoenix.monitoring.ReadMetricQueue;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.schema.IllegalDataException;
import org.apache.phoenix.schema.MetaDataClient;
@@ -65,9 +73,6 @@ import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.sun.istack.NotNull;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.SizeMetric.MUTATION_BYTES;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.SizeMetric.MUTATION_BATCH_SIZE;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.SizeMetric.MUTATION_COMMIT_TIME;
/**
*
@@ -85,11 +90,17 @@ public class MutationState implements SQLCloseable {
private final Map<TableRef, Map<ImmutableBytesPtr,RowMutationState>> mutations;
private long sizeOffset;
private int numRows = 0;
+ private final MutationMetricQueue mutationMetricQueue;
+ private ReadMetricQueue readMetricQueue;
- MutationState(long maxSize, PhoenixConnection connection, Map<TableRef, Map<ImmutableBytesPtr,RowMutationState>> mutations) {
+ MutationState(long maxSize, PhoenixConnection connection,
+ Map<TableRef, Map<ImmutableBytesPtr, RowMutationState>> mutations) {
this.maxSize = maxSize;
this.connection = connection;
this.mutations = mutations;
+ boolean isMetricsEnabled = connection.isRequestLevelMetricsEnabled();
+ this.mutationMetricQueue = isMetricsEnabled ? new MutationMetricQueue()
+ : NoOpMutationMetricsQueue.NO_OP_MUTATION_METRICS_QUEUE;
}
public MutationState(long maxSize, PhoenixConnection connection) {
@@ -108,6 +119,12 @@ public class MutationState implements SQLCloseable {
throwIfTooBig();
}
+ public static MutationState emptyMutationState(long maxSize, PhoenixConnection connection) {
+ MutationState state = new MutationState(maxSize, connection, Collections.<TableRef, Map<ImmutableBytesPtr,RowMutationState>>emptyMap());
+ state.sizeOffset = 0;
+ return state;
+ }
+
private void throwIfTooBig() {
if (numRows > maxSize) {
// TODO: throw SQLException ?
@@ -120,17 +137,18 @@ public class MutationState implements SQLCloseable {
}
/**
- * Combine a newer mutation with this one, where in the event of overlaps,
- * the newer one will take precedence.
- * @param newMutation the newer mutation
+ * Combine a newer mutation with this one, where in the event of overlaps, the newer one will take precedence.
+ * Combine any metrics collected for the newer mutation.
+ *
+ * @param newMutationState the newer mutation state
*/
- public void join(MutationState newMutation) {
- if (this == newMutation) { // Doesn't make sense
+ public void join(MutationState newMutationState) {
+ if (this == newMutationState) { // Doesn't make sense
return;
}
- this.sizeOffset += newMutation.sizeOffset;
+ this.sizeOffset += newMutationState.sizeOffset;
// Merge newMutation with this one, keeping state from newMutation for any overlaps
- for (Map.Entry<TableRef, Map<ImmutableBytesPtr,RowMutationState>> entry : newMutation.mutations.entrySet()) {
+ for (Map.Entry<TableRef, Map<ImmutableBytesPtr,RowMutationState>> entry : newMutationState.mutations.entrySet()) {
// Replace existing entries for the table with new entries
TableRef tableRef = entry.getKey();
PTable table = tableRef.getTable();
@@ -168,6 +186,12 @@ public class MutationState implements SQLCloseable {
}
}
}
+ mutationMetricQueue.combineMetricQueues(newMutationState.mutationMetricQueue);
+ if (readMetricQueue == null) {
+ readMetricQueue = newMutationState.readMetricQueue;
+ } else if (readMetricQueue != null && newMutationState.readMetricQueue != null) {
+ readMetricQueue.combineReadMetrics(newMutationState.readMetricQueue);
+ }
throwIfTooBig();
}
@@ -332,18 +356,15 @@ public class MutationState implements SQLCloseable {
return timeStamps;
}
- private static void logMutationSize(HTableInterface htable, List<Mutation> mutations, PhoenixConnection connection) {
+ private static long calculateMutationSize(List<Mutation> mutations) {
long byteSize = 0;
- int keyValueCount = 0;
- if (PhoenixMetrics.isMetricsEnabled() || logger.isDebugEnabled()) {
+ if (GlobalClientMetrics.isMetricsEnabled()) {
for (Mutation mutation : mutations) {
byteSize += mutation.heapSize();
}
- MUTATION_BYTES.update(byteSize);
- if (logger.isDebugEnabled()) {
- logger.debug(LogUtil.addCustomAnnotations("Sending " + mutations.size() + " mutations for " + Bytes.toString(htable.getTableName()) + " with " + keyValueCount + " key values of total size " + byteSize + " bytes", connection));
- }
}
+ GLOBAL_MUTATION_BYTES.update(byteSize);
+ return byteSize;
}
@SuppressWarnings("deprecation")
@@ -352,126 +373,134 @@ public class MutationState implements SQLCloseable {
byte[] tenantId = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
long[] serverTimeStamps = validate();
Iterator<Map.Entry<TableRef, Map<ImmutableBytesPtr,RowMutationState>>> iterator = this.mutations.entrySet().iterator();
-
// add tracing for this operation
- TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables");
- Span span = trace.getSpan();
- while (iterator.hasNext()) {
- Map.Entry<TableRef, Map<ImmutableBytesPtr,RowMutationState>> entry = iterator.next();
- Map<ImmutableBytesPtr,RowMutationState> valuesMap = entry.getValue();
- TableRef tableRef = entry.getKey();
- PTable table = tableRef.getTable();
- table.getIndexMaintainers(tempPtr, connection);
- boolean hasIndexMaintainers = tempPtr.getLength() > 0;
- boolean isDataTable = true;
- long serverTimestamp = serverTimeStamps[i++];
- Iterator<Pair<byte[],List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, serverTimestamp, false);
- while (mutationsIterator.hasNext()) {
- Pair<byte[],List<Mutation>> pair = mutationsIterator.next();
- byte[] htableName = pair.getFirst();
- List<Mutation> mutations = pair.getSecond();
-
- //create a span per target table
- //TODO maybe we can be smarter about the table name to string here?
- Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
+ try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
+ Span span = trace.getSpan();
+ while (iterator.hasNext()) {
+ Map.Entry<TableRef, Map<ImmutableBytesPtr,RowMutationState>> entry = iterator.next();
+ // at this point we are going through mutations for each table
- int retryCount = 0;
- boolean shouldRetry = false;
- do {
- ServerCache cache = null;
- if (hasIndexMaintainers && isDataTable) {
- byte[] attribValue = null;
- byte[] uuidValue;
- if (IndexMetaDataCacheClient.useIndexMetadataCache(connection, mutations, tempPtr.getLength())) {
- IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
- cache = client.addIndexMetadataCache(mutations, tempPtr);
- child.addTimelineAnnotation("Updated index metadata cache");
- uuidValue = cache.getId();
- // If we haven't retried yet, retry for this case only, as it's possible that
- // a split will occur after we send the index metadata cache to all known
- // region servers.
- shouldRetry = true;
- } else {
- attribValue = ByteUtil.copyKeyBytesIfNecessary(tempPtr);
- uuidValue = ServerCacheClient.generateId();
- }
- // Either set the UUID to be able to access the index metadata from the cache
- // or set the index metadata directly on the Mutation
- for (Mutation mutation : mutations) {
- if (tenantId != null) {
- mutation.setAttribute(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
- }
- mutation.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
- if (attribValue != null) {
- mutation.setAttribute(PhoenixIndexCodec.INDEX_MD, attribValue);
- }
- }
- }
-
- SQLException sqlE = null;
- HTableInterface hTable = connection.getQueryServices().getTable(htableName);
- try {
- logMutationSize(hTable, mutations, connection);
- MUTATION_BATCH_SIZE.update(mutations.size());
- long startTime = System.currentTimeMillis();
- child.addTimelineAnnotation("Attempt " + retryCount);
- hTable.batch(mutations);
- child.stop();
- long duration = System.currentTimeMillis() - startTime;
- MUTATION_COMMIT_TIME.update(duration);
- shouldRetry = false;
- if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("Total time for batch call of " + mutations.size() + " mutations into " + table.getName().getString() + ": " + duration + " ms", connection));
- } catch (Exception e) {
- SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
- if (inferredE != null) {
- if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
- // Swallow this exception once, as it's possible that we split after sending the index metadata
- // and one of the region servers doesn't have it. This will cause it to have it the next go around.
- // If it fails again, we don't retry.
- String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
- logger.warn(LogUtil.addCustomAnnotations(msg, connection));
- connection.getQueryServices().clearTableRegionCache(htableName);
+ Map<ImmutableBytesPtr,RowMutationState> valuesMap = entry.getValue();
+ // valuesMap holds this table's mutations, keyed by row key, with the column values as the map value.
- // add a new child span as this one failed
- child.addTimelineAnnotation(msg);
- child.stop();
- child = Tracing.child(span,"Failed batch, attempting retry");
+ TableRef tableRef = entry.getKey();
+ PTable table = tableRef.getTable();
+ table.getIndexMaintainers(tempPtr, connection);
+ boolean hasIndexMaintainers = tempPtr.getLength() > 0;
+ boolean isDataTable = true;
+ long serverTimestamp = serverTimeStamps[i++];
+ Iterator<Pair<byte[],List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, serverTimestamp, false);
+ // above returns an iterator of pairs where the first element is the physical table name and the second is its batch of mutations
+ while (mutationsIterator.hasNext()) {
+ Pair<byte[],List<Mutation>> pair = mutationsIterator.next();
+ byte[] htableName = pair.getFirst();
+ List<Mutation> mutations = pair.getSecond();
- continue;
+ //create a span per target table
+ //TODO maybe we can be smarter about the table name to string here?
+ Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
+
+ int retryCount = 0;
+ boolean shouldRetry = false;
+ do {
+ ServerCache cache = null;
+ if (hasIndexMaintainers && isDataTable) {
+ byte[] attribValue = null;
+ byte[] uuidValue;
+ if (IndexMetaDataCacheClient.useIndexMetadataCache(connection, mutations, tempPtr.getLength())) {
+ IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
+ cache = client.addIndexMetadataCache(mutations, tempPtr);
+ child.addTimelineAnnotation("Updated index metadata cache");
+ uuidValue = cache.getId();
+ // If we haven't retried yet, retry for this case only, as it's possible that
+ // a split will occur after we send the index metadata cache to all known
+ // region servers.
+ shouldRetry = true;
+ } else {
+ attribValue = ByteUtil.copyKeyBytesIfNecessary(tempPtr);
+ uuidValue = ServerCacheClient.generateId();
+ }
+ // Either set the UUID to be able to access the index metadata from the cache
+ // or set the index metadata directly on the Mutation
+ for (Mutation mutation : mutations) {
+ if (tenantId != null) {
+ mutation.setAttribute(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
+ }
+ mutation.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
+ if (attribValue != null) {
+ mutation.setAttribute(PhoenixIndexCodec.INDEX_MD, attribValue);
+ }
}
- e = inferredE;
}
- sqlE = new CommitException(e, getUncommittedSattementIndexes());
- } finally {
+
+ SQLException sqlE = null;
+ HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
- hTable.close();
- } catch (IOException e) {
- if (sqlE != null) {
- sqlE.setNextException(ServerUtil.parseServerException(e));
- } else {
- sqlE = ServerUtil.parseServerException(e);
+ long numMutations = mutations.size();
+ GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
+
+ long startTime = System.currentTimeMillis();
+ child.addTimelineAnnotation("Attempt " + retryCount);
+ hTable.batch(mutations);
+ child.stop();
+ shouldRetry = false;
+ long mutationCommitTime = System.currentTimeMillis() - startTime;
+ GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
+
+ long mutationSizeBytes = calculateMutationSize(mutations);
+ MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime);
+ mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
+ } catch (Exception e) {
+ SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
+ if (inferredE != null) {
+ if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
+ // Swallow this exception once, as it's possible that we split after sending the index metadata
+ // and one of the region servers doesn't have it. This will cause it to have it the next go around.
+ // If it fails again, we don't retry.
+ String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
+ logger.warn(LogUtil.addCustomAnnotations(msg, connection));
+ connection.getQueryServices().clearTableRegionCache(htableName);
+
+ // add a new child span as this one failed
+ child.addTimelineAnnotation(msg);
+ child.stop();
+ child = Tracing.child(span,"Failed batch, attempting retry");
+
+ continue;
+ }
+ e = inferredE;
}
+ sqlE = new CommitException(e, getUncommittedStatementIndexes());
} finally {
try {
- if (cache != null) {
- cache.close();
+ hTable.close();
+ } catch (IOException e) {
+ if (sqlE != null) {
+ sqlE.setNextException(ServerUtil.parseServerException(e));
+ } else {
+ sqlE = ServerUtil.parseServerException(e);
}
} finally {
- if (sqlE != null) {
- throw sqlE;
+ try {
+ if (cache != null) {
+ cache.close();
+ }
+ } finally {
+ if (sqlE != null) {
+ throw sqlE;
+ }
}
}
}
- }
- } while (shouldRetry && retryCount++ < 1);
- isDataTable = false;
- }
- if (tableRef.getTable().getType() != PTableType.INDEX) {
- numRows -= entry.getValue().size();
+ } while (shouldRetry && retryCount++ < 1);
+ isDataTable = false;
+ }
+ if (tableRef.getTable().getType() != PTableType.INDEX) {
+ numRows -= entry.getValue().size();
+ }
+ iterator.remove(); // Remove batches as we process them
}
- iterator.remove(); // Remove batches as we process them
}
- trace.close();
assert(numRows==0);
assert(this.mutations.isEmpty());
}
@@ -481,7 +510,7 @@ public class MutationState implements SQLCloseable {
numRows = 0;
}
- private int[] getUncommittedSattementIndexes() {
+ private int[] getUncommittedStatementIndexes() {
int[] result = new int[0];
for (Map<ImmutableBytesPtr, RowMutationState> rowMutations : mutations.values()) {
for (RowMutationState rowMutationState : rowMutations.values()) {
@@ -533,12 +562,23 @@ public class MutationState implements SQLCloseable {
int[] getStatementIndexes() {
return statementIndexes;
}
-
+
void join(RowMutationState newRow) {
getColumnValues().putAll(newRow.getColumnValues());
statementIndexes = joinSortedIntArrays(statementIndexes, newRow.getStatementIndexes());
}
-
+ }
+
+ public ReadMetricQueue getReadMetricQueue() {
+ return readMetricQueue;
+ }
+ public void setReadMetricQueue(ReadMetricQueue readMetricQueue) {
+ this.readMetricQueue = readMetricQueue;
}
+
+ public MutationMetricQueue getMutationMetricQueue() {
+ return mutationMetricQueue;
+ }
+
}
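The commit loop above retries at most once: the first INDEX_METADATA_NOT_FOUND failure is swallowed, the region cache is cleared, and the batch is resent. A minimal standalone sketch of that retry-once shape; RetryableException, sendBatch() and clearCache() are illustrative stand-ins, not Phoenix APIs:

    // Sketch of the retry-once pattern used by the commit loop above.
    public class RetryOnceDemo {
        static class RetryableException extends Exception {}

        private static boolean firstAttemptFails = true;

        private static void clearCache() { /* e.g. drop stale region locations */ }

        private static void sendBatch() throws RetryableException {
            if (firstAttemptFails) {
                firstAttemptFails = false;
                throw new RetryableException();
            }
        }

        public static void main(String[] args) throws Exception {
            int retryCount = 0;
            boolean shouldRetry;
            do {
                shouldRetry = false;
                try {
                    sendBatch();
                } catch (RetryableException e) {
                    if (retryCount == 0) {   // swallow the failure exactly once
                        clearCache();
                        shouldRetry = true;
                        continue;            // falls through to the while condition
                    }
                    throw e;                 // a second failure propagates
                }
            } while (shouldRetry && retryCount++ < 1);
            System.out.println("batch committed after " + (retryCount + 1) + " attempt(s)");
        }
    }

The post-increment in the loop condition is what caps the retries at one.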
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java
index 031b58b..2bed3a0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java
@@ -49,7 +49,7 @@ public class UnionPlan implements QueryPlan {
private final FilterableStatement statement;
private final ParameterMetaData paramMetaData;
private final OrderBy orderBy;
- private final StatementContext context;
+ private final StatementContext parentContext;
private final Integer limit;
private final GroupBy groupBy;
private final RowProjector projector;
@@ -59,7 +59,7 @@ public class UnionPlan implements QueryPlan {
public UnionPlan(StatementContext context, FilterableStatement statement, TableRef table, RowProjector projector,
Integer limit, OrderBy orderBy, GroupBy groupBy, List<QueryPlan> plans, ParameterMetaData paramMetaData) throws SQLException {
- this.context = context;
+ this.parentContext = context;
this.statement = statement;
this.tableRef = table;
this.projector = projector;
@@ -128,7 +128,7 @@ public class UnionPlan implements QueryPlan {
}
public final ResultIterator iterator(final List<? extends SQLCloseable> dependencies) throws SQLException {
- this.iterators = new UnionResultIterators(plans);
+ this.iterators = new UnionResultIterators(plans, parentContext);
ResultIterator scanner;
boolean isOrdered = !orderBy.getOrderByExpressions().isEmpty();
@@ -175,7 +175,7 @@ public class UnionPlan implements QueryPlan {
@Override
public StatementContext getContext() {
- return context;
+ return parentContext;
}
@Override
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 6a3847b..43731cb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -18,8 +18,8 @@
package org.apache.phoenix.iterate;
import static org.apache.phoenix.coprocessor.BaseScannerRegionObserver.EXPECTED_UPPER_REGION_KEY;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.CountMetric.FAILED_QUERY;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.CountMetric.QUERY_TIMEOUT;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_FAILED_QUERY_COUNTER;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_QUERY_TIMEOUT_COUNTER;
import static org.apache.phoenix.util.ByteUtil.EMPTY_BYTE_ARRAY;
import java.sql.SQLException;
@@ -540,12 +540,13 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
} catch (ExecutionException e) {
try { // Rethrow as SQLException
throw ServerUtil.parseServerException(e);
- } catch (StaleRegionBoundaryCacheException e2) {
+ } catch (StaleRegionBoundaryCacheException e2) {
// Catch only to try to recover from region boundary cache being out of date
List<List<Pair<Scan,Future<PeekingResultIterator>>>> newFutures = Lists.newArrayListWithExpectedSize(2);
if (!clearedCache) { // Clear cache once so that we rejigger job based on new boundaries
services.clearTableRegionCache(physicalTableName);
clearedCache = true;
+ context.getOverallQueryMetrics().cacheRefreshedDueToSplits();
}
// Resubmit just this portion of work again
Scan oldScan = scanPair.getFirst();
@@ -582,7 +583,8 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
success = true;
return iterators;
} catch (TimeoutException e) {
- QUERY_TIMEOUT.increment();
+ context.getOverallQueryMetrics().queryTimedOut();
+ GLOBAL_QUERY_TIMEOUT_COUNTER.increment();
// thrown when a thread times out waiting for the future.get() call to return
toThrow = new SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT)
.setMessage(". Query couldn't be completed in the alloted time: " + queryTimeOut + " ms")
@@ -616,7 +618,8 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
}
} finally {
if (toThrow != null) {
- FAILED_QUERY.increment();
+ GLOBAL_FAILED_QUERY_COUNTER.increment();
+ context.getOverallQueryMetrics().queryFailed();
throw toThrow;
}
}
@@ -639,7 +642,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
if (futurePair != null) {
Future<PeekingResultIterator> future = futurePair.getSecond();
if (future != null) {
- cancelledWork |= future.cancel(false);
+ future.cancel(false);
}
}
}
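The ExecutionException handling above rethrows the parsed cause so that one recoverable subtype can be caught on its own and turned into a cache refresh plus scan resubmit. A reduced sketch of that rethrow-and-catch idiom; StaleCacheException and parse() are illustrative stand-ins for Phoenix's ServerUtil.parseServerException and its exception types:

    import java.util.concurrent.ExecutionException;

    public class RethrowDemo {
        static class StaleCacheException extends RuntimeException {}

        // Stand-in for ServerUtil.parseServerException: unwrap to the real cause.
        static RuntimeException parse(Throwable t) {
            Throwable cause = t.getCause() != null ? t.getCause() : t;
            return cause instanceof RuntimeException
                    ? (RuntimeException) cause : new RuntimeException(cause);
        }

        public static void main(String[] args) {
            ExecutionException failure = new ExecutionException(new StaleCacheException());
            try {
                try { // Rethrow as the parsed cause...
                    throw parse(failure);
                } catch (StaleCacheException e) {
                    // ...so only the recoverable subtype lands here, where the
                    // region cache can be cleared and the scans resubmitted.
                    System.out.println("recovered: refreshing cache and resubmitting");
                }
            } catch (RuntimeException other) {
                System.out.println("unrecoverable: " + other);
            }
        }
    }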
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
index e1ee8db..f272e55 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
@@ -19,6 +19,7 @@
package org.apache.phoenix.iterate;
import static org.apache.phoenix.coprocessor.BaseScannerRegionObserver.STARTKEY_OFFSET;
+import static org.apache.phoenix.monitoring.MetricType.SCAN_BYTES;
import java.sql.SQLException;
import java.util.List;
@@ -66,18 +67,17 @@ public class ChunkedResultIterator implements PeekingResultIterator {
}
@Override
- public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan) throws SQLException {
- scanner.close(); //close the iterator since we don't need it anymore.
+ public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan, String tableName) throws SQLException {
if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("ChunkedResultIteratorFactory.newIterator over " + tableRef.getTable().getName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
return new ChunkedResultIterator(delegateFactory, context, tableRef, scan,
context.getConnection().getQueryServices().getProps().getLong(
QueryServices.SCAN_RESULT_CHUNK_SIZE,
- QueryServicesOptions.DEFAULT_SCAN_RESULT_CHUNK_SIZE));
+ QueryServicesOptions.DEFAULT_SCAN_RESULT_CHUNK_SIZE), scanner);
}
}
- public ChunkedResultIterator(ParallelIteratorFactory delegateIteratorFactory,
- StatementContext context, TableRef tableRef, Scan scan, long chunkSize) throws SQLException {
+ private ChunkedResultIterator(ParallelIteratorFactory delegateIteratorFactory,
+ StatementContext context, TableRef tableRef, Scan scan, long chunkSize, ResultIterator scanner) throws SQLException {
this.delegateIteratorFactory = delegateIteratorFactory;
this.context = context;
this.tableRef = tableRef;
@@ -87,9 +87,9 @@ public class ChunkedResultIterator implements PeekingResultIterator {
// to get parallel scans kicked off in separate threads. If we delay this,
// we'll get serialized behavior (see PHOENIX-
if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("Get first chunked result iterator over " + tableRef.getTable().getName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
- ResultIterator singleChunkResultIterator = new SingleChunkResultIterator(
- new TableResultIterator(context, tableRef, scan), chunkSize);
- resultIterator = delegateIteratorFactory.newIterator(context, singleChunkResultIterator, scan);
+ ResultIterator singleChunkResultIterator = new SingleChunkResultIterator(scanner, chunkSize);
+ String tableName = tableRef.getTable().getPhysicalName().getString();
+ resultIterator = delegateIteratorFactory.newIterator(context, singleChunkResultIterator, scan, tableName);
}
@Override
@@ -118,9 +118,10 @@ public class ChunkedResultIterator implements PeekingResultIterator {
scan = ScanUtil.newScan(scan);
scan.setStartRow(ByteUtil.copyKeyBytesIfNecessary(lastKey));
if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("Get next chunked result iterator over " + tableRef.getTable().getName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
+ String tableName = tableRef.getTable().getPhysicalName().getString();
ResultIterator singleChunkResultIterator = new SingleChunkResultIterator(
- new TableResultIterator(context, tableRef, scan), chunkSize);
- resultIterator = delegateIteratorFactory.newIterator(context, singleChunkResultIterator, scan);
+ new TableResultIterator(context, tableRef, scan, context.getReadMetricsQueue().allotMetric(SCAN_BYTES, tableName)), chunkSize);
+ resultIterator = delegateIteratorFactory.newIterator(context, singleChunkResultIterator, scan, tableName);
}
return resultIterator;
}
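ChunkedResultIterator's core trick is to stop after chunkSize rows, remember the last key, and open a fresh scan starting at that key. A self-contained sketch of the same resume-from-last-key idea; a TreeMap stands in for the table, and the "\0"-suffixed start key approximates starting the next scan just past the last returned row:

    import java.util.NavigableMap;
    import java.util.TreeMap;

    public class ChunkedScanDemo {
        public static void main(String[] args) {
            NavigableMap<String, String> table = new TreeMap<String, String>();
            for (int i = 0; i < 10; i++) {
                table.put(String.format("row%02d", i), "v" + i);
            }
            int chunkSize = 4;
            String startRow = table.firstKey();
            boolean done = false;
            while (!done) {
                int read = 0;
                String lastKey = null;
                for (String key : table.tailMap(startRow, true).keySet()) {
                    if (read == chunkSize) { read++; break; } // one row past the chunk => more remain
                    read++;
                    lastKey = key;
                    System.out.println("read " + key);
                }
                if (read <= chunkSize) {
                    done = true;                  // the remaining tail fit in one chunk
                } else {
                    startRow = lastKey + "\0";    // resume just after the last returned key
                }
            }
        }
    }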
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIteratorFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIteratorFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIteratorFactory.java
index df8f658..f25e373 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIteratorFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIteratorFactory.java
@@ -25,10 +25,10 @@ import org.apache.phoenix.compile.StatementContext;
public interface ParallelIteratorFactory {
public static ParallelIteratorFactory NOOP_FACTORY = new ParallelIteratorFactory() {
@Override
- public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan)
+ public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan, String physicalTableName)
throws SQLException {
return LookAheadResultIterator.wrap(scanner);
}
};
- PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan) throws SQLException;
+ PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan, String physicalTableName) throws SQLException;
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
index be10c20..2dfbfe3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
@@ -17,7 +17,7 @@
*/
package org.apache.phoenix.iterate;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.SizeMetric.PARALLEL_SCANS;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_NUM_PARALLEL_SCANS;
import java.sql.SQLException;
import java.util.Collections;
@@ -30,6 +30,10 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.job.JobManager.JobCallable;
+import org.apache.phoenix.monitoring.MetricType;
+import org.apache.phoenix.monitoring.CombinableMetric;
+import org.apache.phoenix.monitoring.ReadMetricQueue;
+import org.apache.phoenix.monitoring.TaskExecutionMetricsHolder;
import org.apache.phoenix.trace.util.Tracing;
import org.apache.phoenix.util.LogUtil;
import org.apache.phoenix.util.ScanUtil;
@@ -79,19 +83,25 @@ public class ParallelIterators extends BaseResultIterators {
// Shuffle so that we start execution across many machines
// before we fill up the thread pool
Collections.shuffle(scanLocations);
- PARALLEL_SCANS.update(scanLocations.size());
+ ReadMetricQueue readMetrics = context.getReadMetricsQueue();
+ final String physicalTableName = tableRef.getTable().getPhysicalName().getString();
+ int numScans = scanLocations.size();
+ context.getOverallQueryMetrics().updateNumParallelScans(numScans);
+ GLOBAL_NUM_PARALLEL_SCANS.update(numScans);
for (ScanLocator scanLocation : scanLocations) {
final Scan scan = scanLocation.getScan();
+ final CombinableMetric scanMetrics = readMetrics.allotMetric(MetricType.SCAN_BYTES, physicalTableName);
+ final TaskExecutionMetricsHolder taskMetrics = new TaskExecutionMetricsHolder(readMetrics, physicalTableName);
Future<PeekingResultIterator> future = executor.submit(Tracing.wrap(new JobCallable<PeekingResultIterator>() {
-
+
@Override
public PeekingResultIterator call() throws Exception {
long startTime = System.currentTimeMillis();
- ResultIterator scanner = new TableResultIterator(context, tableRef, scan);
+ ResultIterator scanner = new TableResultIterator(context, tableRef, scan, scanMetrics);
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Id: " + scanId + ", Time: " + (System.currentTimeMillis() - startTime) + "ms, Scan: " + scan, ScanUtil.getCustomAnnotations(scan)));
}
- PeekingResultIterator iterator = iteratorFactory.newIterator(context, scanner, scan);
+ PeekingResultIterator iterator = iteratorFactory.newIterator(context, scanner, scan, physicalTableName);
// Fill the scanner's cache. This helps reduce latency since we are parallelizing the I/O needed.
iterator.peek();
@@ -109,6 +119,11 @@ public class ParallelIterators extends BaseResultIterators {
public Object getJobId() {
return ParallelIterators.this;
}
+
+ @Override
+ public TaskExecutionMetricsHolder getTaskExecutionMetric() {
+ return taskMetrics;
+ }
}, "Parallel scanner for table: " + tableRef.getTable().getName().getString()));
// Add our future in the right place so that we can concatenate the
// results of the inner futures versus merge sorting across all of them.
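The pattern above shuffles the scan list and submits one callable per scan so that I/O fans out across region servers before the thread pool saturates, then peek()s each iterator to start filling caches in parallel. A reduced sketch of that fan-out shape using only java.util.concurrent types (no Phoenix classes):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class FanOutDemo {
        public static void main(String[] args) throws Exception {
            ExecutorService executor = Executors.newFixedThreadPool(4);
            List<Integer> scans = new ArrayList<Integer>();
            for (int i = 0; i < 10; i++) scans.add(i);
            Collections.shuffle(scans);   // spread first submissions across "servers"

            List<Future<String>> futures = new ArrayList<Future<String>>();
            for (final Integer scan : scans) {
                futures.add(executor.submit(new Callable<String>() {
                    @Override public String call() {
                        // Stand-in for opening the scanner and peek()-ing the
                        // first row so caches fill while we are still parallel.
                        return "scan-" + scan + " warmed";
                    }
                }));
            }
            for (Future<String> f : futures) System.out.println(f.get());
            executor.shutdown();
        }
    }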
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java
index 4a9ad3e..92ac570 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java
@@ -18,7 +18,7 @@
package org.apache.phoenix.iterate;
import static com.google.common.base.Preconditions.checkArgument;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.CountMetric.FAILED_QUERY;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_FAILED_QUERY_COUNTER;
import java.sql.SQLException;
import java.util.ArrayList;
@@ -268,7 +268,7 @@ public class RoundRobinResultIterator implements ResultIterator {
}
} finally {
if (toThrow != null) {
- FAILED_QUERY.increment();
+ GLOBAL_FAILED_QUERY_COUNTER.increment();
throw toThrow;
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index fd65d0c..b722794 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -17,7 +17,7 @@
*/
package org.apache.phoenix.iterate;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.SizeMetric.SCAN_BYTES;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_SCAN_BYTES;
import java.io.IOException;
import java.sql.SQLException;
@@ -28,15 +28,20 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.phoenix.monitoring.PhoenixMetrics;
+import org.apache.phoenix.monitoring.CombinableMetric.NoOpRequestMetric;
+import org.apache.phoenix.monitoring.GlobalClientMetrics;
+import org.apache.phoenix.monitoring.CombinableMetric;
import org.apache.phoenix.schema.tuple.ResultTuple;
import org.apache.phoenix.schema.tuple.Tuple;
import org.apache.phoenix.util.ServerUtil;
public class ScanningResultIterator implements ResultIterator {
private final ResultScanner scanner;
- public ScanningResultIterator(ResultScanner scanner) {
+ private final CombinableMetric scanMetrics;
+
+ public ScanningResultIterator(ResultScanner scanner, CombinableMetric scanMetrics) {
this.scanner = scanner;
+ this.scanMetrics = scanMetrics;
}
@Override
@@ -66,17 +71,18 @@ public class ScanningResultIterator implements ResultIterator {
return "ScanningResultIterator [scanner=" + scanner + "]";
}
- private static void calculateScanSize(Result result) {
- if (PhoenixMetrics.isMetricsEnabled()) {
- if (result != null) {
- Cell[] cells = result.rawCells();
- long scanResultSize = 0;
- for (Cell cell : cells) {
- KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
- scanResultSize += kv.heapSize();
- }
- SCAN_BYTES.update(scanResultSize);
- }
- }
- }
+ private void calculateScanSize(Result result) {
+ if (GlobalClientMetrics.isMetricsEnabled() || scanMetrics != NoOpRequestMetric.INSTANCE) {
+ if (result != null) {
+ Cell[] cells = result.rawCells();
+ long scanResultSize = 0;
+ for (Cell cell : cells) {
+ KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
+ scanResultSize += kv.heapSize();
+ }
+ scanMetrics.change(scanResultSize);
+ GLOBAL_SCAN_BYTES.update(scanResultSize);
+ }
+ }
+ }
}
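calculateScanSize above prices a Result by summing the heap size of its cells, and skips the walk entirely unless at least one metric sink is live. A hedged sketch of the same guard-then-measure shape, with a toy Cell type and AtomicLong counters in place of the HBase and Phoenix classes (a null request metric plays the role of NoOpRequestMetric.INSTANCE):

    import java.util.Arrays;
    import java.util.List;
    import java.util.concurrent.atomic.AtomicLong;

    public class ScanSizeDemo {
        static class Cell {
            final int bytes;
            Cell(int bytes) { this.bytes = bytes; }
            long heapSize() { return bytes; }
        }

        static final AtomicLong GLOBAL_SCAN_BYTES = new AtomicLong();
        static final boolean metricsEnabled = true;

        static void calculateScanSize(List<Cell> result, AtomicLong requestMetric) {
            if (!metricsEnabled && requestMetric == null) return;  // no live sink: skip the walk
            if (result == null) return;
            long scanResultSize = 0;
            for (Cell cell : result) scanResultSize += cell.heapSize();
            if (requestMetric != null) requestMetric.addAndGet(scanResultSize);
            GLOBAL_SCAN_BYTES.addAndGet(scanResultSize);
        }

        public static void main(String[] args) {
            AtomicLong perRequest = new AtomicLong();
            calculateScanSize(Arrays.asList(new Cell(64), new Cell(128)), perRequest);
            System.out.println(perRequest.get() + " bytes this request, "
                    + GLOBAL_SCAN_BYTES.get() + " bytes globally");
        }
    }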
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/iterate/SerialIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SerialIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SerialIterators.java
index 6b3b5e3..516d73e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SerialIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SerialIterators.java
@@ -17,6 +17,8 @@
*/
package org.apache.phoenix.iterate;
+import static org.apache.phoenix.monitoring.MetricType.SCAN_BYTES;
+
import java.sql.SQLException;
import java.util.Collections;
import java.util.List;
@@ -29,11 +31,9 @@ import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.iterate.TableResultIterator.ScannerCreation;
import org.apache.phoenix.job.JobManager.JobCallable;
+import org.apache.phoenix.monitoring.TaskExecutionMetricsHolder;
import org.apache.phoenix.trace.util.Tracing;
-import org.apache.phoenix.util.LogUtil;
import org.apache.phoenix.util.ScanUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
@@ -48,7 +48,6 @@ import com.google.common.collect.Lists;
* @since 0.1
*/
public class SerialIterators extends BaseResultIterators {
- private static final Logger logger = LoggerFactory.getLogger(SerialIterators.class);
private static final String NAME = "SERIAL";
private final ParallelIteratorFactory iteratorFactory;
@@ -74,18 +73,15 @@ public class SerialIterators extends BaseResultIterators {
Scan lastScan = scans.get(scans.size()-1);
final Scan overallScan = ScanUtil.newScan(firstScan);
overallScan.setStopRow(lastScan.getStopRow());
+ final String tableName = tableRef.getTable().getPhysicalName().getString();
+ final TaskExecutionMetricsHolder taskMetrics = new TaskExecutionMetricsHolder(context.getReadMetricsQueue(), tableName);
Future<PeekingResultIterator> future = executor.submit(Tracing.wrap(new JobCallable<PeekingResultIterator>() {
-
@Override
public PeekingResultIterator call() throws Exception {
List<PeekingResultIterator> concatIterators = Lists.newArrayListWithExpectedSize(scans.size());
for (final Scan scan : scans) {
- long startTime = System.currentTimeMillis();
- ResultIterator scanner = new TableResultIterator(context, tableRef, scan, ScannerCreation.DELAYED);
- if (logger.isDebugEnabled()) {
- logger.debug(LogUtil.addCustomAnnotations("Id: " + scanId + ", Time: " + (System.currentTimeMillis() - startTime) + "ms, Scan: " + scan, ScanUtil.getCustomAnnotations(scan)));
- }
- concatIterators.add(iteratorFactory.newIterator(context, scanner, scan));
+ ResultIterator scanner = new TableResultIterator(context, tableRef, scan, context.getReadMetricsQueue().allotMetric(SCAN_BYTES, tableName), ScannerCreation.DELAYED);
+ concatIterators.add(iteratorFactory.newIterator(context, scanner, scan, tableName));
}
PeekingResultIterator concatIterator = ConcatResultIterator.newIterator(concatIterators);
allIterators.add(concatIterator);
@@ -101,6 +97,11 @@ public class SerialIterators extends BaseResultIterators {
public Object getJobId() {
return SerialIterators.this;
}
+
+ @Override
+ public TaskExecutionMetricsHolder getTaskExecutionMetric() {
+ return taskMetrics;
+ }
}, "Serial scanner for table: " + tableRef.getTable().getName().getString()));
// Add our singleton Future which will execute serially
nestedFutures.add(Collections.singletonList(new Pair<Scan,Future<PeekingResultIterator>>(overallScan,future)));
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/iterate/SpoolingResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SpoolingResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SpoolingResultIterator.java
index 63d3761..0a3c32b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SpoolingResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SpoolingResultIterator.java
@@ -17,8 +17,10 @@
*/
package org.apache.phoenix.iterate;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.CountMetric.NUM_SPOOL_FILE;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.SizeMetric.SPOOL_FILE_SIZE;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MEMORY_CHUNK_BYTES;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MEMORY_WAIT_TIME;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_SPOOL_FILE_COUNTER;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_SPOOL_FILE_SIZE;
import java.io.BufferedInputStream;
import java.io.DataInputStream;
@@ -37,6 +39,9 @@ import org.apache.hadoop.io.WritableUtils;
import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.memory.MemoryManager;
import org.apache.phoenix.memory.MemoryManager.MemoryChunk;
+import org.apache.phoenix.monitoring.MemoryMetricsHolder;
+import org.apache.phoenix.monitoring.ReadMetricQueue;
+import org.apache.phoenix.monitoring.SpoolingMetricsHolder;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
import org.apache.phoenix.schema.tuple.ResultTuple;
@@ -55,8 +60,10 @@ import org.apache.phoenix.util.TupleUtil;
* @since 0.1
*/
public class SpoolingResultIterator implements PeekingResultIterator {
- private final PeekingResultIterator spoolFrom;
+ private final PeekingResultIterator spoolFrom;
+ private final SpoolingMetricsHolder spoolMetrics;
+ private final MemoryMetricsHolder memoryMetrics;
public static class SpoolingResultIteratorFactory implements ParallelIteratorFactory {
private final QueryServices services;
@@ -64,14 +71,16 @@ public class SpoolingResultIterator implements PeekingResultIterator {
this.services = services;
}
@Override
- public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan) throws SQLException {
- return new SpoolingResultIterator(scanner, services);
+ public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan, String physicalTableName) throws SQLException {
+ ReadMetricQueue readRequestMetric = context.getReadMetricsQueue();
+ SpoolingMetricsHolder spoolMetrics = new SpoolingMetricsHolder(readRequestMetric, physicalTableName);
+ MemoryMetricsHolder memoryMetrics = new MemoryMetricsHolder(readRequestMetric, physicalTableName);
+ return new SpoolingResultIterator(spoolMetrics, memoryMetrics, scanner, services);
}
-
}
- public SpoolingResultIterator(ResultIterator scanner, QueryServices services) throws SQLException {
- this (scanner, services.getMemoryManager(),
+ private SpoolingResultIterator(SpoolingMetricsHolder spoolMetrics, MemoryMetricsHolder memoryMetrics, ResultIterator scanner, QueryServices services) throws SQLException {
+ this (spoolMetrics, memoryMetrics, scanner, services.getMemoryManager(),
services.getProps().getInt(QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES),
services.getProps().getLong(QueryServices.MAX_SPOOL_TO_DISK_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_SPOOL_TO_DISK_BYTES),
services.getProps().get(QueryServices.SPOOL_DIRECTORY, QueryServicesOptions.DEFAULT_SPOOL_DIRECTORY));
@@ -86,9 +95,15 @@ public class SpoolingResultIterator implements PeekingResultIterator {
* the memory manager) is exceeded.
* @throws SQLException
*/
- SpoolingResultIterator(ResultIterator scanner, MemoryManager mm, final int thresholdBytes, final long maxSpoolToDisk, final String spoolDirectory) throws SQLException {
+ SpoolingResultIterator(SpoolingMetricsHolder sMetrics, MemoryMetricsHolder mMetrics, ResultIterator scanner, MemoryManager mm, final int thresholdBytes, final long maxSpoolToDisk, final String spoolDirectory) throws SQLException {
+ this.spoolMetrics = sMetrics;
+ this.memoryMetrics = mMetrics;
boolean success = false;
+ long startTime = System.currentTimeMillis();
final MemoryChunk chunk = mm.allocate(0, thresholdBytes);
+ long waitTime = System.currentTimeMillis() - startTime;
+ GLOBAL_MEMORY_WAIT_TIME.update(waitTime);
+ memoryMetrics.getMemoryWaitTimeMetric().change(waitTime);
DeferredFileOutputStream spoolTo = null;
try {
// Can't be bigger than int, since it's the max of the above allocation
@@ -96,8 +111,11 @@ public class SpoolingResultIterator implements PeekingResultIterator {
spoolTo = new DeferredFileOutputStream(size, "ResultSpooler",".bin", new File(spoolDirectory)) {
@Override
protected void thresholdReached() throws IOException {
- super.thresholdReached();
- chunk.close();
+ try {
+ super.thresholdReached();
+ } finally {
+ chunk.close();
+ }
}
};
DataOutputStream out = new DataOutputStream(spoolTo);
@@ -115,9 +133,14 @@ public class SpoolingResultIterator implements PeekingResultIterator {
byte[] data = spoolTo.getData();
chunk.resize(data.length);
spoolFrom = new InMemoryResultIterator(data, chunk);
+ GLOBAL_MEMORY_CHUNK_BYTES.update(data.length);
+ memoryMetrics.getMemoryChunkSizeMetric().change(data.length);
} else {
- NUM_SPOOL_FILE.increment();
- SPOOL_FILE_SIZE.update(spoolTo.getFile().length());
+ long sizeOfSpoolFile = spoolTo.getFile().length();
+ GLOBAL_SPOOL_FILE_SIZE.update(sizeOfSpoolFile);
+ GLOBAL_SPOOL_FILE_COUNTER.increment();
+ spoolMetrics.getNumSpoolFileMetric().increment();
+ spoolMetrics.getSpoolFileSizeMetric().change(sizeOfSpoolFile);
spoolFrom = new OnDiskResultIterator(spoolTo.getFile());
if (spoolTo.getFile() != null) {
spoolTo.getFile().deleteOnExit();
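The try/finally added around super.thresholdReached() guarantees the memory chunk is released even if switching to the spool file fails. A small sketch of that pattern using commons-io's DeferredFileOutputStream directly, assuming commons-io on the classpath; the MemoryChunk here is a hypothetical stand-in for Phoenix's:

    import java.io.File;
    import java.io.IOException;
    import org.apache.commons.io.output.DeferredFileOutputStream;

    public class SpoolDemo {
        // Hypothetical stand-in for Phoenix's MemoryManager.MemoryChunk.
        static class MemoryChunk {
            void close() { System.out.println("memory chunk released"); }
        }

        public static void main(String[] args) throws IOException {
            final MemoryChunk chunk = new MemoryChunk();
            DeferredFileOutputStream spoolTo = new DeferredFileOutputStream(
                    16, "demo-spool", ".bin", new File(System.getProperty("java.io.tmpdir"))) {
                @Override protected void thresholdReached() throws IOException {
                    try {
                        super.thresholdReached();   // may fail creating the spool file
                    } finally {
                        chunk.close();              // release memory either way
                    }
                }
            };
            spoolTo.write(new byte[32]);            // crosses the 16-byte threshold
            spoolTo.close();
            System.out.println("in memory? " + spoolTo.isInMemory()
                    + ", file: " + spoolTo.getFile());
            if (spoolTo.getFile() != null) spoolTo.getFile().deleteOnExit();
        }
    }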
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
index ea13dfd..6f040d1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
@@ -24,6 +24,7 @@ import java.util.List;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.monitoring.CombinableMetric;
import org.apache.phoenix.schema.TableRef;
import org.apache.phoenix.schema.tuple.Tuple;
import org.apache.phoenix.util.Closeables;
@@ -44,9 +45,10 @@ public class TableResultIterator extends ExplainTable implements ResultIterator
private final Scan scan;
private final HTableInterface htable;
private volatile ResultIterator delegate;
-
- public TableResultIterator(StatementContext context, TableRef tableRef) throws SQLException {
- this(context, tableRef, context.getScan());
+ private final CombinableMetric scanMetrics;
+
+ public TableResultIterator(StatementContext context, TableRef tableRef, CombinableMetric scanMetrics) throws SQLException {
+ this(context, tableRef, context.getScan(), scanMetrics);
}
/*
@@ -62,7 +64,7 @@ public class TableResultIterator extends ExplainTable implements ResultIterator
delegate = this.delegate;
if (delegate == null) {
try {
- this.delegate = delegate = isClosing ? ResultIterator.EMPTY_ITERATOR : new ScanningResultIterator(htable.getScanner(scan));
+ this.delegate = delegate = isClosing ? ResultIterator.EMPTY_ITERATOR : new ScanningResultIterator(htable.getScanner(scan), scanMetrics);
} catch (IOException e) {
Closeables.closeQuietly(htable);
throw ServerUtil.parseServerException(e);
@@ -73,13 +75,14 @@ public class TableResultIterator extends ExplainTable implements ResultIterator
return delegate;
}
- public TableResultIterator(StatementContext context, TableRef tableRef, Scan scan) throws SQLException {
- this(context, tableRef, scan, ScannerCreation.IMMEDIATE);
+ public TableResultIterator(StatementContext context, TableRef tableRef, Scan scan, CombinableMetric scanMetrics) throws SQLException {
+ this(context, tableRef, scan, scanMetrics, ScannerCreation.IMMEDIATE);
}
- public TableResultIterator(StatementContext context, TableRef tableRef, Scan scan, ScannerCreation creationMode) throws SQLException {
+ public TableResultIterator(StatementContext context, TableRef tableRef, Scan scan, CombinableMetric scanMetrics, ScannerCreation creationMode) throws SQLException {
super(context, tableRef);
this.scan = scan;
+ this.scanMetrics = scanMetrics;
htable = context.getConnection().getQueryServices().getTable(tableRef.getTable().getPhysicalName().getBytes());
if (creationMode == ScannerCreation.IMMEDIATE) {
getDelegate(false);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/iterate/UnionResultIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/UnionResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/UnionResultIterators.java
index b7c8b21..2296982 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/UnionResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/UnionResultIterators.java
@@ -22,6 +22,9 @@ import java.util.List;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.monitoring.OverAllQueryMetrics;
+import org.apache.phoenix.monitoring.ReadMetricQueue;
import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.util.ServerUtil;
@@ -39,14 +42,22 @@ public class UnionResultIterators implements ResultIterators {
private final List<List<Scan>> scans;
private final List<PeekingResultIterator> iterators;
private final List<QueryPlan> plans;
-
- public UnionResultIterators(List<QueryPlan> plans) throws SQLException {
+ private final List<ReadMetricQueue> readMetricsList;
+ private final List<OverAllQueryMetrics> overAllQueryMetricsList;
+ private boolean closed;
+ private final StatementContext parentStmtCtx;
+ public UnionResultIterators(List<QueryPlan> plans, StatementContext parentStmtCtx) throws SQLException {
+ this.parentStmtCtx = parentStmtCtx;
this.plans = plans;
int nPlans = plans.size();
iterators = Lists.newArrayListWithExpectedSize(nPlans);
splits = Lists.newArrayListWithExpectedSize(nPlans * 30);
scans = Lists.newArrayListWithExpectedSize(nPlans * 10);
+ readMetricsList = Lists.newArrayListWithCapacity(nPlans);
+ overAllQueryMetricsList = Lists.newArrayListWithCapacity(nPlans);
for (QueryPlan plan : this.plans) {
+ readMetricsList.add(plan.getContext().getReadMetricsQueue());
+ overAllQueryMetricsList.add(plan.getContext().getOverallQueryMetrics());
iterators.add(LookAheadResultIterator.wrap(plan.iterator()));
splits.addAll(plan.getSplits());
scans.addAll(plan.getScans());
@@ -59,32 +70,47 @@ public class UnionResultIterators implements ResultIterators {
}
@Override
- public void close() throws SQLException {
- SQLException toThrow = null;
- try {
- if (iterators != null) {
- for (int index=0; index < iterators.size(); index++) {
- PeekingResultIterator iterator = iterators.get(index);
- try {
- iterator.close();
- } catch (Exception e) {
- if (toThrow == null) {
- toThrow = ServerUtil.parseServerException(e);
- } else {
- toThrow.setNextException(ServerUtil.parseServerException(e));
+ public void close() throws SQLException {
+ if (!closed) {
+ closed = true;
+ SQLException toThrow = null;
+ try {
+ if (iterators != null) {
+ for (int index=0; index < iterators.size(); index++) {
+ PeekingResultIterator iterator = iterators.get(index);
+ try {
+ iterator.close();
+ } catch (Exception e) {
+ if (toThrow == null) {
+ toThrow = ServerUtil.parseServerException(e);
+ } else {
+ toThrow.setNextException(ServerUtil.parseServerException(e));
+ }
}
}
}
- }
- } catch (Exception e) {
- toThrow = ServerUtil.parseServerException(e);
- } finally {
- if (toThrow != null) {
- throw toThrow;
+ } catch (Exception e) {
+ toThrow = ServerUtil.parseServerException(e);
+ } finally {
+ setMetricsInParentContext();
+ if (toThrow != null) {
+ throw toThrow;
+ }
}
}
}
-
+
+ private void setMetricsInParentContext() {
+ ReadMetricQueue parentCtxReadMetrics = parentStmtCtx.getReadMetricsQueue();
+ for (ReadMetricQueue readMetrics : readMetricsList) {
+ parentCtxReadMetrics.combineReadMetrics(readMetrics);
+ }
+ OverAllQueryMetrics parentCtxQueryMetrics = parentStmtCtx.getOverallQueryMetrics();
+ for (OverAllQueryMetrics metric : overAllQueryMetricsList) {
+ parentCtxQueryMetrics.combine(metric);
+ }
+ }
+
@Override
public List<List<Scan>> getScans() {
return scans;
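setMetricsInParentContext folds each child plan's metrics into the parent's queues exactly once, guarded by the closed flag. A reduced sketch of that combine step using plain maps; the map shapes and the SCAN_BYTES name are illustrative, not the ReadMetricQueue signatures:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class CombineMetricsDemo {
        static void combine(Map<String, Long> parent, Map<String, Long> child) {
            for (Map.Entry<String, Long> e : child.entrySet()) {
                Long prev = parent.get(e.getKey());
                parent.put(e.getKey(), prev == null ? e.getValue() : prev + e.getValue());
            }
        }

        public static void main(String[] args) {
            Map<String, Long> parent = new HashMap<String, Long>();
            List<Map<String, Long>> children = new ArrayList<Map<String, Long>>();
            Map<String, Long> child1 = new HashMap<String, Long>();
            child1.put("SCAN_BYTES", 100L);
            Map<String, Long> child2 = new HashMap<String, Long>();
            child2.put("SCAN_BYTES", 250L);
            children.add(child1);
            children.add(child2);

            boolean closed = false;
            if (!closed) {        // the closed flag is what makes close() idempotent above
                closed = true;
                for (Map<String, Long> child : children) {
                    combine(parent, child);
                }
            }
            System.out.println(parent);   // {SCAN_BYTES=350}
        }
    }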
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index dad60c1..5805999 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -123,7 +123,7 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
private final Properties info;
private List<SQLCloseable> statements = new ArrayList<SQLCloseable>();
private final Map<PDataType<?>, Format> formatters = new HashMap<>();
- private MutationState mutationState;
+ private final MutationState mutationState;
private final int mutateBatchSize;
private final Long scn;
private boolean isAutoCommit = false;
@@ -137,9 +137,9 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
private boolean isClosed = false;
private Sampler<?> sampler;
private boolean readOnly = false;
- private Map<String, String> customTracingAnnotations = emptyMap();
private Consistency consistency = Consistency.STRONG;
-
+ private Map<String, String> customTracingAnnotations = emptyMap();
+ private final boolean isRequestLevelMetricsEnabled;
static {
Tracing.addTraceMetricsSource();
}
@@ -237,6 +237,7 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
! Objects.equal(tenantId, function.getTenantId()));
}
};
+ this.isRequestLevelMetricsEnabled = JDBCUtil.isCollectingRequestLevelMetricsEnabled(url, info, this.services.getProps());
this.mutationState = newMutationState(maxSize);
this.metaData = metaData.pruneTables(pruner);
this.metaData = metaData.pruneFunctions(pruner);
@@ -438,6 +439,7 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
return;
}
try {
+ clearMetrics();
try {
if (traceScope != null) {
traceScope.close();
@@ -866,4 +868,23 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
public void setTraceScope(TraceScope traceScope) {
this.traceScope = traceScope;
}
+
+ public Map<String, Map<String, Long>> getMutationMetrics() {
+ return mutationState.getMutationMetricQueue().aggregate();
+ }
+
+ public Map<String, Map<String, Long>> getReadMetrics() {
+ return mutationState.getReadMetricQueue() != null ? mutationState.getReadMetricQueue().aggregate() : Collections.<String, Map<String, Long>>emptyMap();
+ }
+
+ public boolean isRequestLevelMetricsEnabled() {
+ return isRequestLevelMetricsEnabled;
+ }
+
+ public void clearMetrics() {
+ mutationState.getMutationMetricQueue().clearMetrics();
+ if (mutationState.getReadMetricQueue() != null) {
+ mutationState.getReadMetricQueue().clearMetrics();
+ }
+ }
}
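The three new accessors give callers a per-connection view of mutation and read metrics. A usage sketch, assuming a Phoenix cluster reachable at localhost, an existing table T(ID INTEGER PRIMARY KEY, V VARCHAR), and request-level metrics enabled for the connection:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.util.Map;

    import org.apache.phoenix.jdbc.PhoenixConnection;

    public class ConnectionMetricsDemo {
        public static void main(String[] args) throws SQLException {
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            try {
                conn.createStatement().executeUpdate("UPSERT INTO T VALUES (1, 'a')");
                conn.commit();

                // The phoenix driver hands back a PhoenixConnection.
                PhoenixConnection pconn = (PhoenixConnection) conn;
                // Outer key: table name; inner map: metric name -> value.
                for (Map.Entry<String, Map<String, Long>> e : pconn.getMutationMetrics().entrySet()) {
                    System.out.println(e.getKey() + " -> " + e.getValue());
                }
                pconn.clearMetrics();   // reset before the next unit of work
            } finally {
                conn.close();
            }
        }
    }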
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index d1b3b27..2dd8af4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.compile.ColumnProjector;
import org.apache.phoenix.compile.ExpressionProjector;
import org.apache.phoenix.compile.RowProjector;
+import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.coprocessor.MetaDataProtocol;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.exception.SQLExceptionInfo;
@@ -311,7 +312,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
public static final int CLIENT_KEY_VALUE_BUILDER_THRESHOLD = VersionUtil.encodeVersion("0", "94", "14");
PhoenixDatabaseMetaData(PhoenixConnection connection) throws SQLException {
- this.emptyResultSet = new PhoenixResultSet(ResultIterator.EMPTY_ITERATOR, RowProjector.EMPTY_PROJECTOR, new PhoenixStatement(connection));
+ this.emptyResultSet = new PhoenixResultSet(ResultIterator.EMPTY_ITERATOR, RowProjector.EMPTY_PROJECTOR, new StatementContext(new PhoenixStatement(connection), false));
this.connection = connection;
}
@@ -509,11 +510,10 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
public PhoenixStatement newStatement(PhoenixConnection connection) {
return new PhoenixStatement(connection) {
@Override
- protected PhoenixResultSet newResultSet(ResultIterator iterator, RowProjector projector)
- throws SQLException {
- return new PhoenixResultSet(
- new TenantColumnFilteringIterator(iterator, projector),
- projector, this);
+ protected PhoenixResultSet newResultSet(ResultIterator iterator, RowProjector projector,
+ StatementContext context) throws SQLException {
+ return new PhoenixResultSet(new TenantColumnFilteringIterator(iterator, projector),
+ projector, context);
}
};
}
@@ -523,7 +523,12 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
}
return stmt.executeQuery(buf.toString());
}
-
+
+// private ColumnResolver getColumnResolverForCatalogTable() throws SQLException {
+// TableRef tableRef = new TableRef(getTable(connection, SYSTEM_CATALOG_NAME));
+// return FromCompiler.getResolver(tableRef);
+// }
+
/**
* Filters the tenant id column out of a column metadata result set (thus, where each row is a column definition).
* The tenant id is by definition the first column of the primary key, but the primary key does not necessarily
@@ -1007,7 +1012,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
}
@Override
public ResultSet getTableTypes() throws SQLException {
- return new PhoenixResultSet(new MaterializedResultIterator(TABLE_TYPE_TUPLES), TABLE_TYPE_ROW_PROJECTOR, new PhoenixStatement(connection));
+ return new PhoenixResultSet(new MaterializedResultIterator(TABLE_TYPE_TUPLES), TABLE_TYPE_ROW_PROJECTOR, new StatementContext(new PhoenixStatement(connection), false));
}
/**
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
index 8ee56ea..da06370 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
@@ -39,16 +39,21 @@ import java.sql.Time;
import java.sql.Timestamp;
import java.text.Format;
import java.util.Calendar;
+import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.compile.ColumnProjector;
import org.apache.phoenix.compile.RowProjector;
+import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.exception.SQLExceptionInfo;
import org.apache.phoenix.iterate.ResultIterator;
+import org.apache.phoenix.monitoring.OverAllQueryMetrics;
+import org.apache.phoenix.monitoring.ReadMetricQueue;
import org.apache.phoenix.schema.tuple.ResultTuple;
import org.apache.phoenix.schema.tuple.Tuple;
import org.apache.phoenix.schema.types.PBoolean;
@@ -109,18 +114,25 @@ public class PhoenixResultSet implements ResultSet, SQLCloseable, org.apache.pho
private final ResultIterator scanner;
private final RowProjector rowProjector;
private final PhoenixStatement statement;
+ private final StatementContext context;
+ private final ReadMetricQueue readMetricsQueue;
+ private final OverAllQueryMetrics overAllQueryMetrics;
private final ImmutableBytesWritable ptr = new ImmutableBytesWritable();
private Tuple currentRow = BEFORE_FIRST;
private boolean isClosed = false;
private boolean wasNull = false;
-
- public PhoenixResultSet(ResultIterator resultIterator, RowProjector rowProjector, PhoenixStatement statement) throws SQLException {
+ private boolean firstRecordRead = false;
+
+ public PhoenixResultSet(ResultIterator resultIterator, RowProjector rowProjector, StatementContext ctx) throws SQLException {
this.rowProjector = rowProjector;
this.scanner = resultIterator;
- this.statement = statement;
+ this.context = ctx;
+ this.statement = context.getStatement();
+ this.readMetricsQueue = context.getReadMetricsQueue();
+ this.overAllQueryMetrics = context.getOverallQueryMetrics();
}
-
+
@Override
public boolean absolute(int row) throws SQLException {
throw new SQLFeatureNotSupportedException();
@@ -147,14 +159,14 @@ public class PhoenixResultSet implements ResultSet, SQLCloseable, org.apache.pho
@Override
public void close() throws SQLException {
- if (isClosed) {
- return;
- }
+ if (isClosed) { return; }
try {
scanner.close();
} finally {
isClosed = true;
statement.getResultSets().remove(this);
+ overAllQueryMetrics.endQuery();
+ overAllQueryMetrics.stopResultSetWatch();
}
}
@@ -754,6 +766,10 @@ public class PhoenixResultSet implements ResultSet, SQLCloseable, org.apache.pho
public boolean next() throws SQLException {
checkOpen();
try {
+ if (!firstRecordRead) {
+ firstRecordRead = true;
+ overAllQueryMetrics.startResultSetWatch();
+ }
currentRow = scanner.next();
rowProjector.reset();
} catch (RuntimeException e) {
@@ -764,6 +780,10 @@ public class PhoenixResultSet implements ResultSet, SQLCloseable, org.apache.pho
}
throw e;
}
+ if (currentRow == null) {
+ overAllQueryMetrics.endQuery();
+ overAllQueryMetrics.stopResultSetWatch();
+ }
return currentRow != null;
}
@@ -1261,4 +1281,18 @@ public class PhoenixResultSet implements ResultSet, SQLCloseable, org.apache.pho
public ResultIterator getUnderlyingIterator() {
return scanner;
}
+
+ public Map<String, Map<String, Long>> getReadMetrics() {
+ return readMetricsQueue.aggregate();
+ }
+
+ public Map<String, Long> getOverAllRequestReadMetrics() {
+ return overAllQueryMetrics.publish();
+ }
+
+ public void resetMetrics() {
+ readMetricsQueue.clearMetrics();
+ overAllQueryMetrics.reset();
+ }
+
}
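PhoenixResultSet now exposes per-table read metrics and the overall query metrics once the rows have been consumed; the stopwatch starts on the first next() and stops when next() returns null or the set is closed. A usage sketch under the same assumptions as the connection example above:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    import org.apache.phoenix.jdbc.PhoenixResultSet;

    public class ResultSetMetricsDemo {
        public static void main(String[] args) throws SQLException {
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            try {
                Statement stmt = conn.createStatement();
                ResultSet rs = stmt.executeQuery("SELECT * FROM T");
                while (rs.next()) {
                    // drain the rows; the result set watch stops after the last one
                }
                PhoenixResultSet prs = (PhoenixResultSet) rs;  // the driver returns this type
                System.out.println("per-table read metrics: " + prs.getReadMetrics());
                System.out.println("overall query metrics:  " + prs.getOverAllRequestReadMetrics());
                prs.resetMetrics();   // clear before reusing the statement
                rs.close();
            } finally {
                conn.close();
            }
        }
    }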
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index 7c94d62..c6c5b0c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -17,9 +17,9 @@
*/
package org.apache.phoenix.jdbc;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.CountMetric.MUTATION_COUNT;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.CountMetric.QUERY_COUNT;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.SizeMetric.QUERY_TIME;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MUTATION_SQL_COUNTER;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_QUERY_TIME;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_SELECT_SQL_COUNTER;
import java.io.IOException;
import java.io.Reader;
@@ -216,8 +216,8 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
return resultSets;
}
- protected PhoenixResultSet newResultSet(ResultIterator iterator, RowProjector projector) throws SQLException {
- return new PhoenixResultSet(iterator, projector, this);
+ protected PhoenixResultSet newResultSet(ResultIterator iterator, RowProjector projector, StatementContext context) throws SQLException {
+ return new PhoenixResultSet(iterator, projector, context);
}
protected boolean execute(final CompilableStatement stmt) throws SQLException {
@@ -235,7 +235,7 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
}
protected PhoenixResultSet executeQuery(final CompilableStatement stmt) throws SQLException {
- QUERY_COUNT.increment();
+ GLOBAL_SELECT_SQL_COUNTER.increment();
try {
return CallRunner.run(
new CallRunner.CallableThrowable<PhoenixResultSet, SQLException>() {
@@ -253,7 +253,9 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
String explainPlan = QueryUtil.getExplainPlan(resultIterator);
logger.debug(LogUtil.addCustomAnnotations("Explain plan: " + explainPlan, connection));
}
- PhoenixResultSet rs = newResultSet(resultIterator, plan.getProjector());
+ StatementContext context = plan.getContext();
+ context.getOverallQueryMetrics().startQuery();
+ PhoenixResultSet rs = newResultSet(resultIterator, plan.getProjector(), context);
resultSets.add(rs);
setLastQueryPlan(plan);
setLastResultSet(rs);
@@ -272,7 +274,7 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
// Regardless of whether the query was successfully handled or not,
// update the time spent so far. If needed, we can separate out the
// success times and failure times.
- QUERY_TIME.update(System.currentTimeMillis() - startTime);
+ GLOBAL_QUERY_TIME.update(System.currentTimeMillis() - startTime);
}
}
}, PhoenixContextExecutor.inContext());
@@ -288,7 +290,7 @@ public class PhoenixStatement implements Statement, SQLCloseable, org.apache.pho
SQLExceptionCode.READ_ONLY_CONNECTION).
build().buildException();
}
- MUTATION_COUNT.increment();
+ GLOBAL_MUTATION_SQL_COUNTER.increment();
try {
return CallRunner
.run(
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0f6595c0/phoenix-core/src/main/java/org/apache/phoenix/job/JobManager.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/job/JobManager.java b/phoenix-core/src/main/java/org/apache/phoenix/job/JobManager.java
index 31ef742..7406e46 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/job/JobManager.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/job/JobManager.java
@@ -17,11 +17,11 @@
*/
package org.apache.phoenix.job;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.CountMetric.REJECTED_TASK_COUNT;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.CountMetric.TASK_COUNT;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.SizeMetric.TASK_END_TO_END_TIME;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.SizeMetric.TASK_EXECUTION_TIME;
-import static org.apache.phoenix.monitoring.PhoenixMetrics.SizeMetric.TASK_QUEUE_WAIT_TIME;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_REJECTED_TASK_COUNTER;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_TASK_END_TO_END_TIME;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_TASK_EXECUTED_COUNTER;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_TASK_EXECUTION_TIME;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_TASK_QUEUE_WAIT_TIME;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Callable;
@@ -36,6 +36,10 @@ import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
+import javax.annotation.Nullable;
+
+import org.apache.phoenix.monitoring.TaskExecutionMetricsHolder;
+
import com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
*
@@ -63,6 +67,7 @@ public class JobManager<T> extends AbstractRoundRobinQueue<T> {
public static interface JobRunnable<T> extends Runnable {
public Object getJobId();
+ public TaskExecutionMetricsHolder getTaskExecutionMetric();
}
public static ThreadPoolExecutor createThreadPoolExec(int keepAliveMs, int size, int queueSize, boolean useInstrumentedThreadPool) {
@@ -117,13 +122,17 @@ public class JobManager<T> extends AbstractRoundRobinQueue<T> {
*/
static class JobFutureTask<T> extends FutureTask<T> {
private final Object jobId;
+ @Nullable
+ private final TaskExecutionMetricsHolder taskMetric;
public JobFutureTask(Runnable r, T t) {
super(r, t);
if(r instanceof JobRunnable){
this.jobId = ((JobRunnable)r).getJobId();
+ this.taskMetric = ((JobRunnable)r).getTaskExecutionMetric();
} else {
this.jobId = this;
+ this.taskMetric = null;
}
}
@@ -132,8 +141,10 @@ public class JobManager<T> extends AbstractRoundRobinQueue<T> {
// FIXME: this fails when executor used by hbase
if (c instanceof JobCallable) {
this.jobId = ((JobCallable<T>) c).getJobId();
+ this.taskMetric = ((JobCallable<T>) c).getTaskExecutionMetric();
} else {
this.jobId = this;
+ this.taskMetric = null;
}
}
@@ -187,6 +198,7 @@ public class JobManager<T> extends AbstractRoundRobinQueue<T> {
*/
public static interface JobCallable<T> extends Callable<T> {
public Object getJobId();
+ public TaskExecutionMetricsHolder getTaskExecutionMetric();
}
@@ -224,27 +236,40 @@ public class JobManager<T> extends AbstractRoundRobinQueue<T> {
private final RejectedExecutionHandler rejectedExecHandler = new RejectedExecutionHandler() {
@Override
public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
- REJECTED_TASK_COUNT.increment();
+ TaskExecutionMetricsHolder metrics = getRequestMetric(r);
+ if (metrics != null) {
+ metrics.getNumRejectedTasks().increment();
+ }
+ GLOBAL_REJECTED_TASK_COUNTER.increment();
throw new RejectedExecutionException("Task " + r.toString() + " rejected from " + executor.toString());
}
};
- public InstrumentedThreadPoolExecutor(String threadPoolName, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
- BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory) {
+ public InstrumentedThreadPoolExecutor(String threadPoolName, int corePoolSize, int maximumPoolSize,
+ long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory) {
super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory);
setRejectedExecutionHandler(rejectedExecHandler);
}
@Override
public void execute(Runnable task) {
- TASK_COUNT.increment();
+ TaskExecutionMetricsHolder metrics = getRequestMetric(task);
+ if (metrics != null) {
+ metrics.getNumTasks().increment();
+ }
+ GLOBAL_TASK_EXECUTED_COUNTER.increment();
super.execute(task);
}
@Override
protected void beforeExecute(Thread worker, Runnable task) {
InstrumentedJobFutureTask instrumentedTask = (InstrumentedJobFutureTask)task;
- TASK_QUEUE_WAIT_TIME.update(System.currentTimeMillis() - instrumentedTask.getTaskSubmissionTime());
+ long queueWaitTime = System.currentTimeMillis() - instrumentedTask.getTaskSubmissionTime();
+ GLOBAL_TASK_QUEUE_WAIT_TIME.update(queueWaitTime);
+ TaskExecutionMetricsHolder metrics = getRequestMetric(task);
+ if (metrics != null) {
+ metrics.getTaskQueueWaitTime().change(queueWaitTime);
+ }
super.beforeExecute(worker, instrumentedTask);
}
@@ -254,10 +279,21 @@ public class JobManager<T> extends AbstractRoundRobinQueue<T> {
try {
super.afterExecute(instrumentedTask, t);
} finally {
- TASK_EXECUTION_TIME.update(System.currentTimeMillis() - instrumentedTask.getTaskExecutionStartTime());
- TASK_END_TO_END_TIME.update(System.currentTimeMillis() - instrumentedTask.getTaskSubmissionTime());
+ long taskExecutionTime = System.currentTimeMillis() - instrumentedTask.getTaskExecutionStartTime();
+ long endToEndTaskTime = System.currentTimeMillis() - instrumentedTask.getTaskSubmissionTime();
+ TaskExecutionMetricsHolder metrics = getRequestMetric(task);
+ if (metrics != null) {
+ metrics.getTaskExecutionTime().change(taskExecutionTime);
+ metrics.getTaskEndToEndTime().change(endToEndTaskTime);
+ }
+ GLOBAL_TASK_EXECUTION_TIME.update(taskExecutionTime);
+ GLOBAL_TASK_END_TO_END_TIME.update(endToEndTaskTime);
}
}
+
+ private static TaskExecutionMetricsHolder getRequestMetric(Runnable task) {
+ return ((JobFutureTask)task).taskMetric;
+ }
}
}
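The instrumented pool above times each task twice: queue wait (submission to beforeExecute) and execution (beforeExecute to afterExecute). A compact sketch of the same hooks on a bare ThreadPoolExecutor, with the metric holders replaced by prints:

    import java.util.concurrent.Callable;
    import java.util.concurrent.FutureTask;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class InstrumentedPoolDemo {
        static class TimedTask<T> extends FutureTask<T> {
            final long submitTime = System.currentTimeMillis();
            volatile long execStartTime;
            TimedTask(Callable<T> c) { super(c); }
        }

        public static void main(String[] args) throws Exception {
            ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS,
                    new LinkedBlockingQueue<Runnable>()) {
                @Override protected void beforeExecute(Thread t, Runnable r) {
                    TimedTask<?> task = (TimedTask<?>) r;
                    task.execStartTime = System.currentTimeMillis();
                    System.out.println("queue wait: " + (task.execStartTime - task.submitTime) + " ms");
                }
                @Override protected void afterExecute(Runnable r, Throwable t) {
                    TimedTask<?> task = (TimedTask<?>) r;
                    long now = System.currentTimeMillis();
                    System.out.println("execution:  " + (now - task.execStartTime) + " ms, "
                            + "end to end: " + (now - task.submitTime) + " ms");
                }
            };
            TimedTask<String> task = new TimedTask<String>(new Callable<String>() {
                @Override public String call() throws Exception { Thread.sleep(50); return "done"; }
            });
            pool.execute(task);   // execute() directly so our TimedTask is what runs
            System.out.println(task.get());
            pool.shutdown();
        }
    }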
[41/47] phoenix git commit: PHOENIX-2050 Avoid checking for child
views unless operating on table
Posted by ma...@apache.org.
PHOENIX-2050 Avoid checking for child views unless operating on table
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a8a9d01d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a8a9d01d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a8a9d01d
Branch: refs/heads/calcite
Commit: a8a9d01d1eaafc33ea73913bec16254ac6a55be3
Parents: fb8c941
Author: James Taylor <jt...@salesforce.com>
Authored: Mon Jun 29 21:36:19 2015 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Jun 30 17:31:24 2015 -0700
----------------------------------------------------------------------
.../apache/phoenix/end2end/AlterTableIT.java | 18 +--
.../coprocessor/MetaDataEndpointImpl.java | 141 ++++++++++---------
2 files changed, 81 insertions(+), 78 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a8a9d01d/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index 946aaab..cd46927 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -2149,23 +2149,13 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
"CREATE VIEW " + grandChildView + " AS SELECT * FROM " + childView;
conn.createStatement().execute(grandChildViewDDL);
- // dropping base table column from child view should fail
+ // dropping base table column from child view should succeed
String dropColumnFromChildView = "ALTER VIEW " + childView + " DROP COLUMN V2";
- try {
- conn.createStatement().execute(dropColumnFromChildView);
- fail("Dropping columns from a view that has child views on it is not allowed");
- } catch (SQLException e) {
- assertEquals(CANNOT_MUTATE_TABLE.getErrorCode(), e.getErrorCode());
- }
+ conn.createStatement().execute(dropColumnFromChildView);
- // dropping view specific column from child view should fail
+ // dropping view specific column from child view should succeed
dropColumnFromChildView = "ALTER VIEW " + childView + " DROP COLUMN CHILD_VIEW_COL";
- try {
- conn.createStatement().execute(dropColumnFromChildView);
- fail("Dropping columns from a view that has child views on it is not allowed");
- } catch (SQLException e) {
- assertEquals(SQLExceptionCode.CANNOT_MUTATE_TABLE.getErrorCode(), e.getErrorCode());
- }
+ conn.createStatement().execute(dropColumnFromChildView);
// Adding column to view that has child views is allowed
String addColumnToChildView = "ALTER VIEW " + childView + " ADD V5 VARCHAR";
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a8a9d01d/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 0ddd58d..cc486d5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -1366,69 +1366,76 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
List<byte[]> indexNames = Lists.newArrayList();
List<Cell> results = Lists.newArrayList();
try (RegionScanner scanner = region.getScanner(scan);) {
- scanner.next(results);
- if (results.isEmpty()) { // Should not be possible
- return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
- }
+ scanner.next(results);
+ if (results.isEmpty()) { // Should not be possible
+ return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND,
+ EnvironmentEdgeManager.currentTimeMillis(), null);
+ }
- // Handle any child views that exist
- TableViewFinderResult tableViewFinderResult = findChildViews(region, tenantId, table, PHYSICAL_TABLE_BYTES);
- if (tableViewFinderResult.hasViews()) {
- if (isCascade) {
- if (tableViewFinderResult.allViewsInMultipleRegions()) {
- // We don't yet support deleting a table with views where SYSTEM.CATALOG has split and the
- // view metadata spans multiple regions
- return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), null);
- } else if (tableViewFinderResult.allViewsInSingleRegion()) {
- // Recursively delete views - safe as all the views are in the same region
- for (Result viewResult : tableViewFinderResult.getResults()) {
- byte[][] rowKeyMetaData = new byte[3][];
- getVarChars(viewResult.getRow(), 3, rowKeyMetaData);
- byte[] viewTenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
- byte[] viewSchemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
- byte[] viewName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
- byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
- Delete delete = new Delete(viewKey, clientTimeStamp);
- rowsToDelete.add(delete);
- acquireLock(region, viewKey, locks);
- MetaDataMutationResult result =
- doDropTable(viewKey, viewTenantId, viewSchemaName, viewName, null, PTableType.VIEW,
- rowsToDelete, invalidateList, locks, tableNamesToDelete, false);
- if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
- return result;
- }
+ // Only tables may have views, so avoid running this potentially
+ // expensive full table scan over the SYSTEM.CATALOG table unless it's needed.
+ if (tableType == PTableType.TABLE || tableType == PTableType.SYSTEM) {
+ // Handle any child views that exist
+ TableViewFinderResult tableViewFinderResult = findChildViews(region, tenantId, table,
+ PHYSICAL_TABLE_BYTES);
+ if (tableViewFinderResult.hasViews()) {
+ if (isCascade) {
+ if (tableViewFinderResult.allViewsInMultipleRegions()) {
+ // We don't yet support deleting a table with views where SYSTEM.CATALOG has split and the
+ // view metadata spans multiple regions
+ return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION,
+ EnvironmentEdgeManager.currentTimeMillis(), null);
+ } else if (tableViewFinderResult.allViewsInSingleRegion()) {
+ // Recursively delete views - safe as all the views are in the same region
+ for (Result viewResult : tableViewFinderResult.getResults()) {
+ byte[][] rowKeyMetaData = new byte[3][];
+ getVarChars(viewResult.getRow(), 3, rowKeyMetaData);
+ byte[] viewTenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
+ byte[] viewSchemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
+ byte[] viewName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
+ byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
+ Delete delete = new Delete(viewKey, clientTimeStamp);
+ rowsToDelete.add(delete);
+ acquireLock(region, viewKey, locks);
+ MetaDataMutationResult result = doDropTable(viewKey, viewTenantId, viewSchemaName,
+ viewName, null, PTableType.VIEW, rowsToDelete, invalidateList, locks,
+ tableNamesToDelete, false);
+ if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { return result; }
+ }
+ }
+ } else {
+ // DROP without CASCADE on tables with child views is not permitted
+ return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION,
+ EnvironmentEdgeManager.currentTimeMillis(), null);
+ }
}
- }
- } else {
- // DROP without CASCADE on tables with child views is not permitted
- return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), null);
}
- }
- if (tableType != PTableType.VIEW) { // Add to list of HTables to delete, unless it's a view
- tableNamesToDelete.add(table.getName().getBytes());
- }
- invalidateList.add(cacheKey);
- byte[][] rowKeyMetaData = new byte[5][];
- do {
- Cell kv = results.get(LINK_TYPE_INDEX);
- int nColumns = getVarChars(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), 0, rowKeyMetaData);
- if (nColumns == 5
- && rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX].length == 0
- && rowKeyMetaData[PhoenixDatabaseMetaData.INDEX_NAME_INDEX].length > 0
- && Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), LINK_TYPE_BYTES, 0, LINK_TYPE_BYTES.length) == 0
- && LinkType.fromSerializedValue(kv.getValueArray()[kv.getValueOffset()]) == LinkType.INDEX_TABLE) {
- indexNames.add(rowKeyMetaData[PhoenixDatabaseMetaData.INDEX_NAME_INDEX]);
+ if (tableType != PTableType.VIEW) { // Add to list of HTables to delete, unless it's a view
+ tableNamesToDelete.add(table.getName().getBytes());
}
- // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
- // FIXME: the version of the Delete constructor without the lock args was introduced
- // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
- // of the client.
- Delete delete = new Delete(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), clientTimeStamp);
- rowsToDelete.add(delete);
- results.clear();
- scanner.next(results);
- } while (!results.isEmpty());
+ invalidateList.add(cacheKey);
+ byte[][] rowKeyMetaData = new byte[5][];
+ do {
+ Cell kv = results.get(LINK_TYPE_INDEX);
+ int nColumns = getVarChars(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), 0, rowKeyMetaData);
+ if (nColumns == 5
+ && rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX].length == 0
+ && rowKeyMetaData[PhoenixDatabaseMetaData.INDEX_NAME_INDEX].length > 0
+ && Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(),
+ LINK_TYPE_BYTES, 0, LINK_TYPE_BYTES.length) == 0
+ && LinkType.fromSerializedValue(kv.getValueArray()[kv.getValueOffset()]) == LinkType.INDEX_TABLE) {
+ indexNames.add(rowKeyMetaData[PhoenixDatabaseMetaData.INDEX_NAME_INDEX]);
+ }
+ // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
+ // FIXME: the version of the Delete constructor without the lock args was introduced
+ // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
+ // of the client.
+ Delete delete = new Delete(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), clientTimeStamp);
+ rowsToDelete.add(delete);
+ results.clear();
+ scanner.next(results);
+ } while (!results.isEmpty());
}
// Recursively delete indexes
@@ -1804,7 +1811,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
* and https://issues.apache.org/jira/browse/PHOENIX-2054 for enabling meta-data changes to a view
* to be propagated to its view hierarchy.
*/
- if (type == PTableType.TABLE) {
+ if (type == PTableType.TABLE || type == PTableType.SYSTEM) {
TableViewFinderResult childViewsResult = findChildViews(region, tenantId, table, PHYSICAL_TABLE_BYTES);
if (childViewsResult.hasViews()) {
// Adding a column is not allowed if the meta-data for child view/s spans over
@@ -2017,11 +2024,17 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
List<Mutation> additionalTableMetaData = Lists.newArrayList();
PTableType type = table.getType();
- TableViewFinderResult childViewsResult = findChildViews(region, tenantId, table,
- (type == PTableType.VIEW ? PARENT_TABLE_BYTES : PHYSICAL_TABLE_BYTES));
- if (childViewsResult.hasViews()) {
- return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager
- .currentTimeMillis(), null);
+ // Only tables may have views, so avoid running this potentially
+ // expensive full table scan over the SYSTEM.CATALOG table unless it's needed.
+ // In the case of a view, we currently allow a column to be dropped without
+ // checking for child views; in the future we'll propagate the drop to them as necessary.
+ if (type == PTableType.TABLE || type == PTableType.SYSTEM) {
+ TableViewFinderResult childViewsResult =
+ findChildViews(region, tenantId, table, PHYSICAL_TABLE_BYTES);
+ if (childViewsResult.hasViews()) {
+ return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager
+ .currentTimeMillis(), null);
+ }
}
for (Mutation m : tableMetaData) {
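In client terms, the AlterTableIT change above means both drops now go through: dropping an inherited column and dropping a view-specific column from a view that itself has child views. A condensed JDBC sketch of that flow (the connection URL is illustrative):

    import java.sql.Connection;
    import java.sql.DriverManager;

    public class DropViewColumnSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                conn.createStatement().execute("CREATE TABLE T (TENANT_ID VARCHAR NOT NULL, "
                    + "PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR "
                    + "CONSTRAINT NAME_PK PRIMARY KEY(TENANT_ID, PK2))");
                conn.createStatement().execute("CREATE VIEW CHILD_VIEW AS SELECT * FROM T");
                conn.createStatement().execute("ALTER VIEW CHILD_VIEW ADD CHILD_VIEW_COL VARCHAR");
                conn.createStatement().execute("CREATE VIEW GRANDCHILD_VIEW AS SELECT * FROM CHILD_VIEW");
                // Both drops succeed after this commit, even though CHILD_VIEW has a child view;
                // the change simply no longer propagates down to GRANDCHILD_VIEW.
                conn.createStatement().execute("ALTER VIEW CHILD_VIEW DROP COLUMN V2");
                conn.createStatement().execute("ALTER VIEW CHILD_VIEW DROP COLUMN CHILD_VIEW_COL");
            }
        }
    }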
[38/47] phoenix git commit: PHOENIX-2042 Windows need hadoop native libraries to run tests (Alicia Ying Shu)
Posted by ma...@apache.org.
PHOENIX-2042 Windows need hadoop native libraries to run tests (Alicia Ying Shu)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/bc2aef89
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/bc2aef89
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/bc2aef89
Branch: refs/heads/calcite
Commit: bc2aef89423eee836f24a123860676e967caf079
Parents: 83b8db4
Author: Enis Soztutar <en...@apache.org>
Authored: Mon Jun 29 18:16:35 2015 -0700
Committer: Enis Soztutar <en...@apache.org>
Committed: Mon Jun 29 18:16:35 2015 -0700
----------------------------------------------------------------------
pom.xml | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/bc2aef89/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 707ea66..9fa6702 100644
--- a/pom.xml
+++ b/pom.xml
@@ -266,7 +266,7 @@
<encoding>UTF-8</encoding>
<forkCount>${numForkedIT}</forkCount>
<reuseForks>true</reuseForks>
- <argLine>-enableassertions -Xmx2000m -XX:MaxPermSize=128m -Djava.security.egd=file:/dev/./urandom</argLine>
+ <argLine>-enableassertions -Xmx2000m -XX:MaxPermSize=128m -Djava.security.egd=file:/dev/./urandom "-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}"</argLine>
<redirectTestOutputToFile>${test.output.tofile}</redirectTestOutputToFile>
<testSourceDirectory>${basedir}/src/it/java</testSourceDirectory>
<groups>org.apache.phoenix.end2end.ClientManagedTimeTest</groups>
@@ -282,7 +282,7 @@
<encoding>UTF-8</encoding>
<forkCount>${numForkedIT}</forkCount>
<reuseForks>true</reuseForks>
- <argLine>-enableassertions -Xmx2000m -XX:MaxPermSize=128m -Djava.security.egd=file:/dev/./urandom</argLine>
+ <argLine>-enableassertions -Xmx2000m -XX:MaxPermSize=128m -Djava.security.egd=file:/dev/./urandom "-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}"</argLine>
<redirectTestOutputToFile>${test.output.tofile}</redirectTestOutputToFile>
<testSourceDirectory>${basedir}/src/it/java</testSourceDirectory>
<groups>org.apache.phoenix.end2end.HBaseManagedTimeTest</groups>
@@ -298,7 +298,7 @@
<encoding>UTF-8</encoding>
<forkCount>${numForkedIT}</forkCount>
<reuseForks>true</reuseForks>
- <argLine>-enableassertions -Xmx2000m -XX:MaxPermSize=128m -Djava.security.egd=file:/dev/./urandom</argLine>
+ <argLine>-enableassertions -Xmx2000m -XX:MaxPermSize=128m -Djava.security.egd=file:/dev/./urandom "-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}"</argLine>
<redirectTestOutputToFile>${test.output.tofile}</redirectTestOutputToFile>
<testSourceDirectory>${basedir}/src/it/java</testSourceDirectory>
<groups>org.apache.phoenix.end2end.NeedsOwnMiniClusterTest</groups>
@@ -395,7 +395,7 @@
<forkCount>${numForkedUT}</forkCount>
<reuseForks>true</reuseForks>
<argLine>-enableassertions -Xmx2250m -XX:MaxPermSize=128m
- -Djava.security.egd=file:/dev/./urandom</argLine>
+ -Djava.security.egd=file:/dev/./urandom "-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}"</argLine>
<redirectTestOutputToFile>${test.output.tofile}</redirectTestOutputToFile>
</configuration>
</plugin>
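The argLine additions only help if the forked JVM actually resolves the native directory from ${hadoop.library.path}. One way to sanity-check the wiring under the same settings is Hadoop's own NativeCodeLoader; the check below is illustrative and not part of this commit:

    import org.apache.hadoop.util.NativeCodeLoader;

    public class NativeLibCheck {
        public static void main(String[] args) {
            // Prints the effective search path the forked test JVM received.
            System.out.println("java.library.path = " + System.getProperty("java.library.path"));
            // True when hadoop.dll / libhadoop was found and loaded from that path.
            System.out.println("native hadoop loaded = " + NativeCodeLoader.isNativeCodeLoaded());
        }
    }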
[29/47] phoenix git commit: PHOENIX-2055 Allow view with views to add column
Posted by ma...@apache.org.
PHOENIX-2055 Allow view with views to add column
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9c069bd4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9c069bd4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9c069bd4
Branch: refs/heads/calcite
Commit: 9c069bd45e6af789f903a5e1f80949de403bbf68
Parents: 7918a3d
Author: Samarth <sa...@salesforce.com>
Authored: Fri Jun 26 16:22:39 2015 -0700
Committer: Samarth <sa...@salesforce.com>
Committed: Fri Jun 26 16:22:39 2015 -0700
----------------------------------------------------------------------
.../apache/phoenix/end2end/AlterTableIT.java | 16 +++--
.../org/apache/phoenix/end2end/UpgradeIT.java | 12 ++--
.../coprocessor/MetaDataEndpointImpl.java | 71 +++++++++++---------
.../apache/phoenix/query/QueryConstants.java | 2 +-
.../org/apache/phoenix/util/UpgradeUtil.java | 6 +-
5 files changed, 60 insertions(+), 47 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9c069bd4/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index ae5f940..946aaab 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -2129,8 +2129,8 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
}
@Test
- public void testAlteringViewThatHasChildViewsNotAllowed() throws Exception {
- String baseTable = "testAlteringViewThatHasChildViewsNotAllowed";
+ public void testAlteringViewThatHasChildViews() throws Exception {
+ String baseTable = "testAlteringViewThatHasChildViews";
String childView = "childView";
String grandChildView = "grandChildView";
try (Connection conn = DriverManager.getConnection(getUrl())) {
@@ -2167,13 +2167,17 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
assertEquals(SQLExceptionCode.CANNOT_MUTATE_TABLE.getErrorCode(), e.getErrorCode());
}
- // Adding column to view that has child views should fail
+ // Adding column to view that has child views is allowed
String addColumnToChildView = "ALTER VIEW " + childView + " ADD V5 VARCHAR";
+ conn.createStatement().execute(addColumnToChildView);
+ // V5 column should be visible now for childView
+ conn.createStatement().execute("SELECT V5 FROM " + childView);
+
+ // However, column V5 shouldn't have propagated to grandChildView. Not until PHOENIX-2054 is fixed.
try {
- conn.createStatement().execute(addColumnToChildView);
- fail("Adding columns to a view that has child views on it is not allowed");
+ conn.createStatement().execute("SELECT V5 FROM " + grandChildView);
} catch (SQLException e) {
- assertEquals(SQLExceptionCode.CANNOT_MUTATE_TABLE.getErrorCode(), e.getErrorCode());
+ assertEquals(SQLExceptionCode.COLUMN_NOT_FOUND.getErrorCode(), e.getErrorCode());
}
// dropping column from the grand child view, however, should work.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9c069bd4/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
index 886e567..094816c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
@@ -20,7 +20,7 @@ package org.apache.phoenix.end2end;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT;
-import static org.apache.phoenix.query.QueryConstants.DIVORCED_VIEW_BASE_COLUMN_COUNT;
+import static org.apache.phoenix.query.QueryConstants.DIVERGED_VIEW_BASE_COLUMN_COUNT;
import static org.apache.phoenix.util.UpgradeUtil.SELECT_BASE_COLUMN_COUNT_FROM_HEADER_ROW;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -187,14 +187,14 @@ public class UpgradeIT extends BaseHBaseManagedTimeIT {
for (int i = 1; i <=2 ; i++) {
String tenantId = "tenant" + i;
checkBaseColumnCount(tenantId, null, "TENANT_VIEW1", 4);
- checkBaseColumnCount(tenantId, null, "TENANT_VIEW2", DIVORCED_VIEW_BASE_COLUMN_COUNT);
- checkBaseColumnCount(tenantId, null, "TENANT_VIEW3", DIVORCED_VIEW_BASE_COLUMN_COUNT);
+ checkBaseColumnCount(tenantId, null, "TENANT_VIEW2", DIVERGED_VIEW_BASE_COLUMN_COUNT);
+ checkBaseColumnCount(tenantId, null, "TENANT_VIEW3", DIVERGED_VIEW_BASE_COLUMN_COUNT);
}
// Verify base column count for global views
checkBaseColumnCount(null, null, "GLOBAL_VIEW1", 4);
- checkBaseColumnCount(null, null, "GLOBAL_VIEW2", DIVORCED_VIEW_BASE_COLUMN_COUNT);
- checkBaseColumnCount(null, null, "GLOBAL_VIEW3", DIVORCED_VIEW_BASE_COLUMN_COUNT);
+ checkBaseColumnCount(null, null, "GLOBAL_VIEW2", DIVERGED_VIEW_BASE_COLUMN_COUNT);
+ checkBaseColumnCount(null, null, "GLOBAL_VIEW3", DIVERGED_VIEW_BASE_COLUMN_COUNT);
}
@@ -243,7 +243,7 @@ public class UpgradeIT extends BaseHBaseManagedTimeIT {
conn3.createStatement().execute(
"ALTER VIEW " + fullViewName + " DROP COLUMN CF2.V2");
}
- expectedBaseColumnCount = DIVORCED_VIEW_BASE_COLUMN_COUNT;
+ expectedBaseColumnCount = DIVERGED_VIEW_BASE_COLUMN_COUNT;
} else {
expectedBaseColumnCount = 6;
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9c069bd4/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 43dc07a..0ddd58d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -62,7 +62,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT_BYTE
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_BYTES;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT_BYTES;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE_BYTES;
-import static org.apache.phoenix.query.QueryConstants.DIVORCED_VIEW_BASE_COLUMN_COUNT;
+import static org.apache.phoenix.query.QueryConstants.DIVERGED_VIEW_BASE_COLUMN_COUNT;
import static org.apache.phoenix.query.QueryConstants.SEPARATOR_BYTE_ARRAY;
import static org.apache.phoenix.schema.PTableType.INDEX;
import static org.apache.phoenix.util.ByteUtil.EMPTY_BYTE_ARRAY;
@@ -1588,7 +1588,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
// lock the rows corresponding to views so that no other thread can modify the view meta-data
RowLock viewRowLock = acquireLock(region, viewKey, locks);
PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock);
- if (view.getBaseColumnCount() == QueryConstants.DIVORCED_VIEW_BASE_COLUMN_COUNT) {
+ if (view.getBaseColumnCount() == QueryConstants.DIVERGED_VIEW_BASE_COLUMN_COUNT) {
// if a view has divorced itself from the base table, we don't allow schema changes
// to be propagated to it.
return;
@@ -1641,7 +1641,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
PDataType.fromTypeId(dataColumnDataType)).getSqlType();
byte[] indexColumnDataTypeBytes = new byte[PInteger.INSTANCE.getByteSize()];
PInteger.INSTANCE.getCodec().encodeInt(indexColumnDataType, indexColumnDataTypeBytes, 0);
- indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
PhoenixDatabaseMetaData.DATA_TYPE_BYTES, indexColumnDataTypeBytes);
}
@@ -1651,7 +1651,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
PhoenixDatabaseMetaData.DECIMAL_DIGITS_BYTES);
if (decimalDigits != null && decimalDigits.size() > 0) {
Cell decimalDigit = decimalDigits.get(0);
- indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
PhoenixDatabaseMetaData.DECIMAL_DIGITS_BYTES, decimalDigit.getValueArray());
}
@@ -1661,7 +1661,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
PhoenixDatabaseMetaData.COLUMN_SIZE_BYTES);
if (columnSizes != null && columnSizes.size() > 0) {
Cell columnSize = columnSizes.get(0);
- indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
PhoenixDatabaseMetaData.COLUMN_SIZE_BYTES, columnSize.getValueArray());
}
@@ -1670,7 +1670,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
PhoenixDatabaseMetaData.SORT_ORDER_BYTES);
if (sortOrders != null && sortOrders.size() > 0) {
Cell sortOrder = sortOrders.get(0);
- indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
PhoenixDatabaseMetaData.SORT_ORDER_BYTES, sortOrder.getValueArray());
}
@@ -1679,7 +1679,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES);
if (dataTableNames != null && dataTableNames.size() > 0) {
Cell dataTableName = dataTableNames.get(0);
- indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES, dataTableName.getValueArray());
}
@@ -1687,12 +1687,12 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
byte[] ordinalPositionBytes = new byte[PInteger.INSTANCE.getByteSize()];
int ordinalPositionOfNewCol = oldNumberOfColsInIndex + deltaNumPkColsSoFar;
PInteger.INSTANCE.getCodec().encodeInt(ordinalPositionOfNewCol, ordinalPositionBytes, 0);
- indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
PhoenixDatabaseMetaData.ORDINAL_POSITION_BYTES, ordinalPositionBytes);
// New PK columns have to be nullable after the first DDL
byte[] isNullableBytes = PBoolean.INSTANCE.toBytes(true);
- indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
PhoenixDatabaseMetaData.NULLABLE_BYTES, isNullableBytes);
// Set the key sequence for the pk column to be added
@@ -1700,7 +1700,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
short newKeySeq = (short)(currentKeySeq + deltaNumPkColsSoFar);
byte[] keySeqBytes = new byte[PSmallint.INSTANCE.getByteSize()];
PSmallint.INSTANCE.getCodec().encodeShort(newKeySeq, keySeqBytes, 0);
- indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ indexColumnDefinitionPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
PhoenixDatabaseMetaData.KEY_SEQ_BYTES, keySeqBytes);
mutationsForAddingColumnsToViews.add(indexColumnDefinitionPut);
@@ -1717,14 +1717,14 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
long newSequenceNumber = index.getSequenceNumber() + 1;
byte[] newSequenceNumberPtr = new byte[PLong.INSTANCE.getByteSize()];
PLong.INSTANCE.getCodec().encodeLong(newSequenceNumber, newSequenceNumberPtr, 0);
- indexHeaderRowMutation.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ indexHeaderRowMutation.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, newSequenceNumberPtr);
// increase the column count
int newColumnCount = index.getColumns().size() + deltaNumPkColsSoFar;
byte[] newColumnCountPtr = new byte[PInteger.INSTANCE.getByteSize()];
PInteger.INSTANCE.getCodec().encodeInt(newColumnCount, newColumnCountPtr, 0);
- indexHeaderRowMutation.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ indexHeaderRowMutation.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
PhoenixDatabaseMetaData.COLUMN_COUNT_BYTES, newColumnCountPtr);
// add index row header key to the invalidate list to force clients to fetch the latest meta-data
@@ -1794,21 +1794,29 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
byte[] schemaName = rowKeyMetaData[SCHEMA_NAME_INDEX];
byte[] tableName = rowKeyMetaData[TABLE_NAME_INDEX];
PTableType type = table.getType();
- TableViewFinderResult childViewsResult = findChildViews(region, tenantId, table,
- (type == PTableType.VIEW ? PARENT_TABLE_BYTES : PHYSICAL_TABLE_BYTES));
List<Mutation> mutationsForAddingColumnsToViews = Collections.emptyList();
- if (childViewsResult.hasViews()) {
- /*
- * Adding a column is not allowed if: 1) Meta-data for child view/s spans over more than one
- * region. 2) Adding column to a views that has child view/s.
- */
- if (!childViewsResult.allViewsInSingleRegion() || type == PTableType.VIEW) {
- return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION,
- EnvironmentEdgeManager.currentTimeMillis(), null);
- } else {
- mutationsForAddingColumnsToViews = new ArrayList<>(childViewsResult.getResults().size() * tableMetaData.size());
- addRowsToChildViews(tableMetaData, mutationsForAddingColumnsToViews, schemaName, tableName, invalidateList, clientTimeStamp,
- childViewsResult, region, locks);
+ /*
+ * If adding a column to a view, we don't want to propagate those meta-data changes to the child
+ * view hierarchy. This is because our check for finding child views is expensive and we want
+ * meta-data changes to views to be light-weight. The side-effect of this change is that a child
+ * view won't have its parent view's columns, i.e. it will have diverged from the parent view. See
+ * https://issues.apache.org/jira/browse/PHOENIX-2051 for a proper way to fix the performance issue
+ * and https://issues.apache.org/jira/browse/PHOENIX-2054 for enabling meta-data changes to a view
+ * to be propagated to its view hierarchy.
+ */
+ if (type == PTableType.TABLE) {
+ TableViewFinderResult childViewsResult = findChildViews(region, tenantId, table, PHYSICAL_TABLE_BYTES);
+ if (childViewsResult.hasViews()) {
+ // Adding a column is not allowed if the meta-data for child view/s spans over
+ // more than one region (since the changes cannot be done in a transactional fashion)
+ if (!childViewsResult.allViewsInSingleRegion()) {
+ return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION,
+ EnvironmentEdgeManager.currentTimeMillis(), null);
+ } else {
+ mutationsForAddingColumnsToViews = new ArrayList<>(childViewsResult.getResults().size() * tableMetaData.size());
+ addRowsToChildViews(tableMetaData, mutationsForAddingColumnsToViews, schemaName, tableName, invalidateList, clientTimeStamp,
+ childViewsResult, region, locks);
+ }
}
}
for (Mutation m : tableMetaData) {
@@ -2041,18 +2049,19 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
continue;
}
if (table.getType() == PTableType.VIEW) {
- if (table.getBaseColumnCount() != DIVORCED_VIEW_BASE_COLUMN_COUNT
+ if (table.getBaseColumnCount() != DIVERGED_VIEW_BASE_COLUMN_COUNT
&& columnToDelete.getPosition() < table.getBaseColumnCount()) {
/*
* If the column being dropped is inherited from the base table, then the
- * view is about to divorce itself from the base table. Divorce here means
- * that any further meta-data changes made to the base table will not be
- * propagated to the hierarchy of views on the base table.
+ * view is about to diverge from the base table. The consequence of
+ * this divergence is that any further meta-data changes made to the
+ * base table will not be propagated to the hierarchy of views where this
+ * view is the root.
*/
byte[] viewKey = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
Put updateBaseColumnCountPut = new Put(viewKey);
byte[] baseColumnCountPtr = new byte[PInteger.INSTANCE.getByteSize()];
- PInteger.INSTANCE.getCodec().encodeInt(DIVORCED_VIEW_BASE_COLUMN_COUNT,
+ PInteger.INSTANCE.getCodec().encodeInt(DIVERGED_VIEW_BASE_COLUMN_COUNT,
baseColumnCountPtr, 0);
updateBaseColumnCountPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES, clientTimeStamp,
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9c069bd4/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
index f82c594..d095049 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
@@ -185,7 +185,7 @@ public interface QueryConstants {
public static final BigDecimal BD_MILLIS_IN_DAY = BigDecimal.valueOf(QueryConstants.MILLIS_IN_DAY);
public static final int MAX_ALLOWED_NANOS = 999999999;
public static final int NANOS_IN_SECOND = BigDecimal.valueOf(Math.pow(10, 9)).intValue();
- public static final int DIVORCED_VIEW_BASE_COLUMN_COUNT = -100;
+ public static final int DIVERGED_VIEW_BASE_COLUMN_COUNT = -100;
public static final int BASE_TABLE_BASE_COLUMN_COUNT = -1;
public static final String CREATE_TABLE_METADATA =
// Do not use IF NOT EXISTS as we sometimes catch the TableAlreadyExists
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9c069bd4/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index dff6598..8d574ce 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -32,7 +32,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TENANT_ID;
import static org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT;
-import static org.apache.phoenix.query.QueryConstants.DIVORCED_VIEW_BASE_COLUMN_COUNT;
+import static org.apache.phoenix.query.QueryConstants.DIVERGED_VIEW_BASE_COLUMN_COUNT;
import java.io.IOException;
import java.sql.PreparedStatement;
@@ -595,7 +595,7 @@ public class UpgradeUtil {
// We are about to iterate through columns of a different view. Check whether base column count was upserted.
// If it wasn't then it is likely the case that a column inherited from the base table was dropped from view.
if (currentViewName != null && !baseColumnCountUpserted && numBaseTableColsMatched < numColsInBaseTable) {
- upsertBaseColumnCountInHeaderRow(metaConnection, currentTenantId, currentViewSchema, currentViewName, DIVORCED_VIEW_BASE_COLUMN_COUNT);
+ upsertBaseColumnCountInHeaderRow(metaConnection, currentTenantId, currentViewSchema, currentViewName, DIVERGED_VIEW_BASE_COLUMN_COUNT);
}
// reset the values as we are now going to iterate over columns of a new view.
numBaseTableColsMatched = 0;
@@ -641,7 +641,7 @@ public class UpgradeUtil {
}
} else {
// special value to denote that the view has divorced itself from the base physical table.
- upsertBaseColumnCountInHeaderRow(metaConnection, viewTenantId, viewSchema, viewName, DIVORCED_VIEW_BASE_COLUMN_COUNT);
+ upsertBaseColumnCountInHeaderRow(metaConnection, viewTenantId, viewSchema, viewName, DIVERGED_VIEW_BASE_COLUMN_COUNT);
baseColumnCountUpserted = true;
// ignore rest of the rows for the view.
ignore = true;
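Put together, the diverged-view semantics look like this from JDBC. A minimal sketch (connection URL illustrative) matching the behavior the MetaDataEndpointImpl and UpgradeUtil changes above encode:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;

    public class DivergedViewSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                conn.createStatement().execute(
                    "CREATE TABLE BASE (PK1 VARCHAR NOT NULL PRIMARY KEY, V1 VARCHAR, V2 VARCHAR)");
                conn.createStatement().execute("CREATE VIEW V AS SELECT * FROM BASE");
                // Dropping an inherited column diverges the view: its BASE_COLUMN_COUNT
                // is set to DIVERGED_VIEW_BASE_COLUMN_COUNT (-100) in SYSTEM.CATALOG.
                conn.createStatement().execute("ALTER VIEW V DROP COLUMN V2");
                // Later base-table changes no longer propagate to the diverged view.
                conn.createStatement().execute("ALTER TABLE BASE ADD V3 VARCHAR");
                try {
                    conn.createStatement().execute("SELECT V3 FROM V");
                } catch (SQLException e) {
                    // Expected: COLUMN_NOT_FOUND, since V diverged before V3 was added.
                }
            }
        }
    }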
[15/47] phoenix git commit: PHOENIX-1504 Support adding column to a table that has views (Samarth Jain/Dave Hacker)
Posted by ma...@apache.org.
PHOENIX-1504 Support adding column to a table that has views (Samarth Jain/Dave Hacker)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e78eb6fa
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e78eb6fa
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e78eb6fa
Branch: refs/heads/calcite
Commit: e78eb6faceec40d8b09fbc7dde778b87fe54feef
Parents: 2d70eff
Author: Samarth <sa...@salesforce.com>
Authored: Thu Jun 18 15:37:37 2015 -0700
Committer: Samarth <sa...@salesforce.com>
Committed: Thu Jun 18 15:37:37 2015 -0700
----------------------------------------------------------------------
.../apache/phoenix/end2end/AlterTableIT.java | 356 +++++++++++++++++
.../end2end/TenantSpecificTablesDDLIT.java | 20 +-
.../org/apache/phoenix/end2end/UpgradeIT.java | 332 ++++++++++++++++
.../coprocessor/MetaDataEndpointImpl.java | 262 +++++++++---
.../phoenix/coprocessor/MetaDataProtocol.java | 4 +-
.../coprocessor/generated/PTableProtos.java | 103 ++++-
.../phoenix/jdbc/PhoenixDatabaseMetaData.java | 3 +-
.../query/ConnectionQueryServicesImpl.java | 51 ++-
.../apache/phoenix/query/QueryConstants.java | 30 +-
.../apache/phoenix/schema/DelegateTable.java | 5 +
.../apache/phoenix/schema/MetaDataClient.java | 37 +-
.../java/org/apache/phoenix/schema/PTable.java | 1 +
.../org/apache/phoenix/schema/PTableImpl.java | 40 +-
.../java/org/apache/phoenix/util/ByteUtil.java | 10 +-
.../org/apache/phoenix/util/UpgradeUtil.java | 395 ++++++++++++++++++-
15 files changed, 1495 insertions(+), 154 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e78eb6fa/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index 59698d6..61dd6a9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -18,6 +18,7 @@
package org.apache.phoenix.end2end;
import static org.apache.hadoop.hbase.HColumnDescriptor.DEFAULT_REPLICATION_SCOPE;
+import static org.apache.phoenix.exception.SQLExceptionCode.CANNOT_MUTATE_TABLE;
import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
import static org.apache.phoenix.util.TestUtil.closeConnection;
import static org.apache.phoenix.util.TestUtil.closeStatement;
@@ -32,9 +33,11 @@ import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Collections;
+import java.util.List;
import java.util.Map;
import java.util.Properties;
@@ -48,8 +51,10 @@ import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.TableNotFoundException;
import org.apache.phoenix.util.IndexUtil;
import org.apache.phoenix.util.PhoenixRuntime;
@@ -59,6 +64,8 @@ import org.apache.phoenix.util.SchemaUtil;
import org.junit.BeforeClass;
import org.junit.Test;
+import com.google.common.base.Objects;
+
/**
*
* A lot of tests in this class test HBase level properties. As a result,
@@ -1988,4 +1995,353 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
conn.close();
}
}
+
+ @Test
+ public void testAddColumnToTableWithViews() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ try {
+ conn.createStatement().execute("CREATE TABLE IF NOT EXISTS TABLEWITHVIEW ("
+ + " ID char(1) NOT NULL,"
+ + " COL1 integer NOT NULL,"
+ + " COL2 bigint NOT NULL,"
+ + " CONSTRAINT NAME_PK PRIMARY KEY (ID, COL1, COL2)"
+ + " )");
+ assertTableDefinition(conn, "TABLEWITHVIEW", PTableType.TABLE, null, 0, 3, -1, "ID", "COL1", "COL2");
+
+ conn.createStatement().execute("CREATE VIEW VIEWOFTABLE ( VIEW_COL1 SMALLINT ) AS SELECT * FROM TABLEWITHVIEW");
+ assertTableDefinition(conn, "VIEWOFTABLE", PTableType.VIEW, "TABLEWITHVIEW", 0, 4, 3, "ID", "COL1", "COL2", "VIEW_COL1");
+
+ conn.createStatement().execute("ALTER TABLE TABLEWITHVIEW ADD COL3 char(10)");
+ assertTableDefinition(conn, "VIEWOFTABLE", PTableType.VIEW, "TABLEWITHVIEW", 1, 5, 4, "ID", "COL1", "COL2", "COL3", "VIEW_COL1");
+
+ } finally {
+ conn.close();
+ }
+ }
+
+ private void assertTableDefinition(Connection conn, String tableName, PTableType tableType, String parentTableName, int sequenceNumber, int columnCount, int baseColumnCount, String... columnName) throws Exception {
+ PreparedStatement p = conn.prepareStatement("SELECT * FROM SYSTEM.CATALOG WHERE TABLE_NAME=? AND TABLE_TYPE=?");
+ p.setString(1, tableName);
+ p.setString(2, tableType.getSerializedValue());
+ ResultSet rs = p.executeQuery();
+ assertTrue(rs.next());
+ assertEquals(getSystemCatalogEntriesForTable(conn, tableName, "Mismatch in BaseColumnCount"), baseColumnCount, rs.getInt("BASE_COLUMN_COUNT"));
+ assertEquals(getSystemCatalogEntriesForTable(conn, tableName, "Mismatch in columnCount"), columnCount, rs.getInt("COLUMN_COUNT"));
+ assertEquals(getSystemCatalogEntriesForTable(conn, tableName, "Mismatch in sequenceNumber"), sequenceNumber, rs.getInt("TABLE_SEQ_NUM"));
+ rs.close();
+
+ ResultSet parentTableColumnsRs = null;
+ if (parentTableName != null) {
+ parentTableColumnsRs = conn.getMetaData().getColumns(null, null, parentTableName, null);
+ }
+
+ rs = conn.getMetaData().getColumns(null, null, tableName, null);
+ for (int i = 0; i < columnName.length; i++) {
+ if (columnName[i] != null) {
+ assertTrue(rs.next());
+ assertEquals(getSystemCatalogEntriesForTable(conn, tableName, "Mismatch in columnName: i=" + i), columnName[i], rs.getString("COLUMN_NAME"));
+ assertEquals(getSystemCatalogEntriesForTable(conn, tableName, "Mismatch in ordinalPosition: i=" + i), i+1, rs.getInt("ORDINAL_POSITION"));
+ if (i < baseColumnCount && parentTableColumnsRs != null) {
+ assertTrue(parentTableColumnsRs.next());
+ ResultSetMetaData md = parentTableColumnsRs.getMetaData();
+ assertEquals(md.getColumnCount(), rs.getMetaData().getColumnCount());
+ for (int columnIndex = 1; columnIndex < md.getColumnCount(); columnIndex++) {
+ String viewColumnValue = rs.getString(columnIndex);
+ String parentTableColumnValue = parentTableColumnsRs.getString(columnIndex);
+ if (!Objects.equal(viewColumnValue, parentTableColumnValue)) {
+ if (md.getColumnName(columnIndex).equals("TABLE_NAME")) {
+ assertEquals(parentTableName, parentTableColumnValue);
+ assertEquals(tableName, viewColumnValue);
+ } else {
+ fail(md.getColumnName(columnIndex) + "=" + parentTableColumnValue);
+ }
+ }
+ }
+ }
+ }
+ }
+ assertFalse(getSystemCatalogEntriesForTable(conn, tableName, ""), rs.next());
+ }
+
+ private String getSystemCatalogEntriesForTable(Connection conn, String tableName, String message) throws Exception {
+ StringBuilder sb = new StringBuilder(message);
+ sb.append("\n\n\n");
+ ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM SYSTEM.CATALOG WHERE TABLE_NAME='"+ tableName +"'");
+ ResultSetMetaData metaData = rs.getMetaData();
+ int rowNum = 0;
+ while (rs.next()) {
+ sb.append(rowNum++).append("------\n");
+ for (int i = 1; i <= metaData.getColumnCount(); i++) {
+ sb.append("\t").append(metaData.getColumnLabel(i)).append("=").append(rs.getString(i)).append("\n");
+ }
+ sb.append("\n");
+ }
+ rs.close();
+ return sb.toString();
+ }
+
+ @Test
+ public void testCacheInvalidatedAfterAddingColumnToBaseTableWithViews() throws Exception {
+ String baseTable = "testCacheInvalidatedAfterAddingColumnToBaseTableWithViews";
+ String viewName = baseTable + "_view";
+ String tenantId = "tenantId";
+ try (Connection globalConn = DriverManager.getConnection(getUrl())) {
+ String tableDDL = "CREATE TABLE " + baseTable + " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 VARCHAR CONSTRAINT NAME_PK PRIMARY KEY(TENANT_ID, PK1)) MULTI_TENANT = true " ;
+ globalConn.createStatement().execute(tableDDL);
+ Properties tenantProps = new Properties();
+ tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
+ // create a tenant specific view
+ try (Connection tenantConn = DriverManager.getConnection(getUrl(), tenantProps)) {
+ String viewDDL = "CREATE VIEW " + viewName + " AS SELECT * FROM " + baseTable;
+ tenantConn.createStatement().execute(viewDDL);
+
+ // Add a column to the base table using global connection
+ globalConn.createStatement().execute("ALTER TABLE " + baseTable + " ADD NEW_COL VARCHAR");
+
+ // Check now whether the tenant connection can see the column that was added
+ tenantConn.createStatement().execute("SELECT NEW_COL FROM " + viewName);
+ tenantConn.createStatement().execute("SELECT NEW_COL FROM " + baseTable);
+ }
+ }
+ }
+
+ @Test
+ public void testDropColumnOnTableWithViewsNotAllowed() throws Exception {
+ String baseTable = "testDropColumnOnTableWithViewsNotAllowed";
+ String viewName = baseTable + "_view";
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ String tableDDL = "CREATE TABLE " + baseTable + " (PK1 VARCHAR NOT NULL PRIMARY KEY, V1 VARCHAR, V2 VARCHAR)";
+ conn.createStatement().execute(tableDDL);
+
+ String viewDDL = "CREATE VIEW " + viewName + " AS SELECT * FROM " + baseTable;
+ conn.createStatement().execute(viewDDL);
+
+ String dropColumn = "ALTER TABLE " + baseTable + " DROP COLUMN V2";
+ try {
+ conn.createStatement().execute(dropColumn);
+ fail("Dropping column on a base table that has views is not allowed");
+ } catch (SQLException e) {
+ assertEquals(CANNOT_MUTATE_TABLE.getErrorCode(), e.getErrorCode());
+ }
+ }
+ }
+
+ @Test
+ public void testAlteringViewThatHasChildViewsNotAllowed() throws Exception {
+ String baseTable = "testAlteringViewThatHasChildViewsNotAllowed";
+ String childView = "childView";
+ String grandChildView = "grandChildView";
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ String baseTableDDL =
+ "CREATE TABLE " + baseTable + " (TENANT_ID VARCHAR NOT NULL, PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR CONSTRAINT NAME_PK PRIMARY KEY(TENANT_ID, PK2))";
+ conn.createStatement().execute(baseTableDDL);
+
+ String childViewDDL = "CREATE VIEW " + childView + " AS SELECT * FROM " + baseTable;
+ conn.createStatement().execute(childViewDDL);
+
+ String addColumnToChildViewDDL =
+ "ALTER VIEW " + childView + " ADD CHILD_VIEW_COL VARCHAR";
+ conn.createStatement().execute(addColumnToChildViewDDL);
+
+ String grandChildViewDDL =
+ "CREATE VIEW " + grandChildView + " AS SELECT * FROM " + childView;
+ conn.createStatement().execute(grandChildViewDDL);
+
+ // dropping base table column from child view should fail
+ String dropColumnFromChildView = "ALTER VIEW " + childView + " DROP COLUMN V2";
+ try {
+ conn.createStatement().execute(dropColumnFromChildView);
+ fail("Dropping columns from a view that has child views on it is not allowed");
+ } catch (SQLException e) {
+ assertEquals(CANNOT_MUTATE_TABLE.getErrorCode(), e.getErrorCode());
+ }
+
+ // dropping view specific column from child view should fail
+ dropColumnFromChildView = "ALTER VIEW " + childView + " DROP COLUMN CHILD_VIEW_COL";
+ try {
+ conn.createStatement().execute(dropColumnFromChildView);
+ fail("Dropping columns from a view that has child views on it is not allowed");
+ } catch (SQLException e) {
+ assertEquals(SQLExceptionCode.CANNOT_MUTATE_TABLE.getErrorCode(), e.getErrorCode());
+ }
+
+ // Adding column to view that has child views should fail
+ String addColumnToChildView = "ALTER VIEW " + childView + " ADD V5 VARCHAR";
+ try {
+ conn.createStatement().execute(addColumnToChildView);
+ fail("Adding columns to a view that has child views on it is not allowed");
+ } catch (SQLException e) {
+ assertEquals(SQLExceptionCode.CANNOT_MUTATE_TABLE.getErrorCode(), e.getErrorCode());
+ }
+
+ // dropping column from the grand child view, however, should work.
+ String dropColumnFromGrandChildView =
+ "ALTER VIEW " + grandChildView + " DROP COLUMN CHILD_VIEW_COL";
+ conn.createStatement().execute(dropColumnFromGrandChildView);
+
+ // similarly, dropping column inherited from the base table should work.
+ dropColumnFromGrandChildView = "ALTER VIEW " + grandChildView + " DROP COLUMN V2";
+ conn.createStatement().execute(dropColumnFromGrandChildView);
+ }
+ }
+
+ @Test
+ public void testDivorcedViewsStayDivorced() throws Exception {
+ String baseTable = "testDivorcedViewsStayDivorced";
+ String viewName = baseTable + "_view";
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ String tableDDL = "CREATE TABLE " + baseTable + " (PK1 VARCHAR NOT NULL PRIMARY KEY, V1 VARCHAR, V2 VARCHAR)";
+ conn.createStatement().execute(tableDDL);
+
+ String viewDDL = "CREATE VIEW " + viewName + " AS SELECT * FROM " + baseTable;
+ conn.createStatement().execute(viewDDL);
+
+ // Drop the column inherited from base table to divorce the view
+ String dropColumn = "ALTER VIEW " + viewName + " DROP COLUMN V2";
+ conn.createStatement().execute(dropColumn);
+
+ String alterBaseTable = "ALTER TABLE " + baseTable + " ADD V3 VARCHAR";
+ conn.createStatement().execute(alterBaseTable);
+
+ // Column V3 shouldn't have propagated to the divorced view.
+ String sql = "SELECT V3 FROM " + viewName;
+ try {
+ conn.createStatement().execute(sql);
+ } catch (SQLException e) {
+ assertEquals(SQLExceptionCode.COLUMN_NOT_FOUND.getErrorCode(), e.getErrorCode());
+ }
+ }
+ }
+
+ @Test
+ public void testAddingColumnToBaseTablePropagatesToEntireViewHierarchy() throws Exception {
+ String baseTable = "testViewHierarchy";
+ String view1 = "view1";
+ String view2 = "view2";
+ String view3 = "view3";
+ String view4 = "view4";
+ /* baseTable
+ / | \
+ view1(tenant1) view3(tenant2) view4(global)
+ /
+ view2(tenant1)
+ */
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ String baseTableDDL = "CREATE TABLE " + baseTable + " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR CONSTRAINT NAME_PK PRIMARY KEY(TENANT_ID, PK1)) MULTI_TENANT = true ";
+ conn.createStatement().execute(baseTableDDL);
+
+ try (Connection tenant1Conn = getTenantConnection("tenant1")) {
+ String view1DDL = "CREATE VIEW " + view1 + " AS SELECT * FROM " + baseTable;
+ tenant1Conn.createStatement().execute(view1DDL);
+
+ String view2DDL = "CREATE VIEW " + view2 + " AS SELECT * FROM " + view1;
+ tenant1Conn.createStatement().execute(view2DDL);
+ }
+
+ try (Connection tenant2Conn = getTenantConnection("tenant2")) {
+ String view3DDL = "CREATE VIEW " + view3 + " AS SELECT * FROM " + baseTable;
+ tenant2Conn.createStatement().execute(view3DDL);
+ }
+
+ String view4DDL = "CREATE VIEW " + view4 + " AS SELECT * FROM " + baseTable;
+ conn.createStatement().execute(view4DDL);
+
+ String alterBaseTable = "ALTER TABLE " + baseTable + " ADD V3 VARCHAR";
+ conn.createStatement().execute(alterBaseTable);
+
+ // verify that the column is visible to view4
+ conn.createStatement().execute("SELECT V3 FROM " + view4);
+
+ // verify that the column is visible to view1 and view2
+ try (Connection tenant1Conn = getTenantConnection("tenant1")) {
+ tenant1Conn.createStatement().execute("SELECT V3 from " + view1);
+ tenant1Conn.createStatement().execute("SELECT V3 from " + view2);
+ }
+
+ // verify that the column is visible to view3
+ try (Connection tenant2Conn = getTenantConnection("tenant2")) {
+ tenant2Conn.createStatement().execute("SELECT V3 from " + view3);
+ }
+
+ }
+
+ }
+
+ @Test
+ public void testChangingPKOfBaseTableChangesPKForAllViews() throws Exception {
+ String baseTable = "testChangePKOfBaseTable";
+ String view1 = "view1";
+ String view2 = "view2";
+ String view3 = "view3";
+ String view4 = "view4";
+ /* baseTable
+ / | \
+ view1(tenant1) view3(tenant2) view4(global)
+ /
+ view2(tenant1)
+ */
+ Connection tenant1Conn = null, tenant2Conn = null;
+ try (Connection globalConn = DriverManager.getConnection(getUrl())) {
+ String baseTableDDL = "CREATE TABLE "
+ + baseTable
+ + " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR CONSTRAINT NAME_PK PRIMARY KEY(TENANT_ID, PK1)) MULTI_TENANT = true ";
+ globalConn.createStatement().execute(baseTableDDL);
+
+ tenant1Conn = getTenantConnection("tenant1");
+ String view1DDL = "CREATE VIEW " + view1 + " AS SELECT * FROM " + baseTable;
+ tenant1Conn.createStatement().execute(view1DDL);
+
+ String view2DDL = "CREATE VIEW " + view2 + " AS SELECT * FROM " + view1;
+ tenant1Conn.createStatement().execute(view2DDL);
+
+ tenant2Conn = getTenantConnection("tenant2");
+ String view3DDL = "CREATE VIEW " + view3 + " AS SELECT * FROM " + baseTable;
+ tenant2Conn.createStatement().execute(view3DDL);
+
+ String view4DDL = "CREATE VIEW " + view4 + " AS SELECT * FROM " + baseTable;
+ globalConn.createStatement().execute(view4DDL);
+
+ String alterBaseTable = "ALTER TABLE " + baseTable + " ADD NEW_PK varchar primary key ";
+ globalConn.createStatement().execute(alterBaseTable);
+
+ // verify that the new column new_pk is now part of the primary key for the entire hierarchy
+ assertTrue(checkColumnPartOfPk(globalConn.unwrap(PhoenixConnection.class), "PK1", baseTable));
+ assertTrue(checkColumnPartOfPk(tenant1Conn.unwrap(PhoenixConnection.class), "PK1", view1));
+ assertTrue(checkColumnPartOfPk(tenant1Conn.unwrap(PhoenixConnection.class), "PK1", view2));
+ assertTrue(checkColumnPartOfPk(tenant2Conn.unwrap(PhoenixConnection.class), "PK1", view3));
+ assertTrue(checkColumnPartOfPk(globalConn.unwrap(PhoenixConnection.class), "PK1", view4));
+
+ } finally {
+ if (tenant1Conn != null) {
+ try {
+ tenant1Conn.close();
+ } catch (Throwable ignore) {}
+ }
+ if (tenant2Conn != null) {
+ try {
+ tenant2Conn.close();
+ } catch (Throwable ignore) {}
+ }
+ }
+
+ }
+
+ private boolean checkColumnPartOfPk(PhoenixConnection conn, String columnName, String tableName) throws SQLException {
+ String normalizedTableName = SchemaUtil.normalizeIdentifier(tableName);
+ PTable table = conn.getMetaDataCache().getTable(new PTableKey(conn.getTenantId(), normalizedTableName));
+ List<PColumn> pkCols = table.getPKColumns();
+ String normalizedColumnName = SchemaUtil.normalizeIdentifier(columnName);
+ for (PColumn pkCol : pkCols) {
+ if (pkCol.getName().getString().equals(normalizedColumnName)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ private Connection getTenantConnection(String tenantId) throws Exception {
+ Properties tenantProps = new Properties();
+ tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
+ return DriverManager.getConnection(getUrl(), tenantProps);
+ }
}
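The hierarchy tests above reduce to one propagation contract: a column added to a multi-tenant base table becomes visible through every non-diverged view over it, tenant-owned or global. A condensed sketch of that contract (connection URL and tenant id are illustrative):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    public class ViewPropagationSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.setProperty("TenantId", "tenant1"); // PhoenixRuntime.TENANT_ID_ATTRIB
            try (Connection global = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Connection tenant = DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
                global.createStatement().execute("CREATE TABLE B (TENANT_ID VARCHAR NOT NULL, "
                    + "PK1 VARCHAR NOT NULL, V1 VARCHAR "
                    + "CONSTRAINT NAME_PK PRIMARY KEY(TENANT_ID, PK1)) MULTI_TENANT = true");
                tenant.createStatement().execute("CREATE VIEW TV AS SELECT * FROM B");
                // Adding a column to the base table is visible through the tenant view.
                global.createStatement().execute("ALTER TABLE B ADD V3 VARCHAR");
                tenant.createStatement().execute("SELECT V3 FROM TV");
            }
        }
    }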
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e78eb6fa/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
index a7c7291..e1a1970 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
@@ -272,6 +272,7 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
assertEquals(CANNOT_MODIFY_VIEW_PK.getErrorCode(), expected.getErrorCode());
}
+ // try removing a non-PK col
try {
conn.createStatement().execute("alter table " + TENANT_TABLE_NAME + " drop column id");
fail();
@@ -291,25 +292,6 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(nextTimestamp()));
Connection conn = DriverManager.getConnection(getUrl(), props);
try {
- // try adding a PK col
- try {
- conn.createStatement().execute("alter table " + PARENT_TABLE_NAME + " add new_pk varchar primary key");
- fail();
- }
- catch (SQLException expected) {
- assertEquals(CANNOT_MUTATE_TABLE.getErrorCode(), expected.getErrorCode());
- }
-
- // try adding a non-PK col
- try {
- conn.createStatement().execute("alter table " + PARENT_TABLE_NAME + " add new_col char(1)");
- fail();
- }
- catch (SQLException expected) {
- assertEquals(CANNOT_MUTATE_TABLE.getErrorCode(), expected.getErrorCode());
- }
-
- // try removing a PK col
try {
conn.createStatement().execute("alter table " + PARENT_TABLE_NAME + " drop column id");
fail();
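The deleted assertions in the hunk above expected CANNOT_MUTATE_TABLE when adding a column to a multi-tenant parent table; they are removed presumably because this patch makes such adds legal and propagates them to child views (see the addRowsToChildViews change in MetaDataEndpointImpl further down). A minimal sketch of the DDL that previously failed and is now expected to succeed, reusing the test's PARENT_TABLE_NAME and connection setup:

    // Sketch only: both statements used to surface CANNOT_MUTATE_TABLE; with this
    // patch they succeed and the new columns are pushed into every child view.
    try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
        conn.createStatement().execute(
            "ALTER TABLE " + PARENT_TABLE_NAME + " ADD new_pk VARCHAR PRIMARY KEY");
        conn.createStatement().execute(
            "ALTER TABLE " + PARENT_TABLE_NAME + " ADD new_col CHAR(1)");
    }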
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e78eb6fa/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
new file mode 100644
index 0000000..886e567
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
@@ -0,0 +1,332 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkNotNull;
+import static org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT;
+import static org.apache.phoenix.query.QueryConstants.DIVORCED_VIEW_BASE_COLUMN_COUNT;
+import static org.apache.phoenix.util.UpgradeUtil.SELECT_BASE_COLUMN_COUNT_FROM_HEADER_ROW;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Properties;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.UpgradeUtil;
+import org.junit.Test;
+
+public class UpgradeIT extends BaseHBaseManagedTimeIT {
+
+ private static final String TENANT_ID = "tenantId";
+
+ @Test
+ public void testUpgradeForTenantViewWithSameColumnsAsBaseTable() throws Exception {
+ testViewUpgrade(true, TENANT_ID, null, "TABLEWITHVIEW1", null, "VIEW1", ColumnDiff.EQUAL);
+ testViewUpgrade(true, TENANT_ID, "TABLESCHEMA", "TABLEWITHVIEW", null, "VIEW2",
+ ColumnDiff.EQUAL);
+ testViewUpgrade(true, TENANT_ID, null, "TABLEWITHVIEW3", "VIEWSCHEMA", "VIEW3",
+ ColumnDiff.EQUAL);
+ testViewUpgrade(true, TENANT_ID, "TABLESCHEMA", "TABLEWITHVIEW4", "VIEWSCHEMA", "VIEW4",
+ ColumnDiff.EQUAL);
+ testViewUpgrade(true, TENANT_ID, "SAMESCHEMA", "TABLEWITHVIEW5", "SAMESCHEMA", "VIEW5",
+ ColumnDiff.EQUAL);
+ }
+
+ @Test
+ public void testUpgradeForTenantViewWithMoreColumnsThanBaseTable() throws Exception {
+ testViewUpgrade(true, TENANT_ID, null, "TABLEWITHVIEW1", null, "VIEW1", ColumnDiff.MORE);
+ testViewUpgrade(true, TENANT_ID, "TABLESCHEMA", "TABLEWITHVIEW", null, "VIEW2",
+ ColumnDiff.MORE);
+ testViewUpgrade(true, TENANT_ID, null, "TABLEWITHVIEW3", "VIEWSCHEMA", "VIEW3",
+ ColumnDiff.MORE);
+ testViewUpgrade(true, TENANT_ID, "TABLESCHEMA", "TABLEWITHVIEW4", "VIEWSCHEMA", "VIEW4",
+ ColumnDiff.MORE);
+ testViewUpgrade(true, TENANT_ID, "SAMESCHEMA", "TABLEWITHVIEW5", "SAMESCHEMA", "VIEW5",
+ ColumnDiff.MORE);
+ }
+
+ @Test
+ public void testUpgradeForViewWithSameColumnsAsBaseTable() throws Exception {
+ testViewUpgrade(false, null, null, "TABLEWITHVIEW1", null, "VIEW1", ColumnDiff.EQUAL);
+ testViewUpgrade(false, null, "TABLESCHEMA", "TABLEWITHVIEW", null, "VIEW2",
+ ColumnDiff.EQUAL);
+ testViewUpgrade(false, null, null, "TABLEWITHVIEW3", "VIEWSCHEMA", "VIEW3",
+ ColumnDiff.EQUAL);
+ testViewUpgrade(false, null, "TABLESCHEMA", "TABLEWITHVIEW4", "VIEWSCHEMA", "VIEW4",
+ ColumnDiff.EQUAL);
+ testViewUpgrade(false, null, "SAMESCHEMA", "TABLEWITHVIEW5", "SAMESCHEMA", "VIEW5",
+ ColumnDiff.EQUAL);
+ }
+
+ @Test
+ public void testUpgradeForViewWithMoreColumnsThanBaseTable() throws Exception {
+ testViewUpgrade(false, null, null, "TABLEWITHVIEW1", null, "VIEW1", ColumnDiff.MORE);
+ testViewUpgrade(false, null, "TABLESCHEMA", "TABLEWITHVIEW", null, "VIEW2", ColumnDiff.MORE);
+ testViewUpgrade(false, null, null, "TABLEWITHVIEW3", "VIEWSCHEMA", "VIEW3", ColumnDiff.MORE);
+ testViewUpgrade(false, null, "TABLESCHEMA", "TABLEWITHVIEW4", "VIEWSCHEMA", "VIEW4",
+ ColumnDiff.MORE);
+ testViewUpgrade(false, null, "SAMESCHEMA", "TABLEWITHVIEW5", "SAMESCHEMA", "VIEW5",
+ ColumnDiff.MORE);
+ }
+
+ @Test
+ public void testSettingBaseColumnCountWhenBaseTableColumnDropped() throws Exception {
+ testViewUpgrade(true, TENANT_ID, null, "TABLEWITHVIEW1", null, "VIEW1", ColumnDiff.MORE);
+ testViewUpgrade(true, TENANT_ID, "TABLESCHEMA", "TABLEWITHVIEW", null, "VIEW2",
+ ColumnDiff.LESS);
+ testViewUpgrade(true, TENANT_ID, null, "TABLEWITHVIEW3", "VIEWSCHEMA", "VIEW3",
+ ColumnDiff.LESS);
+ testViewUpgrade(true, TENANT_ID, "TABLESCHEMA", "TABLEWITHVIEW4", "VIEWSCHEMA", "VIEW4",
+ ColumnDiff.LESS);
+ testViewUpgrade(true, TENANT_ID, "SAMESCHEMA", "TABLEWITHVIEW5", "SAMESCHEMA", "VIEW5",
+ ColumnDiff.LESS);
+ }
+
+ @Test
+ public void testSettingBaseColumnCountForMultipleViewsOnTable() throws Exception {
+ String baseSchema = "XYZ";
+ String baseTable = "BASE_TABLE";
+ String fullBaseTableName = SchemaUtil.getTableName(baseSchema, baseTable);
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ String baseTableDDL = "CREATE TABLE " + fullBaseTableName + " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 INTEGER, V2 INTEGER CONSTRAINT NAME_PK PRIMARY KEY(TENANT_ID, PK1)) MULTI_TENANT = true";
+ conn.createStatement().execute(baseTableDDL);
+
+ for (int i = 1; i <= 2; i++) {
+ // Create views for tenants.
+ String tenant = "tenant" + i;
+ try (Connection tenantConn = createTenantConnection(tenant)) {
+ String view = "TENANT_VIEW1";
+
+ // view with its own column
+ String viewDDL = "CREATE VIEW " + view + " AS SELECT * FROM " + fullBaseTableName;
+ tenantConn.createStatement().execute(viewDDL);
+ String addCols = "ALTER VIEW " + view + " ADD COL1 VARCHAR ";
+ tenantConn.createStatement().execute(addCols);
+ removeBaseColumnCountKV(tenant, null, view);
+
+ // view that has the last base table column removed
+ view = "TENANT_VIEW2";
+ viewDDL = "CREATE VIEW " + view + " AS SELECT * FROM " + fullBaseTableName;
+ tenantConn.createStatement().execute(viewDDL);
+ String dropLastBaseCol = "ALTER VIEW " + view + " DROP COLUMN V2";
+ tenantConn.createStatement().execute(dropLastBaseCol);
+ removeBaseColumnCountKV(tenant, null, view);
+
+ // view that has the middle base table column removed
+ view = "TENANT_VIEW3";
+ viewDDL = "CREATE VIEW " + view + " AS SELECT * FROM " + fullBaseTableName;
+ tenantConn.createStatement().execute(viewDDL);
+ String dropMiddleBaseCol = "ALTER VIEW " + view + " DROP COLUMN V1";
+ tenantConn.createStatement().execute(dropMiddleBaseCol);
+ removeBaseColumnCountKV(tenant, null, view);
+ }
+ }
+
+ // create global views
+ try (Connection globalConn = DriverManager.getConnection(getUrl())) {
+ String view = "GLOBAL_VIEW1";
+
+ // view with its own column
+ String viewDDL = "CREATE VIEW " + view + " AS SELECT * FROM " + fullBaseTableName;
+ globalConn.createStatement().execute(viewDDL);
+ String addCols = "ALTER VIEW " + view + " ADD COL1 VARCHAR ";
+ globalConn.createStatement().execute(addCols);
+ removeBaseColumnCountKV(null, null, view);
+
+ // view that has the last base table column removed
+ view = "GLOBAL_VIEW2";
+ viewDDL = "CREATE VIEW " + view + " AS SELECT * FROM " + fullBaseTableName;
+ globalConn.createStatement().execute(viewDDL);
+ String dropLastBaseCol = "ALTER VIEW " + view + " DROP COLUMN V2";
+ globalConn.createStatement().execute(dropLastBaseCol);
+ removeBaseColumnCountKV(null, null, view);
+
+ // view that has the middle base table column removed
+ view = "GLOBAL_VIEW3";
+ viewDDL = "CREATE VIEW " + view + " AS SELECT * FROM " + fullBaseTableName;
+ globalConn.createStatement().execute(viewDDL);
+ String dropMiddleBaseCol = "ALTER VIEW " + view + " DROP COLUMN V1";
+ globalConn.createStatement().execute(dropMiddleBaseCol);
+ removeBaseColumnCountKV(null, null, view);
+ }
+
+ // run upgrade
+ UpgradeUtil.upgradeTo4_5_0(conn.unwrap(PhoenixConnection.class));
+
+ // Verify base column counts for tenant specific views
+ for (int i = 1; i <= 2; i++) {
+ String tenantId = "tenant" + i;
+ checkBaseColumnCount(tenantId, null, "TENANT_VIEW1", 4);
+ checkBaseColumnCount(tenantId, null, "TENANT_VIEW2", DIVORCED_VIEW_BASE_COLUMN_COUNT);
+ checkBaseColumnCount(tenantId, null, "TENANT_VIEW3", DIVORCED_VIEW_BASE_COLUMN_COUNT);
+ }
+
+ // Verify base column count for global views
+ checkBaseColumnCount(null, null, "GLOBAL_VIEW1", 4);
+ checkBaseColumnCount(null, null, "GLOBAL_VIEW2", DIVORCED_VIEW_BASE_COLUMN_COUNT);
+ checkBaseColumnCount(null, null, "GLOBAL_VIEW3", DIVORCED_VIEW_BASE_COLUMN_COUNT);
+ }
+
+
+ }
+
+ private enum ColumnDiff {
+ MORE, EQUAL, LESS
+ };
+
+ private void testViewUpgrade(boolean tenantView, String tenantId, String baseTableSchema,
+ String baseTableName, String viewSchema, String viewName, ColumnDiff diff)
+ throws Exception {
+ if (tenantView) {
+ checkNotNull(tenantId);
+ } else {
+ checkArgument(tenantId == null);
+ }
+ Connection conn = DriverManager.getConnection(getUrl());
+ String fullViewName = SchemaUtil.getTableName(viewSchema, viewName);
+ String fullBaseTableName = SchemaUtil.getTableName(baseTableSchema, baseTableName);
+ try {
+ int expectedBaseColumnCount;
+ conn.createStatement().execute(
+ "CREATE TABLE IF NOT EXISTS " + fullBaseTableName + " ("
+ + " TENANT_ID CHAR(15) NOT NULL, " + " PK1 integer NOT NULL, "
+ + "PK2 bigint NOT NULL, " + "CF1.V1 VARCHAR, " + "CF2.V2 VARCHAR, "
+ + "V3 CHAR(100) ARRAY[4] "
+ + " CONSTRAINT NAME_PK PRIMARY KEY (TENANT_ID, PK1, PK2)"
+ + " ) MULTI_TENANT= true");
+
+ // create a view with same columns as base table.
+ try (Connection conn2 = getConnection(tenantView, tenantId)) {
+ conn2.createStatement().execute(
+ "CREATE VIEW " + fullViewName + " AS SELECT * FROM " + fullBaseTableName);
+ }
+
+ if (diff == ColumnDiff.MORE) {
+ // add a column to the view
+ try (Connection conn3 = getConnection(tenantView, tenantId)) {
+ conn3.createStatement().execute(
+ "ALTER VIEW " + fullViewName + " ADD VIEW_COL1 VARCHAR");
+ }
+ }
+ if (diff == ColumnDiff.LESS) {
+ try (Connection conn3 = getConnection(tenantView, tenantId)) {
+ conn3.createStatement().execute(
+ "ALTER VIEW " + fullViewName + " DROP COLUMN CF2.V2");
+ }
+ expectedBaseColumnCount = DIVORCED_VIEW_BASE_COLUMN_COUNT;
+ } else {
+ expectedBaseColumnCount = 6;
+ }
+
+ checkBaseColumnCount(tenantId, viewSchema, viewName, expectedBaseColumnCount);
+ checkBaseColumnCount(null, baseTableSchema, baseTableName, BASE_TABLE_BASE_COLUMN_COUNT);
+
+ // remove base column count kv so we can check whether the upgrade code is setting the
+ // base column count correctly.
+ removeBaseColumnCountKV(tenantId, viewSchema, viewName);
+ removeBaseColumnCountKV(null, baseTableSchema, baseTableName);
+
+ // assert that removing the base column count key value worked correctly.
+ checkBaseColumnCount(tenantId, viewSchema, viewName, 0);
+ checkBaseColumnCount(null, baseTableSchema, baseTableName, 0);
+
+ // run upgrade
+ UpgradeUtil.upgradeTo4_5_0(conn.unwrap(PhoenixConnection.class));
+
+ checkBaseColumnCount(tenantId, viewSchema, viewName, expectedBaseColumnCount);
+ checkBaseColumnCount(null, baseTableSchema, baseTableName, BASE_TABLE_BASE_COLUMN_COUNT);
+ } finally {
+ conn.close();
+ }
+ }
+
+ private static void checkBaseColumnCount(String tenantId, String schemaName, String tableName,
+ int expectedBaseColumnCount) throws Exception {
+ checkNotNull(tableName);
+ Connection conn = DriverManager.getConnection(getUrl());
+ String sql =
+ String.format(SELECT_BASE_COLUMN_COUNT_FROM_HEADER_ROW,
+ tenantId == null ? " IS NULL " : " = ? ",
+ schemaName == null ? " IS NULL " : " = ? ");
+ int paramIndex = 1;
+ PreparedStatement stmt = conn.prepareStatement(sql);
+ if (tenantId != null) {
+ stmt.setString(paramIndex++, tenantId);
+ }
+ if (schemaName != null) {
+ stmt.setString(paramIndex++, schemaName);
+ }
+ stmt.setString(paramIndex, tableName);
+ ResultSet rs = stmt.executeQuery();
+ assertTrue(rs.next());
+ assertEquals(expectedBaseColumnCount, rs.getInt(1));
+ assertFalse(rs.next());
+ }
+
+ private static void removeBaseColumnCountKV(
+ String tenantId, String schemaName, String tableName)
+ throws Exception {
+ byte[] rowKey =
+ SchemaUtil.getTableKey(tenantId == null ? new byte[0] : Bytes.toBytes(tenantId),
+ schemaName == null ? new byte[0] : Bytes.toBytes(schemaName),
+ Bytes.toBytes(tableName));
+ Put viewColumnDefinitionPut = new Put(rowKey, HConstants.LATEST_TIMESTAMP);
+ viewColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES, HConstants.LATEST_TIMESTAMP, null);
+
+ try (PhoenixConnection conn =
+ (DriverManager.getConnection(getUrl())).unwrap(PhoenixConnection.class)) {
+ try (HTableInterface htable =
+ conn.getQueryServices().getTable(
+ Bytes.toBytes(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME))) {
+ RowMutations mutations = new RowMutations(rowKey);
+ mutations.add(viewColumnDefinitionPut);
+ htable.mutateRow(mutations);
+ }
+ }
+ }
+
+ private Connection createTenantConnection(String tenantId) throws SQLException {
+ Properties props = new Properties();
+ props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
+ return DriverManager.getConnection(getUrl(), props);
+ }
+
+ private Connection getConnection(boolean tenantSpecific, String tenantId) throws SQLException {
+ if (tenantSpecific) {
+ checkNotNull(tenantId);
+ return createTenantConnection(tenantId);
+ }
+ return DriverManager.getConnection(getUrl());
+ }
+}
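checkBaseColumnCount above fills SELECT_BASE_COLUMN_COUNT_FROM_HEADER_ROW with either an IS NULL predicate or a bind parameter for the tenant and schema. The constant itself lives in UpgradeUtil and is not shown in this patch; judging from the call site it is shaped roughly like the following reconstruction (an assumption, not the verbatim definition):

    // Hypothetical reconstruction: the two %s slots receive " IS NULL " or " = ? "
    // depending on whether tenantId/schemaName are null in checkBaseColumnCount.
    static final String SELECT_BASE_COLUMN_COUNT_FROM_HEADER_ROW =
        "SELECT BASE_COLUMN_COUNT FROM SYSTEM.\"CATALOG\" "
            + "WHERE TENANT_ID %s AND TABLE_SCHEM %s AND TABLE_NAME = ?";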
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e78eb6fa/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 1d578f5..077e325 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -17,6 +17,7 @@
*/
package org.apache.phoenix.coprocessor;
+import static org.apache.hadoop.hbase.KeyValueUtil.createFirstOnRow;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE_BYTES;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CLASS_NAME_BYTES;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_COUNT_BYTES;
@@ -61,6 +62,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT_BYTE
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_BYTES;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT_BYTES;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE_BYTES;
+import static org.apache.phoenix.query.QueryConstants.DIVORCED_VIEW_BASE_COLUMN_COUNT;
import static org.apache.phoenix.schema.PTableType.INDEX;
import static org.apache.phoenix.util.SchemaUtil.getVarCharLength;
import static org.apache.phoenix.util.SchemaUtil.getVarChars;
@@ -77,6 +79,7 @@ import java.util.List;
import java.util.Set;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HConstants;
@@ -204,24 +207,26 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
private static final Logger logger = LoggerFactory.getLogger(MetaDataEndpointImpl.class);
// KeyValues for Table
- private static final KeyValue TABLE_TYPE_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES);
- private static final KeyValue TABLE_SEQ_NUM_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
- private static final KeyValue COLUMN_COUNT_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_COUNT_BYTES);
- private static final KeyValue SALT_BUCKETS_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, SALT_BUCKETS_BYTES);
- private static final KeyValue PK_NAME_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, PK_NAME_BYTES);
- private static final KeyValue DATA_TABLE_NAME_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES);
- private static final KeyValue INDEX_STATE_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_STATE_BYTES);
- private static final KeyValue IMMUTABLE_ROWS_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IMMUTABLE_ROWS_BYTES);
- private static final KeyValue VIEW_EXPRESSION_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_STATEMENT_BYTES);
- private static final KeyValue DEFAULT_COLUMN_FAMILY_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DEFAULT_COLUMN_FAMILY_NAME_BYTES);
- private static final KeyValue DISABLE_WAL_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DISABLE_WAL_BYTES);
- private static final KeyValue MULTI_TENANT_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MULTI_TENANT_BYTES);
- private static final KeyValue VIEW_TYPE_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_TYPE_BYTES);
- private static final KeyValue VIEW_INDEX_ID_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_INDEX_ID_BYTES);
- private static final KeyValue INDEX_TYPE_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_TYPE_BYTES);
- private static final KeyValue INDEX_DISABLE_TIMESTAMP_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES);
- private static final KeyValue STORE_NULLS_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, STORE_NULLS_BYTES);
- private static final KeyValue EMPTY_KEYVALUE_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES);
+ private static final KeyValue TABLE_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES);
+ private static final KeyValue TABLE_SEQ_NUM_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
+ private static final KeyValue COLUMN_COUNT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_COUNT_BYTES);
+ private static final KeyValue SALT_BUCKETS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, SALT_BUCKETS_BYTES);
+ private static final KeyValue PK_NAME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, PK_NAME_BYTES);
+ private static final KeyValue DATA_TABLE_NAME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES);
+ private static final KeyValue INDEX_STATE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_STATE_BYTES);
+ private static final KeyValue IMMUTABLE_ROWS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IMMUTABLE_ROWS_BYTES);
+ private static final KeyValue VIEW_EXPRESSION_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_STATEMENT_BYTES);
+ private static final KeyValue DEFAULT_COLUMN_FAMILY_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DEFAULT_COLUMN_FAMILY_NAME_BYTES);
+ private static final KeyValue DISABLE_WAL_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DISABLE_WAL_BYTES);
+ private static final KeyValue MULTI_TENANT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MULTI_TENANT_BYTES);
+ private static final KeyValue VIEW_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_TYPE_BYTES);
+ private static final KeyValue VIEW_INDEX_ID_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_INDEX_ID_BYTES);
+ private static final KeyValue INDEX_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_TYPE_BYTES);
+ private static final KeyValue INDEX_DISABLE_TIMESTAMP_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES);
+ private static final KeyValue STORE_NULLS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, STORE_NULLS_BYTES);
+ private static final KeyValue EMPTY_KEYVALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES);
+ private static final KeyValue BASE_COLUMN_COUNT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES);
+
private static final List<KeyValue> TABLE_KV_COLUMNS = Arrays.<KeyValue>asList(
EMPTY_KEYVALUE_KV,
TABLE_TYPE_KV,
@@ -240,7 +245,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
VIEW_INDEX_ID_KV,
INDEX_TYPE_KV,
INDEX_DISABLE_TIMESTAMP_KV,
- STORE_NULLS_KV
+ STORE_NULLS_KV,
+ BASE_COLUMN_COUNT_KV
);
static {
Collections.sort(TABLE_KV_COLUMNS, KeyValue.COMPARATOR);
@@ -262,18 +268,19 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
private static final int VIEW_INDEX_ID_INDEX = TABLE_KV_COLUMNS.indexOf(VIEW_INDEX_ID_KV);
private static final int INDEX_TYPE_INDEX = TABLE_KV_COLUMNS.indexOf(INDEX_TYPE_KV);
private static final int STORE_NULLS_INDEX = TABLE_KV_COLUMNS.indexOf(STORE_NULLS_KV);
+ private static final int BASE_COLUMN_COUNT_INDEX = TABLE_KV_COLUMNS.indexOf(BASE_COLUMN_COUNT_KV);
// KeyValues for Column
- private static final KeyValue DECIMAL_DIGITS_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DECIMAL_DIGITS_BYTES);
- private static final KeyValue COLUMN_SIZE_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_SIZE_BYTES);
- private static final KeyValue NULLABLE_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, NULLABLE_BYTES);
- private static final KeyValue DATA_TYPE_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DATA_TYPE_BYTES);
- private static final KeyValue ORDINAL_POSITION_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ORDINAL_POSITION_BYTES);
- private static final KeyValue SORT_ORDER_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, SORT_ORDER_BYTES);
- private static final KeyValue ARRAY_SIZE_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ARRAY_SIZE_BYTES);
- private static final KeyValue VIEW_CONSTANT_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_CONSTANT_BYTES);
- private static final KeyValue IS_VIEW_REFERENCED_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_VIEW_REFERENCED_BYTES);
- private static final KeyValue COLUMN_DEF_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_DEF_BYTES);
+ private static final KeyValue DECIMAL_DIGITS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DECIMAL_DIGITS_BYTES);
+ private static final KeyValue COLUMN_SIZE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_SIZE_BYTES);
+ private static final KeyValue NULLABLE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, NULLABLE_BYTES);
+ private static final KeyValue DATA_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DATA_TYPE_BYTES);
+ private static final KeyValue ORDINAL_POSITION_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ORDINAL_POSITION_BYTES);
+ private static final KeyValue SORT_ORDER_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, SORT_ORDER_BYTES);
+ private static final KeyValue ARRAY_SIZE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ARRAY_SIZE_BYTES);
+ private static final KeyValue VIEW_CONSTANT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_CONSTANT_BYTES);
+ private static final KeyValue IS_VIEW_REFERENCED_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_VIEW_REFERENCED_BYTES);
+ private static final KeyValue COLUMN_DEF_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_DEF_BYTES);
private static final List<KeyValue> COLUMN_KV_COLUMNS = Arrays.<KeyValue>asList(
DECIMAL_DIGITS_KV,
COLUMN_SIZE_KV,
@@ -303,16 +310,16 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
private static final int LINK_TYPE_INDEX = 0;
- private static final KeyValue CLASS_NAME_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, CLASS_NAME_BYTES);
- private static final KeyValue JAR_PATH_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, JAR_PATH_BYTES);
- private static final KeyValue RETURN_TYPE_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, RETURN_TYPE_BYTES);
- private static final KeyValue NUM_ARGS_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, NUM_ARGS_BYTES);
- private static final KeyValue TYPE_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TYPE_BYTES);
- private static final KeyValue IS_CONSTANT_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_CONSTANT_BYTES);
- private static final KeyValue DEFAULT_VALUE_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DEFAULT_VALUE_BYTES);
- private static final KeyValue MIN_VALUE_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MIN_VALUE_BYTES);
- private static final KeyValue MAX_VALUE_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MAX_VALUE_BYTES);
- private static final KeyValue IS_ARRAY_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_ARRAY_BYTES);
+ private static final KeyValue CLASS_NAME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, CLASS_NAME_BYTES);
+ private static final KeyValue JAR_PATH_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, JAR_PATH_BYTES);
+ private static final KeyValue RETURN_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, RETURN_TYPE_BYTES);
+ private static final KeyValue NUM_ARGS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, NUM_ARGS_BYTES);
+ private static final KeyValue TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TYPE_BYTES);
+ private static final KeyValue IS_CONSTANT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_CONSTANT_BYTES);
+ private static final KeyValue DEFAULT_VALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DEFAULT_VALUE_BYTES);
+ private static final KeyValue MIN_VALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MIN_VALUE_BYTES);
+ private static final KeyValue MAX_VALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MAX_VALUE_BYTES);
+ private static final KeyValue IS_ARRAY_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_ARRAY_BYTES);
private static final List<KeyValue> FUNCTION_KV_COLUMNS = Arrays.<KeyValue>asList(
EMPTY_KEYVALUE_KV,
@@ -400,7 +407,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
byte[] tableName = request.getTableName().toByteArray();
byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
long tableTimeStamp = request.getTableTimestamp();
-
try {
// TODO: check that key is within region.getStartKey() and region.getEndKey()
// and return special code to force client to lookup region from meta.
@@ -766,7 +772,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
Short viewIndexId = viewIndexIdKv == null ? null : (Short)MetaDataUtil.getViewIndexIdDataType().getCodec().decodeShort(viewIndexIdKv.getValueArray(), viewIndexIdKv.getValueOffset(), SortOrder.getDefault());
Cell indexTypeKv = tableKeyValues[INDEX_TYPE_INDEX];
IndexType indexType = indexTypeKv == null ? null : IndexType.fromSerializedValue(indexTypeKv.getValueArray()[indexTypeKv.getValueOffset()]);
-
+ Cell baseColumnCountKv = tableKeyValues[BASE_COLUMN_COUNT_INDEX];
+ int baseColumnCount = baseColumnCountKv == null ? 0 : PInteger.INSTANCE.getCodec().decodeInt(baseColumnCountKv.getValueArray(),
+ baseColumnCountKv.getValueOffset(), SortOrder.getDefault());
List<PColumn> columns = Lists.newArrayListWithExpectedSize(columnCount);
List<PTable> indexes = new ArrayList<PTable>();
@@ -811,7 +819,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
return PTableImpl.makePTable(tenantId, schemaName, tableName, tableType, indexState, timeStamp,
tableSeqNum, pkName, saltBucketNum, columns, tableType == INDEX ? schemaName : null,
tableType == INDEX ? dataTableName : null, indexes, isImmutableRows, physicalTables, defaultFamilyName, viewStatement,
- disableWAL, multiTenant, storeNulls, viewType, viewIndexId, indexType, stats);
+ disableWAL, multiTenant, storeNulls, viewType, viewIndexId, indexType, stats, baseColumnCount);
}
private PFunction getFunction(RegionScanner scanner)
@@ -1161,14 +1169,16 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
locks.add(rowLock);
}
- protected static final byte[] PHYSICAL_TABLE_BYTES = new byte[] {PTable.LinkType.PHYSICAL_TABLE.getSerializedValue()};
+ private static final byte[] PHYSICAL_TABLE_BYTES = new byte[] {PTable.LinkType.PHYSICAL_TABLE.getSerializedValue()};
+ private static final byte[] PARENT_TABLE_BYTES = new byte[] {PTable.LinkType.PARENT_TABLE.getSerializedValue()};
+
/**
* Checks whether child views exist for the given parent table.
* @param table the parent table whose child views are looked up
* TODO: should we pass a timestamp here?
*/
@SuppressWarnings("deprecation")
- private TableViewFinderResult findChildViews(Region region, byte[] tenantId, PTable table) throws IOException {
+ private TableViewFinderResult findChildViews(Region region, byte[] tenantId, PTable table, byte[] linkTypeBytes) throws IOException {
byte[] schemaName = table.getSchemaName().getBytes();
byte[] tableName = table.getTableName().getBytes();
boolean isMultiTenant = table.isMultiTenant();
@@ -1182,13 +1192,15 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
scan.setStartRow(startRow);
scan.setStopRow(stopRow);
}
- SingleColumnValueFilter linkFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, CompareOp.EQUAL, PHYSICAL_TABLE_BYTES);
+ SingleColumnValueFilter linkFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, CompareOp.EQUAL, linkTypeBytes);
linkFilter.setFilterIfMissing(true);
byte[] suffix = ByteUtil.concat(QueryConstants.SEPARATOR_BYTE_ARRAY, SchemaUtil.getTableNameAsBytes(schemaName, tableName));
SuffixFilter rowFilter = new SuffixFilter(suffix);
Filter filter = new FilterList(linkFilter, rowFilter);
scan.setFilter(filter);
scan.addColumn(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES);
+ scan.addColumn(TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
+
// Original region-only scanner modified due to PHOENIX-1208
// RegionScanner scanner = region.getScanner(scan);
// The following *should* work, but doesn't due to HBASE-11837
@@ -1354,7 +1366,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
// Handle any child views that exist
- TableViewFinderResult tableViewFinderResult = findChildViews(region, tenantId, table);
+ TableViewFinderResult tableViewFinderResult = findChildViews(region, tenantId, table, PHYSICAL_TABLE_BYTES);
if (tableViewFinderResult.hasViews()) {
if (isCascade) {
if (tableViewFinderResult.allViewsInMultipleRegions()) {
@@ -1438,7 +1450,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
private static interface ColumnMutator {
MetaDataMutationResult updateMutation(PTable table, byte[][] rowKeyMetaData,
List<Mutation> tableMetadata, Region region,
- List<ImmutableBytesPtr> invalidateList, List<RowLock> locks) throws IOException,
+ List<ImmutableBytesPtr> invalidateList, List<RowLock> locks, long clientTimeStamp) throws IOException,
SQLException;
}
@@ -1521,24 +1533,24 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
EnvironmentEdgeManager.currentTimeMillis(), null);
} else {
// server-side, except for indexing, we always expect the keyvalues to be standard KeyValues
- PTableType expectedType = MetaDataUtil.getTableType(tableMetadata, GenericKeyValueBuilder.INSTANCE, new ImmutableBytesPtr());
+ PTableType expectedType = MetaDataUtil.getTableType(tableMetadata, GenericKeyValueBuilder.INSTANCE,
+ new ImmutableBytesPtr());
// We said to drop a table, but found a view or vice versa
- if (type != expectedType) {
- return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
- }
- if (findChildViews(region, tenantId, table).hasViews()) {
- // Disallow any column mutations for parents of tenant tables
- return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), null);
+ if (type != expectedType) { return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND,
+ EnvironmentEdgeManager.currentTimeMillis(), null); }
+ if (table.getBaseColumnCount() == 0) {
+ // If the base column count hasn't been set, then it means that the upgrade
+ // to 4.5.0 is in progress. Have the client retry the mutation operation.
+ return new MetaDataMutationResult(MutationCode.CONCURRENT_TABLE_MUTATION,
+ EnvironmentEdgeManager.currentTimeMillis(), table);
}
}
result = mutator.updateMutation(table, rowKeyMetaData, tableMetadata, region,
- invalidateList, locks);
+ invalidateList, locks, clientTimeStamp);
if (result != null) {
return result;
}
-
- region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet(), HConstants.NO_NONCE,
- HConstants.NO_NONCE);
+ region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
// Invalidate from cache
for (ImmutableBytesPtr invalidateKey : invalidateList) {
metaDataCache.invalidate(invalidateKey);
@@ -1557,6 +1569,85 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
}
+ private void addRowsToChildViews(List<Mutation> tableMetadata, List<Mutation> mutationsForAddingColumnsToViews, byte[] schemaName, byte[] tableName,
+ List<ImmutableBytesPtr> invalidateList, long clientTimeStamp, TableViewFinderResult childViewsResult,
+ Region region, List<RowLock> locks) throws IOException, SQLException {
+ for (Result viewResult : childViewsResult.getResults()) {
+ byte[][] rowViewKeyMetaData = new byte[3][];
+ getVarChars(viewResult.getRow(), 3, rowViewKeyMetaData);
+ byte[] viewTenantId = rowViewKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
+ byte[] viewSchemaName = rowViewKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
+ byte[] viewName = rowViewKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
+ byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
+ PTable view = doGetTable(viewKey, clientTimeStamp);
+
+ if (view.getBaseColumnCount() == QueryConstants.DIVORCED_VIEW_BASE_COLUMN_COUNT) {
+ // if a view has divorced itself from the base table, we don't allow schema changes
+ // to be propagated to it.
+ return;
+ }
+ // lock the rows corresponding to views so that no other thread can modify the view meta-data
+ acquireLock(region, viewKey, locks);
+
+ int deltaNumberOfColumns = 0;
+
+ for (Mutation m : tableMetadata) {
+ byte[][] rkmd = new byte[5][];
+ int pkCount = getVarChars(m.getRow(), rkmd);
+ if (m instanceof Put && pkCount > COLUMN_NAME_INDEX
+ && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0
+ && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
+ Put p = (Put)m;
+
+ byte[] k = ByteUtil.concat(viewKey, QueryConstants.SEPARATOR_BYTE_ARRAY, rkmd[COLUMN_NAME_INDEX],
+ QueryConstants.SEPARATOR_BYTE_ARRAY, rkmd[FAMILY_NAME_INDEX]);
+ Put viewColumnDefinitionPut = new Put(k, clientTimeStamp);
+ for (Cell cell : p.getFamilyCellMap().values().iterator().next()) {
+ viewColumnDefinitionPut.add(CellUtil.createCell(k, CellUtil.cloneFamily(cell),
+ CellUtil.cloneQualifier(cell), cell.getTimestamp(), cell.getTypeByte(),
+ CellUtil.cloneValue(cell)));
+ }
+ deltaNumberOfColumns++;
+ mutationsForAddingColumnsToViews.add(viewColumnDefinitionPut);
+ }
+ }
+
+ int oldBaseColumnCount = view.getBaseColumnCount();
+
+ Put viewHeaderRowPut = new Put(viewKey, clientTimeStamp);
+ byte[] baseColumnCountPtr = new byte[PInteger.INSTANCE.getByteSize()];
+ PInteger.INSTANCE.getCodec().encodeInt(oldBaseColumnCount + deltaNumberOfColumns, baseColumnCountPtr, 0);
+ byte[] columnCountPtr = new byte[PInteger.INSTANCE.getByteSize()];
+ PInteger.INSTANCE.getCodec().encodeInt(view.getColumns().size() + deltaNumberOfColumns, columnCountPtr, 0);
+ byte[] viewSequencePtr = new byte[PLong.INSTANCE.getByteSize()];
+ PLong.INSTANCE.getCodec().encodeLong(view.getSequenceNumber() + 1, viewSequencePtr, 0);
+ viewHeaderRowPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.COLUMN_COUNT_BYTES, clientTimeStamp, columnCountPtr);
+ viewHeaderRowPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES, clientTimeStamp, baseColumnCountPtr);
+ viewHeaderRowPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, clientTimeStamp, viewSequencePtr);
+ mutationsForAddingColumnsToViews.add(viewHeaderRowPut);
+
+ // Update positions of view columns
+ for (PColumn column : view.getColumns()) {
+ if (column.getPosition() >= oldBaseColumnCount) {
+ int newPosition = column.getPosition() + deltaNumberOfColumns + 1;
+
+ byte[] k = ByteUtil.concat(viewKey, QueryConstants.SEPARATOR_BYTE_ARRAY, column.getName()
+ .getBytes(), QueryConstants.SEPARATOR_BYTE_ARRAY, column.getFamilyName() != null ? column.getFamilyName().getBytes() : null);
+
+ Put positionUpdatePut = new Put(k, clientTimeStamp);
+ byte[] ptr = new byte[PInteger.INSTANCE.getByteSize()];
+ PInteger.INSTANCE.getCodec().encodeInt(newPosition, ptr, 0);
+ positionUpdatePut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.ORDINAL_POSITION_BYTES, clientTimeStamp, ptr);
+ mutationsForAddingColumnsToViews.add(positionUpdatePut);
+ }
+ }
+ invalidateList.add(new ImmutableBytesPtr(viewKey));
+ }
+ }
@Override
public void addColumn(RpcController controller, AddColumnRequest request,
@@ -1566,11 +1657,29 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
MetaDataMutationResult result = mutateColumn(tableMetaData, new ColumnMutator() {
@Override
public MetaDataMutationResult updateMutation(PTable table, byte[][] rowKeyMetaData,
- List<Mutation> tableMetaData, Region region,
- List<ImmutableBytesPtr> invalidateList, List<RowLock> locks) {
+ List<Mutation> tableMetaData, Region region, List<ImmutableBytesPtr> invalidateList,
+ List<RowLock> locks, long clientTimeStamp) throws IOException, SQLException {
byte[] tenantId = rowKeyMetaData[TENANT_ID_INDEX];
byte[] schemaName = rowKeyMetaData[SCHEMA_NAME_INDEX];
byte[] tableName = rowKeyMetaData[TABLE_NAME_INDEX];
+ PTableType type = table.getType();
+ TableViewFinderResult childViewsResult = findChildViews(region, tenantId, table,
+ (type == PTableType.VIEW ? PARENT_TABLE_BYTES : PHYSICAL_TABLE_BYTES));
+ List<Mutation> mutationsForAddingColumnsToViews = Collections.emptyList();
+ if (childViewsResult.hasViews()) {
+ /*
+ * Adding a column is not allowed if: 1) the meta-data for the child views spans more than
+ * one region, or 2) the column is being added to a view that itself has child views.
+ */
+ if (!childViewsResult.allViewsInSingleRegion() || type == PTableType.VIEW) {
+ return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION,
+ EnvironmentEdgeManager.currentTimeMillis(), null);
+ } else {
+ mutationsForAddingColumnsToViews = new ArrayList<>(childViewsResult.getResults().size() * tableMetaData.size());
+ addRowsToChildViews(tableMetaData, mutationsForAddingColumnsToViews, schemaName, tableName, invalidateList, clientTimeStamp,
+ childViewsResult, region, locks);
+ }
+ }
for (Mutation m : tableMetaData) {
byte[] key = m.getRow();
boolean addingPKColumn = false;
@@ -1613,6 +1722,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
}
}
+ tableMetaData.addAll(mutationsForAddingColumnsToViews);
return null;
}
});
@@ -1754,19 +1864,27 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
try {
tableMetaData = ProtobufUtil.getMutations(request);
- final long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetaData);
final List<byte[]> tableNamesToDelete = Lists.newArrayList();
MetaDataMutationResult result = mutateColumn(tableMetaData, new ColumnMutator() {
@Override
public MetaDataMutationResult updateMutation(PTable table, byte[][] rowKeyMetaData,
List<Mutation> tableMetaData, Region region,
- List<ImmutableBytesPtr> invalidateList, List<RowLock> locks)
+ List<ImmutableBytesPtr> invalidateList, List<RowLock> locks, long clientTimeStamp)
throws IOException, SQLException {
byte[] tenantId = rowKeyMetaData[TENANT_ID_INDEX];
byte[] schemaName = rowKeyMetaData[SCHEMA_NAME_INDEX];
byte[] tableName = rowKeyMetaData[TABLE_NAME_INDEX];
boolean deletePKColumn = false;
List<Mutation> additionalTableMetaData = Lists.newArrayList();
+
+ PTableType type = table.getType();
+ TableViewFinderResult childViewsResult = findChildViews(region, tenantId, table,
+ (type == PTableType.VIEW ? PARENT_TABLE_BYTES : PHYSICAL_TABLE_BYTES));
+ if (childViewsResult.hasViews()) {
+ return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager
+ .currentTimeMillis(), null);
+ }
+
for (Mutation m : tableMetaData) {
if (m instanceof Delete) {
byte[] key = m.getRow();
@@ -1791,6 +1909,26 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
} else {
continue;
}
+ if (table.getType() == PTableType.VIEW) {
+ if (table.getBaseColumnCount() != DIVORCED_VIEW_BASE_COLUMN_COUNT
+ && columnToDelete.getPosition() < table.getBaseColumnCount()) {
+ /*
+ * If the column being dropped is inherited from the base table, then the
+ * view is about to divorce itself from the base table. Divorce here means
+ * that any further meta-data changes made to the base table will not be
+ * propagated to the hierarchy of views on the base table.
+ */
+ byte[] viewKey = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
+ Put updateBaseColumnCountPut = new Put(viewKey);
+ byte[] baseColumnCountPtr = new byte[PInteger.INSTANCE.getByteSize()];
+ PInteger.INSTANCE.getCodec().encodeInt(DIVORCED_VIEW_BASE_COLUMN_COUNT,
+ baseColumnCountPtr, 0);
+ updateBaseColumnCountPut.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES, clientTimeStamp,
+ baseColumnCountPtr);
+ additionalTableMetaData.add(updateBaseColumnCountPut);
+ }
+ }
if (columnToDelete.isViewReferenced()) { // Disallow deletion of column referenced in WHERE clause of view
return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), table, columnToDelete);
}
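To make the bookkeeping in addRowsToChildViews concrete: assuming PColumn positions are 0-based while ORDINAL_POSITION in SYSTEM.CATALOG is 1-based (which is what the "+ 1" in the position update suggests), a base table with 4 columns, a view that added 2 of its own, and an ALTER TABLE adding 1 base column work out as in this self-contained sketch with illustrative numbers:

    // Sketch of the per-view arithmetic performed by addRowsToChildViews.
    public class ViewPositionMath {
        public static void main(String[] args) {
            int oldBaseColumnCount   = 4; // view.getBaseColumnCount()
            int viewColumnCount      = 6; // 4 inherited + 2 view-only columns
            int deltaNumberOfColumns = 1; // columns added to the base table

            System.out.println("new BASE_COLUMN_COUNT = "
                + (oldBaseColumnCount + deltaNumberOfColumns)); // 5
            System.out.println("new COLUMN_COUNT      = "
                + (viewColumnCount + deltaNumberOfColumns));    // 7

            // Only view-only columns (0-based position >= oldBaseColumnCount) move;
            // "+ 1" turns the 0-based position into a 1-based ORDINAL_POSITION.
            for (int pos = 0; pos < viewColumnCount; pos++) {
                if (pos >= oldBaseColumnCount) {
                    System.out.println("column at position " + pos
                        + " -> ORDINAL_POSITION " + (pos + deltaNumberOfColumns + 1)); // 6, 7
                }
            }
        }
    }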
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e78eb6fa/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index 3867e00..9009e7c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -62,8 +62,7 @@ public abstract class MetaDataProtocol extends MetaDataService {
public static final long MIN_TABLE_TIMESTAMP = 0;
- // Incremented from 5 to 7 with the addition of the STORE_NULLS table option in 4.3
- public static final long MIN_SYSTEM_TABLE_TIMESTAMP = MIN_TABLE_TIMESTAMP + 7;
+ public static final long MIN_SYSTEM_TABLE_TIMESTAMP = MIN_TABLE_TIMESTAMP + 8;
public static final int DEFAULT_MAX_META_DATA_VERSIONS = 1000;
public static final int DEFAULT_MAX_STAT_DATA_VERSIONS = 3;
public static final boolean DEFAULT_META_DATA_KEEP_DELETED_CELLS = true;
@@ -73,6 +72,7 @@ public abstract class MetaDataProtocol extends MetaDataService {
public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_2_0 = MIN_TABLE_TIMESTAMP + 4;
public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_2_1 = MIN_TABLE_TIMESTAMP + 5;
public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0 = MIN_TABLE_TIMESTAMP + 7;
+ public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_5_0 = MIN_TABLE_TIMESTAMP + 8;
// TODO: pare this down to minimum, as we don't need duplicates for both table and column errors, nor should we need
// a different code for every type of error.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e78eb6fa/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
index 7d389ac..dd6e303 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
@@ -3108,6 +3108,16 @@ public final class PTableProtos {
* <code>optional bool storeNulls = 24;</code>
*/
boolean getStoreNulls();
+
+ // optional int32 baseColumnCount = 25;
+ /**
+ * <code>optional int32 baseColumnCount = 25;</code>
+ */
+ boolean hasBaseColumnCount();
+ /**
+ * <code>optional int32 baseColumnCount = 25;</code>
+ */
+ int getBaseColumnCount();
}
/**
* Protobuf type {@code PTable}
@@ -3298,6 +3308,11 @@ public final class PTableProtos {
storeNulls_ = input.readBool();
break;
}
+ case 200: {
+ bitField0_ |= 0x00100000;
+ baseColumnCount_ = input.readInt32();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -3828,6 +3843,22 @@ public final class PTableProtos {
return storeNulls_;
}
+ // optional int32 baseColumnCount = 25;
+ public static final int BASECOLUMNCOUNT_FIELD_NUMBER = 25;
+ private int baseColumnCount_;
+ /**
+ * <code>optional int32 baseColumnCount = 25;</code>
+ */
+ public boolean hasBaseColumnCount() {
+ return ((bitField0_ & 0x00100000) == 0x00100000);
+ }
+ /**
+ * <code>optional int32 baseColumnCount = 25;</code>
+ */
+ public int getBaseColumnCount() {
+ return baseColumnCount_;
+ }
+
private void initFields() {
schemaNameBytes_ = com.google.protobuf.ByteString.EMPTY;
tableNameBytes_ = com.google.protobuf.ByteString.EMPTY;
@@ -3853,6 +3884,7 @@ public final class PTableProtos {
indexType_ = com.google.protobuf.ByteString.EMPTY;
statsTimeStamp_ = 0L;
storeNulls_ = false;
+ baseColumnCount_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -3992,6 +4024,9 @@ public final class PTableProtos {
if (((bitField0_ & 0x00080000) == 0x00080000)) {
output.writeBool(24, storeNulls_);
}
+ if (((bitField0_ & 0x00100000) == 0x00100000)) {
+ output.writeInt32(25, baseColumnCount_);
+ }
getUnknownFields().writeTo(output);
}
@@ -4102,6 +4137,10 @@ public final class PTableProtos {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(24, storeNulls_);
}
+ if (((bitField0_ & 0x00100000) == 0x00100000)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(25, baseColumnCount_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -4233,6 +4272,11 @@ public final class PTableProtos {
result = result && (getStoreNulls()
== other.getStoreNulls());
}
+ result = result && (hasBaseColumnCount() == other.hasBaseColumnCount());
+ if (hasBaseColumnCount()) {
+ result = result && (getBaseColumnCount()
+ == other.getBaseColumnCount());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -4342,6 +4386,10 @@ public final class PTableProtos {
hash = (37 * hash) + STORENULLS_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getStoreNulls());
}
+ if (hasBaseColumnCount()) {
+ hash = (37 * hash) + BASECOLUMNCOUNT_FIELD_NUMBER;
+ hash = (53 * hash) + getBaseColumnCount();
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -4514,6 +4562,8 @@ public final class PTableProtos {
bitField0_ = (bitField0_ & ~0x00400000);
storeNulls_ = false;
bitField0_ = (bitField0_ & ~0x00800000);
+ baseColumnCount_ = 0;
+ bitField0_ = (bitField0_ & ~0x01000000);
return this;
}
@@ -4654,6 +4704,10 @@ public final class PTableProtos {
to_bitField0_ |= 0x00080000;
}
result.storeNulls_ = storeNulls_;
+ if (((from_bitField0_ & 0x01000000) == 0x01000000)) {
+ to_bitField0_ |= 0x00100000;
+ }
+ result.baseColumnCount_ = baseColumnCount_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -4820,6 +4874,9 @@ public final class PTableProtos {
if (other.hasStoreNulls()) {
setStoreNulls(other.getStoreNulls());
}
+ if (other.hasBaseColumnCount()) {
+ setBaseColumnCount(other.getBaseColumnCount());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -6424,6 +6481,39 @@ public final class PTableProtos {
return this;
}
+ // optional int32 baseColumnCount = 25;
+ private int baseColumnCount_ ;
+ /**
+ * <code>optional int32 baseColumnCount = 25;</code>
+ */
+ public boolean hasBaseColumnCount() {
+ return ((bitField0_ & 0x01000000) == 0x01000000);
+ }
+ /**
+ * <code>optional int32 baseColumnCount = 25;</code>
+ */
+ public int getBaseColumnCount() {
+ return baseColumnCount_;
+ }
+ /**
+ * <code>optional int32 baseColumnCount = 25;</code>
+ */
+ public Builder setBaseColumnCount(int value) {
+ bitField0_ |= 0x01000000;
+ baseColumnCount_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional int32 baseColumnCount = 25;</code>
+ */
+ public Builder clearBaseColumnCount() {
+ bitField0_ = (bitField0_ & ~0x01000000);
+ baseColumnCount_ = 0;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:PTable)
}
@@ -6470,7 +6560,7 @@ public final class PTableProtos {
"values\030\002 \003(\014\022\033\n\023guidePostsByteCount\030\003 \001(" +
"\003\022\025\n\rkeyBytesCount\030\004 \001(\003\022\027\n\017guidePostsCo",
"unt\030\005 \001(\005\022!\n\013pGuidePosts\030\006 \001(\0132\014.PGuideP" +
- "osts\"\266\004\n\006PTable\022\027\n\017schemaNameBytes\030\001 \002(\014" +
+ "osts\"\317\004\n\006PTable\022\027\n\017schemaNameBytes\030\001 \002(\014" +
"\022\026\n\016tableNameBytes\030\002 \002(\014\022\036\n\ttableType\030\003 " +
"\002(\0162\013.PTableType\022\022\n\nindexState\030\004 \001(\t\022\026\n\016" +
"sequenceNumber\030\005 \002(\003\022\021\n\ttimeStamp\030\006 \002(\003\022" +
@@ -6484,10 +6574,11 @@ public final class PTableProtos {
"nt\030\022 \001(\014\022\025\n\rphysicalNames\030\023 \003(\014\022\020\n\010tenan" +
"tId\030\024 \001(\014\022\023\n\013viewIndexId\030\025 \001(\005\022\021\n\tindexT" +
"ype\030\026 \001(\014\022\026\n\016statsTimeStamp\030\027 \001(\003\022\022\n\nsto" +
- "reNulls\030\030 \001(\010*A\n\nPTableType\022\n\n\006SYSTEM\020\000\022" +
- "\010\n\004USER\020\001\022\010\n\004VIEW\020\002\022\t\n\005INDEX\020\003\022\010\n\004JOIN\020\004" +
- "B@\n(org.apache.phoenix.coprocessor.gener" +
- "atedB\014PTableProtosH\001\210\001\001\240\001\001"
+ "reNulls\030\030 \001(\010\022\027\n\017baseColumnCount\030\031 \001(\005*A" +
+ "\n\nPTableType\022\n\n\006SYSTEM\020\000\022\010\n\004USER\020\001\022\010\n\004VI" +
+ "EW\020\002\022\t\n\005INDEX\020\003\022\010\n\004JOIN\020\004B@\n(org.apache." +
+ "phoenix.coprocessor.generatedB\014PTablePro" +
+ "tosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -6511,7 +6602,7 @@ public final class PTableProtos {
internal_static_PTable_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_PTable_descriptor,
- new java.lang.String[] { "SchemaNameBytes", "TableNameBytes", "TableType", "IndexState", "SequenceNumber", "TimeStamp", "PkNameBytes", "BucketNum", "Columns", "Indexes", "IsImmutableRows", "GuidePosts", "DataTableNameBytes", "DefaultFamilyName", "DisableWAL", "MultiTenant", "ViewType", "ViewStatement", "PhysicalNames", "TenantId", "ViewIndexId", "IndexType", "StatsTimeStamp", "StoreNulls", });
+ new java.lang.String[] { "SchemaNameBytes", "TableNameBytes", "TableType", "IndexState", "SequenceNumber", "TimeStamp", "PkNameBytes", "BucketNum", "Columns", "Indexes", "IsImmutableRows", "GuidePosts", "DataTableNameBytes", "DefaultFamilyName", "DisableWAL", "MultiTenant", "ViewType", "ViewStatement", "PhysicalNames", "TenantId", "ViewIndexId", "IndexType", "StatsTimeStamp", "StoreNulls", "BaseColumnCount", });
return null;
}
};
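Every generated-code hunk in PTableProtos.java above follows mechanically from a single line added to the PTable message in PTable.proto, quoted in the generated javadoc: optional int32 baseColumnCount = 25;. Even the parser's "case 200" falls out of the protobuf wire format, as this small check illustrates:

    // Protobuf wire tag = (field number << 3) | wire type; int32 uses varint (type 0).
    public class WireTagCheck {
        public static void main(String[] args) {
            int fieldNumber = 25;    // baseColumnCount
            int wireTypeVarint = 0;  // varint encoding for int32
            System.out.println((fieldNumber << 3) | wireTypeVarint); // prints 200
        }
    }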
[26/47] phoenix git commit: PHOENIX-2072 - (James Taylor) Fix Eclipse compiler errors in pherf module
Posted by ma...@apache.org.
PHOENIX-2072 - (James Taylor) Fix Eclipse compiler errors in pherf module
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c1e57235
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c1e57235
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c1e57235
Branch: refs/heads/calcite
Commit: c1e57235b81af4fe847394f8288c9b679ee2d54f
Parents: 3e49339
Author: cmarcel <cm...@salesforce.com>
Authored: Fri Jun 26 09:07:00 2015 -0700
Committer: cmarcel <cm...@salesforce.com>
Committed: Fri Jun 26 09:07:00 2015 -0700
----------------------------------------------------------------------
.../pherf/workload/MultithreadedDiffer.java | 7 ++----
.../apache/phoenix/pherf/RuleGeneratorTest.java | 26 ++++++++++++++------
2 files changed, 20 insertions(+), 13 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c1e57235/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
index 1735754..91189e2 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
@@ -21,20 +21,18 @@ package org.apache.phoenix.pherf.workload;
import java.util.Calendar;
import java.util.Date;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
import org.apache.phoenix.pherf.PherfConstants;
import org.apache.phoenix.pherf.configuration.Query;
import org.apache.phoenix.pherf.result.RunTime;
import org.apache.phoenix.pherf.result.ThreadTime;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
class MultithreadedDiffer implements Runnable {
private static final Logger logger = LoggerFactory.getLogger(MultiThreadedRunner.class);
private Thread t;
private Query query;
private ThreadTime threadTime;
- private String threadName;
private long numberOfExecutions;
private long executionDurationInMs;
private QueryVerifier queryVerifier = new QueryVerifier(true);
@@ -72,7 +70,6 @@ class MultithreadedDiffer implements Runnable {
MultithreadedDiffer(String threadName, Query query, ThreadTime threadTime,
long numberOfExecutions, long executionDurationInMs) {
this.query = query;
- this.threadName = threadName;
this.threadTime = threadTime;
this.numberOfExecutions = numberOfExecutions;
this.executionDurationInMs = executionDurationInMs;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c1e57235/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
----------------------------------------------------------------------
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
index 92604d4..936eedb 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/RuleGeneratorTest.java
@@ -18,22 +18,32 @@
package org.apache.phoenix.pherf;
-import org.apache.phoenix.pherf.configuration.*;
-import org.apache.phoenix.pherf.workload.WriteWorkload;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.phoenix.pherf.configuration.Column;
+import org.apache.phoenix.pherf.configuration.DataModel;
+import org.apache.phoenix.pherf.configuration.DataSequence;
+import org.apache.phoenix.pherf.configuration.DataTypeMapping;
+import org.apache.phoenix.pherf.configuration.Scenario;
+import org.apache.phoenix.pherf.configuration.XMLConfigParser;
import org.apache.phoenix.pherf.rules.DataValue;
import org.apache.phoenix.pherf.rules.RulesApplier;
-import org.apache.phoenix.pherf.util.PhoenixUtil;
+import org.apache.phoenix.pherf.workload.WriteWorkload;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.junit.Test;
-import java.util.*;
-
-import static org.junit.Assert.*;
-
public class RuleGeneratorTest {
- private static PhoenixUtil util = PhoenixUtil.create(true);
private static final String matcherScenario = PherfConstants.SCENARIO_ROOT_PATTERN + ".xml";
@Test