Posted to commits@phoenix.apache.org by ja...@apache.org on 2014/10/29 01:01:56 UTC
[01/15] git commit: PHOENIX-1375 Remove references to incubation
Repository: phoenix
Updated Branches:
refs/heads/3.2 e7e00f991 -> 8b460b5c0
PHOENIX-1375 Remove references to incubation
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f1cbcc45
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f1cbcc45
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f1cbcc45
Branch: refs/heads/3.2
Commit: f1cbcc4586ee93f683a126fabbf0b03c222666b2
Parents: e7e00f9
Author: Gabriel Reid <ga...@ngdata.com>
Authored: Thu Oct 23 21:53:47 2014 +0200
Committer: Gabriel Reid <ga...@ngdata.com>
Committed: Thu Oct 23 21:53:47 2014 +0200
----------------------------------------------------------------------
NOTICE | 2 +-
dev/release_files/NOTICE | 2 +-
pom.xml | 6 +++---
3 files changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/f1cbcc45/NOTICE
----------------------------------------------------------------------
diff --git a/NOTICE b/NOTICE
index ac3ec17..093ae02 100644
--- a/NOTICE
+++ b/NOTICE
@@ -1,4 +1,4 @@
-Apache Phoenix (Incubating)
+Apache Phoenix
Copyright 2014 The Apache Software Foundation
This product includes software developed at
http://git-wip-us.apache.org/repos/asf/phoenix/blob/f1cbcc45/dev/release_files/NOTICE
----------------------------------------------------------------------
diff --git a/dev/release_files/NOTICE b/dev/release_files/NOTICE
index 84869a6..a96a97c 100644
--- a/dev/release_files/NOTICE
+++ b/dev/release_files/NOTICE
@@ -1,4 +1,4 @@
-Apache Phoenix (Incubating)
+Apache Phoenix
Copyright 2014 The Apache Software Foundation
This product includes software developed at
http://git-wip-us.apache.org/repos/asf/phoenix/blob/f1cbcc45/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 145d528..214814a 100644
--- a/pom.xml
+++ b/pom.xml
@@ -59,9 +59,9 @@
</parent>
<scm>
- <connection>scm:git:http://git-wip-us.apache.org/repos/asf/incubator-phoenix.git</connection>
- <url>https://git-wip-us.apache.org/repos/asf/incubator-phoenix.git</url>
- <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-phoenix.git</developerConnection>
+ <connection>scm:git:http://git-wip-us.apache.org/repos/asf/phoenix.git</connection>
+ <url>https://git-wip-us.apache.org/repos/asf/phoenix.git</url>
+ <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/phoenix.git</developerConnection>
</scm>
<properties>
[09/15] git commit: PHOENIX-897 Quote parameters in psql.py
Posted by ja...@apache.org.
PHOENIX-897 Quote parameters in psql.py
Properly quote supplied command-line parameters in psql.py so that
it's possible to supply any character (including ones that have
special meanings in various shells) as parameters.
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f4687599
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f4687599
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f4687599
Branch: refs/heads/3.2
Commit: f4687599173427d2b7e71b076aa1e763a062281b
Parents: ca6c08f
Author: Gabriel Reid <ga...@ngdata.com>
Authored: Mon Oct 27 10:57:43 2014 +0100
Committer: Gabriel Reid <ga...@ngdata.com>
Committed: Mon Oct 27 10:58:48 2014 +0100
----------------------------------------------------------------------
bin/psql.py | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/f4687599/bin/psql.py
----------------------------------------------------------------------
diff --git a/bin/psql.py b/bin/psql.py
index a8cbe31..34a95df 100755
--- a/bin/psql.py
+++ b/bin/psql.py
@@ -26,11 +26,17 @@ import phoenix_utils
phoenix_utils.setPath()
+if os.name == 'nt':
+ args = subprocess.list2cmdline(sys.argv[1:])
+else:
+ import pipes # pipes module isn't available on Windows
+ args = " ".join([pipes.quote(v) for v in sys.argv[1:]])
+
# HBase configuration folder path (where hbase-site.xml reside) for
# HBase/Phoenix client side property override
java_cmd = 'java -cp "' + phoenix_utils.hbase_conf_path + os.pathsep + phoenix_utils.phoenix_client_jar + \
'" -Dlog4j.configuration=file:' + \
os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
- " org.apache.phoenix.util.PhoenixRuntime " + ' '.join(sys.argv[1:])
+ " org.apache.phoenix.util.PhoenixRuntime " + args
subprocess.call(java_cmd, shell=True)
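For illustration, a minimal sketch (separate from the commit) of the two quoting paths, assuming Python 2's pipes module on POSIX:

import os
import subprocess

argv = ['localhost', 'my data.csv', 'a;b']
if os.name == 'nt':
    # Windows: subprocess.list2cmdline applies cmd.exe-style escaping
    print(subprocess.list2cmdline(argv))       # localhost "my data.csv" a;b
else:
    import pipes
    # POSIX: arguments containing shell metacharacters are single-quoted,
    # so they survive the shell=True call in psql.py unchanged
    print(' '.join(pipes.quote(v) for v in argv))
    # localhost 'my data.csv' 'a;b'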
[04/15] PHOENIX-944 Support derived tables in FROM clause that need
extra steps of client-side aggregation or other processing
Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjector.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjector.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjector.java
new file mode 100644
index 0000000..14b488d
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjector.java
@@ -0,0 +1,275 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.execute;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.phoenix.compile.ColumnProjector;
+import org.apache.phoenix.compile.JoinCompiler.ProjectedPTableWrapper;
+import org.apache.phoenix.compile.RowProjector;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.ExpressionType;
+import org.apache.phoenix.schema.KeyValueSchema;
+import org.apache.phoenix.schema.KeyValueSchema.KeyValueSchemaBuilder;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.ValueBitSet;
+import org.apache.phoenix.schema.tuple.BaseTuple;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.SchemaUtil;
+
+public class TupleProjector {
+ public static final byte[] VALUE_COLUMN_FAMILY = Bytes.toBytes("_v");
+ public static final byte[] VALUE_COLUMN_QUALIFIER = new byte[0];
+
+ private static final String SCAN_PROJECTOR = "scanProjector";
+
+ private final KeyValueSchema schema;
+ private final Expression[] expressions;
+ private ValueBitSet valueSet;
+ private final ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+
+ public TupleProjector(RowProjector rowProjector) {
+ List<? extends ColumnProjector> columnProjectors = rowProjector.getColumnProjectors();
+ int count = columnProjectors.size();
+ KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0);
+ expressions = new Expression[count];
+ for (int i = 0; i < count; i++) {
+ Expression expression = columnProjectors.get(i).getExpression();
+ builder.addField(expression);
+ expressions[i] = expression;
+ }
+ schema = builder.build();
+ valueSet = ValueBitSet.newInstance(schema);
+ }
+
+ public TupleProjector(ProjectedPTableWrapper projected) {
+ List<PColumn> columns = projected.getTable().getColumns();
+ expressions = new Expression[columns.size() - projected.getTable().getPKColumns().size()];
+ // do not compute minNullableIndex here, since we might merge these values later.
+ KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0);
+ int i = 0;
+ for (PColumn column : projected.getTable().getColumns()) {
+ if (!SchemaUtil.isPKColumn(column)) {
+ builder.addField(column);
+ expressions[i++] = projected.getSourceExpression(column);
+ }
+ }
+ schema = builder.build();
+ valueSet = ValueBitSet.newInstance(schema);
+ }
+
+ private TupleProjector(KeyValueSchema schema, Expression[] expressions) {
+ this.schema = schema;
+ this.expressions = expressions;
+ this.valueSet = ValueBitSet.newInstance(schema);
+ }
+
+ public void setValueBitSet(ValueBitSet bitSet) {
+ this.valueSet = bitSet;
+ }
+
+ public static void serializeProjectorIntoScan(Scan scan, TupleProjector projector) {
+ ByteArrayOutputStream stream = new ByteArrayOutputStream();
+ try {
+ DataOutputStream output = new DataOutputStream(stream);
+ projector.schema.write(output);
+ int count = projector.expressions.length;
+ WritableUtils.writeVInt(output, count);
+ for (int i = 0; i < count; i++) {
+ WritableUtils.writeVInt(output, ExpressionType.valueOf(projector.expressions[i]).ordinal());
+ projector.expressions[i].write(output);
+ }
+ scan.setAttribute(SCAN_PROJECTOR, stream.toByteArray());
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ } finally {
+ try {
+ stream.close();
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ }
+
+ public static TupleProjector deserializeProjectorFromScan(Scan scan) {
+ byte[] proj = scan.getAttribute(SCAN_PROJECTOR);
+ if (proj == null) {
+ return null;
+ }
+ ByteArrayInputStream stream = new ByteArrayInputStream(proj);
+ try {
+ DataInputStream input = new DataInputStream(stream);
+ KeyValueSchema schema = new KeyValueSchema();
+ schema.readFields(input);
+ int count = WritableUtils.readVInt(input);
+ Expression[] expressions = new Expression[count];
+ for (int i = 0; i < count; i++) {
+ int ordinal = WritableUtils.readVInt(input);
+ expressions[i] = ExpressionType.values()[ordinal].newInstance();
+ expressions[i].readFields(input);
+ }
+ return new TupleProjector(schema, expressions);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ } finally {
+ try {
+ stream.close();
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+
+ public static class ProjectedValueTuple extends BaseTuple {
+ private ImmutableBytesWritable keyPtr = new ImmutableBytesWritable();
+ private long timestamp;
+ private byte[] projectedValue;
+ private int bitSetLen;
+ private KeyValue keyValue;
+
+ private ProjectedValueTuple(byte[] keyBuffer, int keyOffset, int keyLength, long timestamp, byte[] projectedValue, int bitSetLen) {
+ this.keyPtr.set(keyBuffer, keyOffset, keyLength);
+ this.timestamp = timestamp;
+ this.projectedValue = projectedValue;
+ this.bitSetLen = bitSetLen;
+ }
+
+ public ImmutableBytesWritable getKeyPtr() {
+ return keyPtr;
+ }
+
+ public long getTimestamp() {
+ return timestamp;
+ }
+
+ public byte[] getProjectedValue() {
+ return projectedValue;
+ }
+
+ public int getBitSetLength() {
+ return bitSetLen;
+ }
+
+ @Override
+ public void getKey(ImmutableBytesWritable ptr) {
+ ptr.set(keyPtr.get(), keyPtr.getOffset(), keyPtr.getLength());
+ }
+
+ @Override
+ public KeyValue getValue(int index) {
+ if (index != 0) {
+ throw new IndexOutOfBoundsException(Integer.toString(index));
+ }
+ return getValue(VALUE_COLUMN_FAMILY, VALUE_COLUMN_QUALIFIER);
+ }
+
+ @Override
+ public KeyValue getValue(byte[] family, byte[] qualifier) {
+ if (keyValue == null) {
+ keyValue = KeyValueUtil.newKeyValue(keyPtr.get(), keyPtr.getOffset(), keyPtr.getLength(),
+ VALUE_COLUMN_FAMILY, VALUE_COLUMN_QUALIFIER, timestamp, projectedValue, 0, projectedValue.length);
+ }
+ return keyValue;
+ }
+
+ @Override
+ public boolean getValue(byte[] family, byte[] qualifier,
+ ImmutableBytesWritable ptr) {
+ ptr.set(projectedValue);
+ return true;
+ }
+
+ @Override
+ public boolean isImmutable() {
+ return true;
+ }
+
+ @Override
+ public int size() {
+ return 1;
+ }
+ }
+
+ public ProjectedValueTuple projectResults(Tuple tuple) {
+ byte[] bytesValue = schema.toBytes(tuple, getExpressions(), valueSet, ptr);
+ KeyValue base = tuple.getValue(0);
+ return new ProjectedValueTuple(base.getBuffer(), base.getRowOffset(), base.getRowLength(), base.getTimestamp(), bytesValue, valueSet.getEstimatedLength());
+ }
+
+ public static void decodeProjectedValue(Tuple tuple, ImmutableBytesWritable ptr) throws IOException {
+ boolean b = tuple.getValue(VALUE_COLUMN_FAMILY, VALUE_COLUMN_QUALIFIER, ptr);
+ if (!b)
+ throw new IOException("Trying to decode a non-projected value.");
+ }
+
+ public static ProjectedValueTuple mergeProjectedValue(ProjectedValueTuple dest, KeyValueSchema destSchema, ValueBitSet destBitSet,
+ Tuple src, KeyValueSchema srcSchema, ValueBitSet srcBitSet, int offset) throws IOException {
+ ImmutableBytesWritable destValue = new ImmutableBytesWritable(dest.getProjectedValue());
+ destBitSet.clear();
+ destBitSet.or(destValue);
+ int origDestBitSetLen = dest.getBitSetLength();
+ ImmutableBytesWritable srcValue = new ImmutableBytesWritable();
+ decodeProjectedValue(src, srcValue);
+ srcBitSet.clear();
+ srcBitSet.or(srcValue);
+ int origSrcBitSetLen = srcBitSet.getEstimatedLength();
+ for (int i = 0; i < srcBitSet.getMaxSetBit(); i++) {
+ if (srcBitSet.get(i)) {
+ destBitSet.set(offset + i);
+ }
+ }
+ int destBitSetLen = destBitSet.getEstimatedLength();
+ byte[] merged = new byte[destValue.getLength() - origDestBitSetLen + srcValue.getLength() - origSrcBitSetLen + destBitSetLen];
+ int o = Bytes.putBytes(merged, 0, destValue.get(), destValue.getOffset(), destValue.getLength() - origDestBitSetLen);
+ o = Bytes.putBytes(merged, o, srcValue.get(), srcValue.getOffset(), srcValue.getLength() - origSrcBitSetLen);
+ destBitSet.toBytes(merged, o);
+ ImmutableBytesWritable keyPtr = dest.getKeyPtr();
+ return new ProjectedValueTuple(keyPtr.get(), keyPtr.getOffset(), keyPtr.getLength(), dest.getTimestamp(), merged, destBitSetLen);
+ }
+
+ public KeyValueSchema getSchema() {
+ return schema;
+ }
+
+ public Expression[] getExpressions() {
+ return expressions;
+ }
+
+ public ValueBitSet getValueBitSet() {
+ return valueSet;
+ }
+
+ @Override
+ public String toString() {
+ return "TUPLE-PROJECTOR {" + Arrays.toString(expressions) + " ==> " + schema.toString() + "}";
+ }
+}
+
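A minimal usage sketch (not from the commit; everything other than the TupleProjector API shown above is invented) of how the projector travels with the scan: the client serializes it into a Scan attribute and the server-side coprocessor rebuilds it before projecting rows into the single _v column:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.phoenix.compile.RowProjector;
import org.apache.phoenix.execute.TupleProjector;

class ProjectorRoundTripSketch {
    static void clientSide(Scan scan, RowProjector rowProjector) {
        // Client: build a projector from the compiled projection and
        // attach it to the scan as the "scanProjector" attribute.
        TupleProjector projector = new TupleProjector(rowProjector);
        TupleProjector.serializeProjectorIntoScan(scan, projector);
    }

    static TupleProjector serverSide(Scan scan) {
        // Server (e.g. a RegionObserver): rebuild the projector; null means
        // the scan carries no projection and rows pass through unchanged.
        return TupleProjector.deserializeProjectorFromScan(scan);
    }
}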
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/expression/ProjectedColumnExpression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ProjectedColumnExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ProjectedColumnExpression.java
index dcac849..10657e0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ProjectedColumnExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ProjectedColumnExpression.java
@@ -22,8 +22,8 @@ import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.execute.TupleProjector;
import org.apache.phoenix.expression.visitor.ExpressionVisitor;
-import org.apache.phoenix.join.TupleProjector;
import org.apache.phoenix.schema.KeyValueSchema;
import org.apache.phoenix.schema.KeyValueSchema.KeyValueSchemaBuilder;
import org.apache.phoenix.schema.PColumn;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueClientAggregator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueClientAggregator.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueClientAggregator.java
index fefb077..2af99ca 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueClientAggregator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/DistinctValueClientAggregator.java
@@ -21,7 +21,6 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.schema.PArrayDataType;
import org.apache.phoenix.schema.PDataType;
-import org.apache.phoenix.schema.PhoenixArray;
import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.tuple.Tuple;
@@ -37,15 +36,15 @@ public class DistinctValueClientAggregator extends DistinctValueWithCountClientA
@Override
public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
- if (buffer == null || buffer.length == 0) {
+ if (cachedResult == null) {
Object[] values = new Object[valueVsCount.size()];
int i = 0;
for (ImmutableBytesPtr key : valueVsCount.keySet()) {
values[i++] = valueType.toObject(key, sortOrder);
}
- PhoenixArray array = PArrayDataType.instantiatePhoenixArray(valueType, values);
- buffer = resultType.toBytes(array, sortOrder);
+ cachedResult = PArrayDataType.instantiatePhoenixArray(valueType, values);
}
+ buffer = resultType.toBytes(cachedResult, sortOrder);
ptr.set(buffer);
return true;
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseGroupedAggregatingResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseGroupedAggregatingResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseGroupedAggregatingResultIterator.java
new file mode 100644
index 0000000..8fd36b3
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseGroupedAggregatingResultIterator.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.iterate;
+
+import static org.apache.phoenix.query.QueryConstants.AGG_TIMESTAMP;
+import static org.apache.phoenix.query.QueryConstants.SINGLE_COLUMN;
+import static org.apache.phoenix.query.QueryConstants.SINGLE_COLUMN_FAMILY;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.aggregator.Aggregator;
+import org.apache.phoenix.expression.aggregator.Aggregators;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.util.KeyValueUtil;
+
+/**
+ *
+ * Base class for result scanners that aggregate values for rows that share the
+ * same grouping key. This result scanner assumes that the results of the inner
+ * result scanner are returned in order of grouping keys.
+ *
+ */
+public abstract class BaseGroupedAggregatingResultIterator implements
+ AggregatingResultIterator {
+ private static final byte[] UNINITIALIZED_KEY_BUFFER = new byte[0];
+ protected final PeekingResultIterator resultIterator;
+ protected final Aggregators aggregators;
+ private ImmutableBytesWritable currentKey;
+ private ImmutableBytesWritable nextKey;
+
+ public BaseGroupedAggregatingResultIterator(
+ PeekingResultIterator resultIterator, Aggregators aggregators) {
+ if (resultIterator == null) throw new NullPointerException();
+ if (aggregators == null) throw new NullPointerException();
+ this.resultIterator = resultIterator;
+ this.aggregators = aggregators;
+ this.currentKey = new ImmutableBytesWritable(UNINITIALIZED_KEY_BUFFER);
+ this.nextKey = new ImmutableBytesWritable(UNINITIALIZED_KEY_BUFFER);
+ }
+
+ protected abstract ImmutableBytesWritable getGroupingKey(Tuple tuple, ImmutableBytesWritable ptr) throws SQLException;
+ protected abstract Tuple wrapKeyValueAsResult(KeyValue keyValue) throws SQLException;
+
+ @Override
+ public Tuple next() throws SQLException {
+ Tuple result = resultIterator.next();
+ if (result == null) {
+ return null;
+ }
+ if (currentKey.get() == UNINITIALIZED_KEY_BUFFER) {
+ getGroupingKey(result, currentKey);
+ }
+ Aggregator[] rowAggregators = aggregators.getAggregators();
+ aggregators.reset(rowAggregators);
+ while (true) {
+ aggregators.aggregate(rowAggregators, result);
+ Tuple nextResult = resultIterator.peek();
+ if (nextResult == null || !currentKey.equals(getGroupingKey(nextResult, nextKey))) {
+ break;
+ }
+ result = resultIterator.next();
+ }
+
+ byte[] value = aggregators.toBytes(rowAggregators);
+ Tuple tuple = wrapKeyValueAsResult(KeyValueUtil.newKeyValue(currentKey, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
+ currentKey.set(nextKey.get(), nextKey.getOffset(), nextKey.getLength());
+ return tuple;
+ }
+
+ @Override
+ public void close() throws SQLException {
+ resultIterator.close();
+ }
+
+ @Override
+ public void aggregate(Tuple result) {
+ Aggregator[] rowAggregators = aggregators.getAggregators();
+ aggregators.reset(rowAggregators);
+ aggregators.aggregate(rowAggregators, result);
+ }
+
+ @Override
+ public void explain(List<String> planSteps) {
+ resultIterator.explain(planSteps);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/iterate/GroupedAggregatingResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/GroupedAggregatingResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/GroupedAggregatingResultIterator.java
index 50e1bc2..db08696 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/GroupedAggregatingResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/GroupedAggregatingResultIterator.java
@@ -17,19 +17,13 @@
*/
package org.apache.phoenix.iterate;
-import static org.apache.phoenix.query.QueryConstants.*;
-
import java.sql.SQLException;
-import java.util.List;
+import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-
-import org.apache.phoenix.expression.aggregator.Aggregator;
import org.apache.phoenix.expression.aggregator.Aggregators;
import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
import org.apache.phoenix.schema.tuple.Tuple;
-import org.apache.phoenix.util.KeyValueUtil;
-import org.apache.phoenix.util.TupleUtil;
@@ -51,54 +45,20 @@ import org.apache.phoenix.util.TupleUtil;
*
* @since 0.1
*/
-public class GroupedAggregatingResultIterator implements AggregatingResultIterator {
- private final ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
- private final PeekingResultIterator resultIterator;
- protected final Aggregators aggregators;
-
- public GroupedAggregatingResultIterator( PeekingResultIterator resultIterator, Aggregators aggregators) {
- if (resultIterator == null) throw new NullPointerException();
- if (aggregators == null) throw new NullPointerException();
- this.resultIterator = resultIterator;
- this.aggregators = aggregators;
- }
-
- @Override
- public Tuple next() throws SQLException {
- Tuple result = resultIterator.next();
- if (result == null) {
- return null;
- }
- Aggregator[] rowAggregators = aggregators.getAggregators();
- aggregators.reset(rowAggregators);
- while (true) {
- aggregators.aggregate(rowAggregators, result);
- Tuple nextResult = resultIterator.peek();
- if (nextResult == null || !TupleUtil.equals(result, nextResult, tempPtr)) {
- break;
- }
- result = resultIterator.next();
- }
-
- byte[] value = aggregators.toBytes(rowAggregators);
- result.getKey(tempPtr);
- return new SingleKeyValueTuple(KeyValueUtil.newKeyValue(tempPtr, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
- }
-
- @Override
- public void close() throws SQLException {
- resultIterator.close();
+public class GroupedAggregatingResultIterator extends BaseGroupedAggregatingResultIterator {
+
+ public GroupedAggregatingResultIterator(PeekingResultIterator resultIterator, Aggregators aggregators) {
+ super(resultIterator, aggregators);
}
@Override
- public void aggregate(Tuple result) {
- Aggregator[] rowAggregators = aggregators.getAggregators();
- aggregators.reset(rowAggregators);
- aggregators.aggregate(rowAggregators, result);
+ protected ImmutableBytesWritable getGroupingKey(Tuple tuple, ImmutableBytesWritable ptr) throws SQLException {
+ tuple.getKey(ptr);
+ return ptr;
}
@Override
- public void explain(List<String> planSteps) {
- resultIterator.explain(planSteps);
+ protected Tuple wrapKeyValueAsResult(KeyValue keyValue) throws SQLException {
+ return new SingleKeyValueTuple(keyValue);
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/iterate/LookAheadResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/LookAheadResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/LookAheadResultIterator.java
index a7f390f..3293f65 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/LookAheadResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/LookAheadResultIterator.java
@@ -25,7 +25,11 @@ import org.apache.phoenix.schema.tuple.Tuple;
abstract public class LookAheadResultIterator implements PeekingResultIterator {
- public static LookAheadResultIterator wrap(final ResultIterator iterator) {
+ public static PeekingResultIterator wrap(final ResultIterator iterator) {
+ if (iterator instanceof PeekingResultIterator) {
+ return (PeekingResultIterator) iterator;
+ }
+
return new LookAheadResultIterator() {
@Override
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/join/TupleProjector.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/join/TupleProjector.java b/phoenix-core/src/main/java/org/apache/phoenix/join/TupleProjector.java
deleted file mode 100644
index 8377b03..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/join/TupleProjector.java
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.join;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.phoenix.compile.JoinCompiler.ProjectedPTableWrapper;
-import org.apache.phoenix.expression.Expression;
-import org.apache.phoenix.expression.ExpressionType;
-import org.apache.phoenix.schema.KeyValueSchema;
-import org.apache.phoenix.schema.KeyValueSchema.KeyValueSchemaBuilder;
-import org.apache.phoenix.schema.PColumn;
-import org.apache.phoenix.schema.ValueBitSet;
-import org.apache.phoenix.schema.tuple.BaseTuple;
-import org.apache.phoenix.schema.tuple.Tuple;
-import org.apache.phoenix.util.KeyValueUtil;
-import org.apache.phoenix.util.SchemaUtil;
-
-public class TupleProjector {
- public static final byte[] VALUE_COLUMN_FAMILY = Bytes.toBytes("_v");
- public static final byte[] VALUE_COLUMN_QUALIFIER = new byte[0];
-
- private static final String SCAN_PROJECTOR = "scanProjector";
-
- private final KeyValueSchema schema;
- private final Expression[] expressions;
- private ValueBitSet valueSet;
- private final ImmutableBytesWritable ptr = new ImmutableBytesWritable();
-
- public TupleProjector(ProjectedPTableWrapper projected) {
- List<PColumn> columns = projected.getTable().getColumns();
- expressions = new Expression[columns.size() - projected.getTable().getPKColumns().size()];
- // do not compute minNullableIndex here, since we might merge these values later.
- KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0);
- int i = 0;
- for (PColumn column : projected.getTable().getColumns()) {
- if (!SchemaUtil.isPKColumn(column)) {
- builder.addField(column);
- expressions[i++] = projected.getSourceExpression(column);
- }
- }
- schema = builder.build();
- valueSet = ValueBitSet.newInstance(schema);
- }
-
- private TupleProjector(KeyValueSchema schema, Expression[] expressions) {
- this.schema = schema;
- this.expressions = expressions;
- this.valueSet = ValueBitSet.newInstance(schema);
- }
-
- public void setValueBitSet(ValueBitSet bitSet) {
- this.valueSet = bitSet;
- }
-
- public static void serializeProjectorIntoScan(Scan scan, TupleProjector projector) {
- ByteArrayOutputStream stream = new ByteArrayOutputStream();
- try {
- DataOutputStream output = new DataOutputStream(stream);
- projector.schema.write(output);
- int count = projector.expressions.length;
- WritableUtils.writeVInt(output, count);
- for (int i = 0; i < count; i++) {
- WritableUtils.writeVInt(output, ExpressionType.valueOf(projector.expressions[i]).ordinal());
- projector.expressions[i].write(output);
- }
- scan.setAttribute(SCAN_PROJECTOR, stream.toByteArray());
- } catch (IOException e) {
- throw new RuntimeException(e);
- } finally {
- try {
- stream.close();
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- }
-
- }
-
- public static TupleProjector deserializeProjectorFromScan(Scan scan) {
- byte[] proj = scan.getAttribute(SCAN_PROJECTOR);
- if (proj == null) {
- return null;
- }
- ByteArrayInputStream stream = new ByteArrayInputStream(proj);
- try {
- DataInputStream input = new DataInputStream(stream);
- KeyValueSchema schema = new KeyValueSchema();
- schema.readFields(input);
- int count = WritableUtils.readVInt(input);
- Expression[] expressions = new Expression[count];
- for (int i = 0; i < count; i++) {
- int ordinal = WritableUtils.readVInt(input);
- expressions[i] = ExpressionType.values()[ordinal].newInstance();
- expressions[i].readFields(input);
- }
- return new TupleProjector(schema, expressions);
- } catch (IOException e) {
- throw new RuntimeException(e);
- } finally {
- try {
- stream.close();
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- }
- }
-
- public static class ProjectedValueTuple extends BaseTuple {
- private ImmutableBytesWritable keyPtr = new ImmutableBytesWritable();
- private long timestamp;
- private byte[] projectedValue;
- private int bitSetLen;
- private KeyValue keyValue;
-
- private ProjectedValueTuple(byte[] keyBuffer, int keyOffset, int keyLength, long timestamp, byte[] projectedValue, int bitSetLen) {
- this.keyPtr.set(keyBuffer, keyOffset, keyLength);
- this.timestamp = timestamp;
- this.projectedValue = projectedValue;
- this.bitSetLen = bitSetLen;
- }
-
- public ImmutableBytesWritable getKeyPtr() {
- return keyPtr;
- }
-
- public long getTimestamp() {
- return timestamp;
- }
-
- public byte[] getProjectedValue() {
- return projectedValue;
- }
-
- public int getBitSetLength() {
- return bitSetLen;
- }
-
- @Override
- public void getKey(ImmutableBytesWritable ptr) {
- ptr.set(keyPtr.get(), keyPtr.getOffset(), keyPtr.getLength());
- }
-
- @Override
- public KeyValue getValue(int index) {
- if (index != 0) {
- throw new IndexOutOfBoundsException(Integer.toString(index));
- }
- return getValue(VALUE_COLUMN_FAMILY, VALUE_COLUMN_QUALIFIER);
- }
-
- @Override
- public KeyValue getValue(byte[] family, byte[] qualifier) {
- if (keyValue == null) {
- keyValue = KeyValueUtil.newKeyValue(keyPtr.get(), keyPtr.getOffset(), keyPtr.getLength(),
- VALUE_COLUMN_FAMILY, VALUE_COLUMN_QUALIFIER, timestamp, projectedValue, 0, projectedValue.length);
- }
- return keyValue;
- }
-
- @Override
- public boolean getValue(byte[] family, byte[] qualifier,
- ImmutableBytesWritable ptr) {
- ptr.set(projectedValue);
- return true;
- }
-
- @Override
- public boolean isImmutable() {
- return true;
- }
-
- @Override
- public int size() {
- return 1;
- }
- }
-
- public ProjectedValueTuple projectResults(Tuple tuple) {
- byte[] bytesValue = schema.toBytes(tuple, expressions, valueSet, ptr);
- KeyValue base = tuple.getValue(0);
- return new ProjectedValueTuple(base.getBuffer(), base.getRowOffset(), base.getRowLength(), base.getTimestamp(), bytesValue, valueSet.getEstimatedLength());
- }
-
- public static void decodeProjectedValue(Tuple tuple, ImmutableBytesWritable ptr) throws IOException {
- boolean b = tuple.getValue(VALUE_COLUMN_FAMILY, VALUE_COLUMN_QUALIFIER, ptr);
- if (!b)
- throw new IOException("Trying to decode a non-projected value.");
- }
-
- public static ProjectedValueTuple mergeProjectedValue(ProjectedValueTuple dest, KeyValueSchema destSchema, ValueBitSet destBitSet,
- Tuple src, KeyValueSchema srcSchema, ValueBitSet srcBitSet, int offset) throws IOException {
- ImmutableBytesWritable destValue = new ImmutableBytesWritable(dest.getProjectedValue());
- destBitSet.clear();
- destBitSet.or(destValue);
- int origDestBitSetLen = dest.getBitSetLength();
- ImmutableBytesWritable srcValue = new ImmutableBytesWritable();
- decodeProjectedValue(src, srcValue);
- srcBitSet.clear();
- srcBitSet.or(srcValue);
- int origSrcBitSetLen = srcBitSet.getEstimatedLength();
- for (int i = 0; i < srcBitSet.getMaxSetBit(); i++) {
- if (srcBitSet.get(i)) {
- destBitSet.set(offset + i);
- }
- }
- int destBitSetLen = destBitSet.getEstimatedLength();
- byte[] merged = new byte[destValue.getLength() - origDestBitSetLen + srcValue.getLength() - origSrcBitSetLen + destBitSetLen];
- int o = Bytes.putBytes(merged, 0, destValue.get(), destValue.getOffset(), destValue.getLength() - origDestBitSetLen);
- o = Bytes.putBytes(merged, o, srcValue.get(), srcValue.getOffset(), srcValue.getLength() - origSrcBitSetLen);
- destBitSet.toBytes(merged, o);
- ImmutableBytesWritable keyPtr = dest.getKeyPtr();
- return new ProjectedValueTuple(keyPtr.get(), keyPtr.getOffset(), keyPtr.getLength(), dest.getTimestamp(), merged, destBitSetLen);
- }
-
- @Override
- public String toString() {
- return "TUPLE-PROJECTOR {" + Arrays.toString(expressions) + " ==> " + schema.toString() + "}";
- }
-}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
index 132c831..c7bc944 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
@@ -101,6 +101,7 @@ public class QueryOptimizer {
if (!useIndexes
|| select.isJoin()
|| dataPlan.getContext().getResolver().getTables().size() > 1
+ || select.getInnerSelectStatement() != null
|| (dataPlan.getContext().getScanRanges().isPointLookup() && stopAtBestPlan)) {
return Collections.singletonList(dataPlan);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
index 2242cd0..5aaf04d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
@@ -629,8 +629,8 @@ public class ParseNodeFactory {
statement.hasSequence());
}
- public SelectStatement select(SelectStatement statement, List<AliasedNode> select, ParseNode where, List<ParseNode> groupBy, boolean isAggregate) {
- return select(statement.getFrom(), statement.getHint(), statement.isDistinct(), select, where, groupBy,
+ public SelectStatement select(SelectStatement statement, boolean isDistinct, List<AliasedNode> select, ParseNode where, List<ParseNode> groupBy, boolean isAggregate) {
+ return select(statement.getFrom(), statement.getHint(), isDistinct, select, where, groupBy,
statement.getHaving(), statement.getOrderBy(), statement.getLimit(), statement.getBindCount(), isAggregate,
statement.hasSequence());
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
index 6cee588..e7302dc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/SelectStatement.java
@@ -192,4 +192,11 @@ public class SelectStatement implements FilterableStatement {
public boolean isJoin() {
return fromTable.size() > 1 || (fromTable.size() > 0 && fromTable.get(0) instanceof JoinTableNode);
}
+
+ public SelectStatement getInnerSelectStatement() {
+ if (fromTable.size() != 1 || !(fromTable.get(0) instanceof DerivedTableNode))
+ return null;
+
+ return ((DerivedTableNode) fromTable.get(0)).getSelect();
+ }
}
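A sketch of the new accessor's contract (queries invented; SQLParser usage assumed from Phoenix's parser API): it returns the inner SELECT only when the FROM clause is exactly one derived table, otherwise null — the condition the QueryOptimizer hunk above tests.

import org.apache.phoenix.parse.SQLParser;
import org.apache.phoenix.parse.SelectStatement;

class InnerSelectSketch {
    static void demo() throws Exception {
        // Single derived table in FROM: the inner SELECT is returned.
        SelectStatement derived = new SQLParser(
            "SELECT MAX(v) FROM (SELECT v FROM t LIMIT 10) AS d").parseQuery();
        assert derived.getInnerSelectStatement() != null;

        // Plain table reference (or a join): null is returned.
        SelectStatement plain = new SQLParser("SELECT v FROM t").parseQuery();
        assert plain.getInnerSelectStatement() == null;
    }
}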
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/schema/ColumnRef.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/ColumnRef.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/ColumnRef.java
index 0910712..f1a0028 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/ColumnRef.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/ColumnRef.java
@@ -114,7 +114,7 @@ public final class ColumnRef {
return new KeyValueColumnExpression(column, displayName);
}
- if (table.getType() == PTableType.JOIN) {
+ if (table.getType() == PTableType.JOIN || table.getType() == PTableType.SUBQUERY) {
return new ProjectedColumnExpression(column, table, column.getName().getString());
}
[10/15] git commit: PHOENIX-1385 Adding,
dropping and adding columns fails with NPE (Samarth Jain, James Taylor)
Posted by ja...@apache.org.
PHOENIX-1385 Adding, dropping and adding columns fails with NPE (Samarth Jain, James Taylor)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/26816011
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/26816011
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/26816011
Branch: refs/heads/3.2
Commit: 2681601164441f6868e8472c40ed461cdac32c36
Parents: f468759
Author: James Taylor <jt...@salesforce.com>
Authored: Mon Oct 27 13:35:49 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Mon Oct 27 13:45:32 2014 -0700
----------------------------------------------------------------------
.../apache/phoenix/end2end/AlterTableIT.java | 22 +++++++-
.../apache/phoenix/jdbc/PhoenixConnection.java | 8 +--
.../query/ConnectionQueryServicesImpl.java | 4 +-
.../query/ConnectionlessQueryServicesImpl.java | 6 +--
.../query/DelegateConnectionQueryServices.java | 6 +--
.../apache/phoenix/query/MetaDataMutated.java | 2 +-
.../apache/phoenix/schema/MetaDataClient.java | 6 +--
.../apache/phoenix/schema/PMetaDataImpl.java | 53 +++++++++++---------
8 files changed, 64 insertions(+), 43 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/26816011/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index b17c1bd..9c14f16 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -884,4 +884,24 @@ public class AlterTableIT extends BaseHBaseManagedTimeIT {
pstmt2.close();
conn1.close();
}
-}
+
+ @Test
+ public void testAddColumnsUsingNewConnection() throws Exception {
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ String ddl = "CREATE TABLE T (\n"
+ +"ID1 VARCHAR(15) NOT NULL,\n"
+ +"ID2 VARCHAR(15) NOT NULL,\n"
+ +"CREATED_DATE DATE,\n"
+ +"CREATION_TIME BIGINT,\n"
+ +"LAST_USED DATE,\n"
+ +"CONSTRAINT PK PRIMARY KEY (ID1, ID2))";
+ Connection conn1 = DriverManager.getConnection(getUrl(), props);
+ conn1.createStatement().execute(ddl);
+ ddl = "ALTER TABLE T ADD STRING VARCHAR, STRING_DATA_TYPES VARCHAR";
+ conn1.createStatement().execute(ddl);
+ ddl = "ALTER TABLE T DROP COLUMN STRING, STRING_DATA_TYPES";
+ conn1.createStatement().execute(ddl);
+ ddl = "ALTER TABLE T ADD STRING_ARRAY1 VARCHAR[]";
+ conn1.createStatement().execute(ddl);
+ conn1.close();
+ }}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/26816011/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index 7eced73..75f9f55 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -697,11 +697,11 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
}
@Override
- public PMetaData removeColumn(PName tenantId, String tableName, String familyName, String columnName,
- long tableTimeStamp, long tableSeqNum) throws SQLException {
- metaData = metaData.removeColumn(tenantId, tableName, familyName, columnName, tableTimeStamp, tableSeqNum);
+ public PMetaData removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp,
+ long tableSeqNum) throws SQLException {
+ metaData = metaData.removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum);
//Cascade through to connectionQueryServices too
- getQueryServices().removeColumn(tenantId, tableName, familyName, columnName, tableTimeStamp, tableSeqNum);
+ getQueryServices().removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum);
return metaData;
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/26816011/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index c0552ce..3e46a30 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -480,12 +480,12 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
@Override
- public PMetaData removeColumn(final PName tenantId, final String tableName, final String familyName, final String columnName, final long tableTimeStamp, final long tableSeqNum) throws SQLException {
+ public PMetaData removeColumn(final PName tenantId, final String tableName, final List<PColumn> columnsToRemove, final long tableTimeStamp, final long tableSeqNum) throws SQLException {
return metaDataMutated(tenantId, tableName, tableSeqNum, new Mutator() {
@Override
public PMetaData mutate(PMetaData metaData) throws SQLException {
try {
- return metaData.removeColumn(tenantId, tableName, familyName, columnName, tableTimeStamp, tableSeqNum);
+ return metaData.removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum);
} catch (TableNotFoundException e) {
// The DROP TABLE may have been processed first, so just ignore.
return metaData;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/26816011/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index c372802..7d0a109 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -150,9 +150,9 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
}
@Override
- public PMetaData removeColumn(PName tenantId, String tableName, String familyName, String columnName,
- long tableTimeStamp, long tableSeqNum) throws SQLException {
- return metaData = metaData.removeColumn(tenantId, tableName, familyName, columnName, tableTimeStamp, tableSeqNum);
+ public PMetaData removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp,
+ long tableSeqNum) throws SQLException {
+ return metaData = metaData.removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/26816011/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
index bb4bb33..defad5b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
@@ -88,9 +88,9 @@ public class DelegateConnectionQueryServices extends DelegateQueryServices imple
}
@Override
- public PMetaData removeColumn(PName tenantId, String tableName, String familyName, String columnName,
- long tableTimeStamp, long tableSeqNum) throws SQLException {
- return getDelegate().removeColumn(tenantId, tableName, familyName, columnName, tableTimeStamp, tableSeqNum);
+ public PMetaData removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp,
+ long tableSeqNum) throws SQLException {
+ return getDelegate().removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum);
}
@Override
http://git-wip-us.apache.org/repos/asf/phoenix/blob/26816011/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java b/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java
index 1b8ebda..cd4e2de 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java
@@ -37,5 +37,5 @@ public interface MetaDataMutated {
PMetaData addTable(PTable table) throws SQLException;
PMetaData removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp) throws SQLException;
PMetaData addColumn(PName tenantId, String tableName, List<PColumn> columns, long tableTimeStamp, long tableSeqNum, boolean isImmutableRows) throws SQLException;
- PMetaData removeColumn(PName tenantId, String tableName, String familyName, String columnName, long tableTimeStamp, long tableSeqNum) throws SQLException;
+ PMetaData removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp, long tableSeqNum) throws SQLException;
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/26816011/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 5ce2f93..0efbad6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -2160,10 +2160,8 @@ public class MetaDataClient {
// If we've done any index metadata updates, don't bother trying to update
// client-side cache as it would be too painful. Just let it pull it over from
// the server when needed.
- if (columnsToDrop.size() > 0 && indexesToDrop.isEmpty()) {
- for(PColumn columnToDrop : tableColumnsToDrop) {
- connection.removeColumn(tenantId, SchemaUtil.getTableName(schemaName, tableName) , columnToDrop.getFamilyName().getString(), columnToDrop.getName().getString(), result.getMutationTime(), seqNum);
- }
+ if (tableColumnsToDrop.size() > 0 && indexesToDrop.isEmpty()) {
+ connection.removeColumn(tenantId, SchemaUtil.getTableName(schemaName, tableName) , tableColumnsToDrop, result.getMutationTime(), seqNum);
}
// If we have a VIEW, then only delete the metadata, and leave the table data alone
if (table.getType() != PTableType.VIEW) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/26816011/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
index 8b26709..0d75aa2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
@@ -365,38 +365,41 @@ public class PMetaDataImpl implements PMetaData {
}
@Override
- public PMetaData removeColumn(PName tenantId, String tableName, String familyName, String columnName, long tableTimeStamp, long tableSeqNum) throws SQLException {
+ public PMetaData removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp, long tableSeqNum) throws SQLException {
PTableRef tableRef = metaData.get(new PTableKey(tenantId, tableName));
if (tableRef == null) {
return this;
}
PTable table = tableRef.table;
PTableCache tables = metaData.clone();
- PColumn column;
- if (familyName == null) {
- column = table.getPKColumn(columnName);
- } else {
- column = table.getColumnFamily(familyName).getColumn(columnName);
- }
- int positionOffset = 0;
- int position = column.getPosition();
- List<PColumn> oldColumns = table.getColumns();
- if (table.getBucketNum() != null) {
- position--;
- positionOffset = 1;
- oldColumns = oldColumns.subList(positionOffset, oldColumns.size());
- }
- List<PColumn> columns = Lists.newArrayListWithExpectedSize(oldColumns.size() - 1);
- columns.addAll(oldColumns.subList(0, position));
- // Update position of columns that follow removed column
- for (int i = position+1; i < oldColumns.size(); i++) {
- PColumn oldColumn = oldColumns.get(i);
- PColumn newColumn = new PColumnImpl(oldColumn.getName(), oldColumn.getFamilyName(), oldColumn.getDataType(), oldColumn.getMaxLength(), oldColumn.getScale(), oldColumn.isNullable(), i-1+positionOffset, oldColumn.getSortOrder(), oldColumn.getArraySize(), oldColumn.getViewConstant(), oldColumn.isViewReferenced());
- columns.add(newColumn);
+ for (PColumn columnToRemove : columnsToRemove) {
+ PColumn column;
+ String familyName = columnToRemove.getFamilyName().getString();
+ if (familyName == null) {
+ column = table.getPKColumn(columnToRemove.getName().getString());
+ } else {
+ column = table.getColumnFamily(familyName).getColumn(columnToRemove.getName().getString());
+ }
+ int positionOffset = 0;
+ int position = column.getPosition();
+ List<PColumn> oldColumns = table.getColumns();
+ if (table.getBucketNum() != null) {
+ position--;
+ positionOffset = 1;
+ oldColumns = oldColumns.subList(positionOffset, oldColumns.size());
+ }
+ List<PColumn> columns = Lists.newArrayListWithExpectedSize(oldColumns.size() - 1);
+ columns.addAll(oldColumns.subList(0, position));
+ // Update position of columns that follow removed column
+ for (int i = position+1; i < oldColumns.size(); i++) {
+ PColumn oldColumn = oldColumns.get(i);
+ PColumn newColumn = new PColumnImpl(oldColumn.getName(), oldColumn.getFamilyName(), oldColumn.getDataType(), oldColumn.getMaxLength(), oldColumn.getScale(), oldColumn.isNullable(), i-1+positionOffset, oldColumn.getSortOrder(), oldColumn.getArraySize(), oldColumn.getViewConstant(), oldColumn.isViewReferenced());
+ columns.add(newColumn);
+ }
+
+ table = PTableImpl.makePTable(table, tableTimeStamp, tableSeqNum, columns);
}
-
- PTable newTable = PTableImpl.makePTable(table, tableTimeStamp, tableSeqNum, columns);
- tables.put(newTable.getKey(), newTable);
+ tables.put(table.getKey(), table);
return new PMetaDataImpl(tables);
}
[05/15] git commit: PHOENIX-944 Support derived tables in FROM clause
that need extra steps of client-side aggregation or other processing
Posted by ja...@apache.org.
PHOENIX-944 Support derived tables in FROM clause that need extra steps of client-side aggregation or other processing
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2bdc33bc
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2bdc33bc
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2bdc33bc
Branch: refs/heads/3.2
Commit: 2bdc33bc58739606111157539ad9a066368dd03a
Parents: f8db1d5
Author: maryannxue <ma...@apache.org>
Authored: Fri Oct 24 00:17:39 2014 -0400
Committer: maryannxue <ma...@apache.org>
Committed: Fri Oct 24 00:17:39 2014 -0400
----------------------------------------------------------------------
.../apache/phoenix/end2end/DerivedTableIT.java | 282 ++-
.../org/apache/phoenix/end2end/SubqueryIT.java | 12 +
.../apache/phoenix/compile/FromCompiler.java | 27 +-
.../apache/phoenix/compile/GroupByCompiler.java | 5 +-
.../apache/phoenix/compile/JoinCompiler.java | 2 +-
.../apache/phoenix/compile/OrderByCompiler.java | 2 +-
.../apache/phoenix/compile/QueryCompiler.java | 62 +-
.../phoenix/compile/SubqueryRewriter.java | 10 +-
.../TrackOrderPreservingExpressionCompiler.java | 27 +-
.../apache/phoenix/compile/WhereCompiler.java | 32 +-
.../GroupedAggregateRegionObserver.java | 2 +-
.../coprocessor/HashJoinRegionScanner.java | 4 +-
.../phoenix/coprocessor/ScanRegionObserver.java | 2 +-
.../UngroupedAggregateRegionObserver.java | 2 +-
.../phoenix/execute/ClientAggregatePlan.java | 230 ++
.../phoenix/execute/ClientProcessingPlan.java | 82 +
.../apache/phoenix/execute/ClientScanPlan.java | 92 +
.../apache/phoenix/execute/HashJoinPlan.java | 24 +-
.../phoenix/execute/TupleProjectionPlan.java | 49 +-
.../apache/phoenix/execute/TupleProjector.java | 275 +++
.../expression/ProjectedColumnExpression.java | 2 +-
.../DistinctValueClientAggregator.java | 7 +-
.../BaseGroupedAggregatingResultIterator.java | 105 +
.../GroupedAggregatingResultIterator.java | 60 +-
.../iterate/LookAheadResultIterator.java | 6 +-
.../org/apache/phoenix/join/TupleProjector.java | 246 --
.../apache/phoenix/optimize/QueryOptimizer.java | 1 +
.../apache/phoenix/parse/ParseNodeFactory.java | 4 +-
.../apache/phoenix/parse/SelectStatement.java | 7 +
.../org/apache/phoenix/schema/ColumnRef.java | 2 +-
.../phoenix/schema/MetaDataClient.java.orig | 2197 ------------------
31 files changed, 1184 insertions(+), 2676 deletions(-)
----------------------------------------------------------------------
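Before the per-file diffs, a minimal JDBC sketch of one nested-query shape this commit makes executable (query and expected count taken from the DerivedTableIT changes below; the connection URL is a placeholder assuming a local quorum):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class DerivedTableExample {
        public static void main(String[] args) throws SQLException {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement();
                 // Before this commit, nested selects like this one failed with
                 // SQLFeatureNotSupportedException("Complex nested queries not supported.")
                 ResultSet rs = stmt.executeQuery(
                         "SELECT count(*) FROM (SELECT DISTINCT a_string FROM aTable) AS t")) {
                while (rs.next()) {
                    System.out.println(rs.getInt(1)); // 3 distinct a_string values in the test fixture
                }
            }
        }
    }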
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/it/java/org/apache/phoenix/end2end/DerivedTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DerivedTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DerivedTableIT.java
index 8a80764..8ef542a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DerivedTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DerivedTableIT.java
@@ -35,19 +35,19 @@ import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import java.sql.Array;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
-import java.sql.SQLFeatureNotSupportedException;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -61,33 +61,65 @@ import com.google.common.collect.Lists;
@RunWith(Parameterized.class)
public class DerivedTableIT extends BaseClientManagedTimeIT {
private static final String tenantId = getOrganizationId();
- private static final String MSG = "Complex nested queries not supported.";
private long ts;
- private String indexDDL;
+ private String[] indexDDL;
+ private String[] plans;
- public DerivedTableIT(String indexDDL) {
+ public DerivedTableIT(String[] indexDDL, String[] plans) {
this.indexDDL = indexDDL;
+ this.plans = plans;
}
@Before
public void initTable() throws Exception {
ts = nextTimestamp();
initATableValues(tenantId, getDefaultSplits(tenantId), null, ts);
- if (indexDDL != null && indexDDL.length() > 0) {
+ if (indexDDL != null && indexDDL.length > 0) {
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts));
Connection conn = DriverManager.getConnection(getUrl(), props);
- conn.createStatement().execute(indexDDL);
+ for (String ddl : indexDDL) {
+ conn.createStatement().execute(ddl);
+ }
}
}
@Parameters(name="{0}")
public static Collection<Object> data() {
List<Object> testCases = Lists.newArrayList();
- testCases.add(new String[] { "CREATE INDEX ATABLE_DERIVED_IDX ON aTable (a_byte) INCLUDE ("
- + " A_STRING, " + " B_STRING)" });
- testCases.add(new String[] { "" });
+ testCases.add(new String[][] {
+ {
+ "CREATE INDEX ATABLE_DERIVED_IDX ON aTable (a_byte) INCLUDE (A_STRING, B_STRING)"
+ }, {
+ "CLIENT PARALLEL 1-WAY FULL SCAN OVER ATABLE_DERIVED_IDX\n" +
+ " SERVER AGGREGATE INTO DISTINCT ROWS BY [A_STRING, B_STRING]\n" +
+ "CLIENT MERGE SORT\n" +
+ "CLIENT SORTED BY [B_STRING]\n" +
+ "CLIENT SORTED BY [A]\n" +
+ "CLIENT AGGREGATE INTO DISTINCT ROWS BY [A]\n" +
+ "CLIENT SORTED BY [A DESC]",
+
+ "CLIENT PARALLEL 1-WAY FULL SCAN OVER ATABLE_DERIVED_IDX\n" +
+ " SERVER AGGREGATE INTO DISTINCT ROWS BY [A_STRING, B_STRING]\n" +
+ "CLIENT MERGE SORT\n" +
+ "CLIENT AGGREGATE INTO DISTINCT ROWS BY [A]\n" +
+ "CLIENT DISTINCT ON [COLLECTDISTINCT(B)]"}});
+ testCases.add(new String[][] {
+ {}, {
+ "CLIENT PARALLEL 4-WAY FULL SCAN OVER ATABLE\n" +
+ " SERVER AGGREGATE INTO DISTINCT ROWS BY [A_STRING, B_STRING]\n" +
+ "CLIENT MERGE SORT\n" +
+ "CLIENT SORTED BY [B_STRING]\n" +
+ "CLIENT SORTED BY [A]\n" +
+ "CLIENT AGGREGATE INTO DISTINCT ROWS BY [A]\n" +
+ "CLIENT SORTED BY [A DESC]",
+
+ "CLIENT PARALLEL 4-WAY FULL SCAN OVER ATABLE\n" +
+ " SERVER AGGREGATE INTO DISTINCT ROWS BY [A_STRING, B_STRING]\n" +
+ "CLIENT MERGE SORT\n" +
+ "CLIENT AGGREGATE INTO DISTINCT ROWS BY [A]\n" +
+ "CLIENT DISTINCT ON [COLLECTDISTINCT(B)]"}});
return testCases;
}
@@ -183,21 +215,21 @@ public class DerivedTableIT extends BaseClientManagedTimeIT {
// (limit) where
query = "SELECT t.eid FROM (SELECT entity_id eid, b_string b FROM aTable LIMIT 2) AS t WHERE t.b = '" + C_VALUE + "'";
- try {
- conn.createStatement().executeQuery(query);
- fail("Should have got SQLFeatureNotSupportedException");
- } catch (SQLFeatureNotSupportedException e) {
- assertEquals(MSG, e.getMessage());
- }
+ statement = conn.prepareStatement(query);
+ rs = statement.executeQuery();
+ assertTrue (rs.next());
+ assertEquals(ROW2,rs.getString(1));
+
+ assertFalse(rs.next());
// (count) where
query = "SELECT t.c FROM (SELECT count(*) c FROM aTable) AS t WHERE t.c > 0";
- try {
- conn.createStatement().executeQuery(query);
- fail("Should have got SQLFeatureNotSupportedException");
- } catch (SQLFeatureNotSupportedException e) {
- assertEquals(MSG, e.getMessage());
- }
+ statement = conn.prepareStatement(query);
+ rs = statement.executeQuery();
+ assertTrue (rs.next());
+ assertEquals(9,rs.getInt(1));
+
+ assertFalse(rs.next());
} finally {
conn.close();
}
@@ -227,12 +259,78 @@ public class DerivedTableIT extends BaseClientManagedTimeIT {
// (groupby) groupby
query = "SELECT t.c, count(*) FROM (SELECT count(*) c FROM aTable GROUP BY a_string) AS t GROUP BY t.c";
- try {
- conn.createStatement().executeQuery(query);
- fail("Should have got SQLFeatureNotSupportedException");
- } catch (SQLFeatureNotSupportedException e) {
- assertEquals(MSG, e.getMessage());
- }
+ statement = conn.prepareStatement(query);
+ rs = statement.executeQuery();
+ assertTrue (rs.next());
+ assertEquals(1,rs.getInt(1));
+ assertEquals(1,rs.getInt(2));
+ assertTrue (rs.next());
+ assertEquals(4,rs.getInt(1));
+ assertEquals(2,rs.getInt(2));
+
+ assertFalse(rs.next());
+
+ // (groupby) groupby orderby
+ query = "SELECT t.c, count(*) FROM (SELECT count(*) c FROM aTable GROUP BY a_string) AS t GROUP BY t.c ORDER BY count(*) DESC";
+ statement = conn.prepareStatement(query);
+ rs = statement.executeQuery();
+ assertTrue (rs.next());
+ assertEquals(4,rs.getInt(1));
+ assertEquals(2,rs.getInt(2));
+ assertTrue (rs.next());
+ assertEquals(1,rs.getInt(1));
+ assertEquals(1,rs.getInt(2));
+
+ assertFalse(rs.next());
+
+ // (groupby a, b orderby b) groupby a orderby a
+ query = "SELECT t.a, COLLECTDISTINCT(t.b) FROM (SELECT b_string b, a_string a FROM aTable GROUP BY a_string, b_string ORDER BY b_string) AS t GROUP BY t.a ORDER BY t.a DESC";
+ statement = conn.prepareStatement(query);
+ rs = statement.executeQuery();
+ assertTrue (rs.next());
+ assertEquals(C_VALUE,rs.getString(1));
+ String[] b = new String[1];
+ b[0] = E_VALUE;
+ Array array = conn.createArrayOf("VARCHAR", b);
+ assertEquals(array,rs.getArray(2));
+ assertTrue (rs.next());
+ assertEquals(B_VALUE,rs.getString(1));
+ b = new String[3];
+ b[0] = B_VALUE;
+ b[1] = C_VALUE;
+ b[2] = E_VALUE;
+ array = conn.createArrayOf("VARCHAR", b);
+ assertEquals(array,rs.getArray(2));
+ assertTrue (rs.next());
+ assertEquals(A_VALUE,rs.getString(1));
+ assertEquals(array,rs.getArray(2));
+
+ assertFalse(rs.next());
+
+ rs = conn.createStatement().executeQuery("EXPLAIN " + query);
+ assertEquals(plans[0], QueryUtil.getExplainPlan(rs));
+
+ // distinct b (groupby b, a) groupby a
+ query = "SELECT DISTINCT COLLECTDISTINCT(t.b) FROM (SELECT b_string b, a_string a FROM aTable GROUP BY b_string, a_string) AS t GROUP BY t.a";
+ statement = conn.prepareStatement(query);
+ rs = statement.executeQuery();
+ assertTrue (rs.next());
+ b = new String[1];
+ b[0] = E_VALUE;
+ array = conn.createArrayOf("VARCHAR", b);
+ assertEquals(array,rs.getArray(1));
+ assertTrue (rs.next());
+ b = new String[3];
+ b[0] = B_VALUE;
+ b[1] = C_VALUE;
+ b[2] = E_VALUE;
+ array = conn.createArrayOf("VARCHAR", b);
+ assertEquals(array,rs.getArray(1));
+
+ assertFalse(rs.next());
+
+ rs = conn.createStatement().executeQuery("EXPLAIN " + query);
+ assertEquals(plans[1], QueryUtil.getExplainPlan(rs));
} finally {
conn.close();
}
@@ -321,13 +419,15 @@ public class DerivedTableIT extends BaseClientManagedTimeIT {
assertFalse(rs.next());
// (limit) orderby
- query = "SELECT t.eid FROM (SELECT entity_id eid, b_string b FROM aTable LIMIT 2) AS t ORDER BY t.b, t.eid";
- try {
- conn.createStatement().executeQuery(query);
- fail("Should have got SQLFeatureNotSupportedException");
- } catch (SQLFeatureNotSupportedException e) {
- assertEquals(MSG, e.getMessage());
- }
+ query = "SELECT t.eid FROM (SELECT entity_id eid, b_string b FROM aTable LIMIT 2) AS t ORDER BY t.b DESC, t.eid";
+ statement = conn.prepareStatement(query);
+ rs = statement.executeQuery();
+ assertTrue (rs.next());
+ assertEquals(ROW2,rs.getString(1));
+ assertTrue (rs.next());
+ assertEquals(ROW1,rs.getString(1));
+
+ assertFalse(rs.next());
} finally {
conn.close();
}
@@ -386,15 +486,16 @@ public class DerivedTableIT extends BaseClientManagedTimeIT {
// limit ? limit ?
query = "SELECT t.eid FROM (SELECT entity_id eid FROM aTable LIMIT ?) AS t LIMIT ?";
- try {
- statement = conn.prepareStatement(query);
- statement.setInt(1, 4);
- statement.setInt(2, 2);
- statement.executeQuery();
- fail("Should have got SQLFeatureNotSupportedException");
- } catch (SQLFeatureNotSupportedException e) {
- assertEquals(MSG, e.getMessage());
- }
+ statement = conn.prepareStatement(query);
+ statement.setInt(1, 4);
+ statement.setInt(2, 2);
+ rs = statement.executeQuery();
+ assertTrue (rs.next());
+ assertEquals(ROW1,rs.getString(1));
+ assertTrue (rs.next());
+ assertEquals(ROW2,rs.getString(1));
+
+ assertFalse(rs.next());
// (groupby orderby) limit
query = "SELECT a, s FROM (SELECT a_string a, sum(a_byte) s FROM aTable GROUP BY a_string ORDER BY sum(a_byte)) LIMIT 2";
@@ -466,30 +567,51 @@ public class DerivedTableIT extends BaseClientManagedTimeIT {
// distinct (distinct)
query = "SELECT DISTINCT t.a FROM (SELECT DISTINCT a_string a, b_string b FROM aTable) AS t";
- try {
- conn.createStatement().executeQuery(query);
- fail("Should have got SQLFeatureNotSupportedException");
- } catch (SQLFeatureNotSupportedException e) {
- assertEquals(MSG, e.getMessage());
- }
+ statement = conn.prepareStatement(query);
+ rs = statement.executeQuery();
+ assertTrue (rs.next());
+ assertEquals(A_VALUE,rs.getString(1));
+ assertTrue (rs.next());
+ assertEquals(B_VALUE,rs.getString(1));
+ assertTrue (rs.next());
+ assertEquals(C_VALUE,rs.getString(1));
+
+ assertFalse(rs.next());
// distinct (groupby)
query = "SELECT distinct t.c FROM (SELECT count(*) c FROM aTable GROUP BY a_string) AS t";
- try {
- conn.createStatement().executeQuery(query);
- fail("Should have got SQLFeatureNotSupportedException");
- } catch (SQLFeatureNotSupportedException e) {
- assertEquals(MSG, e.getMessage());
- }
+ statement = conn.prepareStatement(query);
+ rs = statement.executeQuery();
+ assertTrue (rs.next());
+ assertEquals(1,rs.getInt(1));
+ assertTrue (rs.next());
+ assertEquals(4,rs.getInt(1));
+
+ assertFalse(rs.next());
+
+ // distinct (groupby) orderby
+ query = "SELECT distinct t.c FROM (SELECT count(*) c FROM aTable GROUP BY a_string) AS t ORDER BY t.c DESC";
+ statement = conn.prepareStatement(query);
+ rs = statement.executeQuery();
+ assertTrue (rs.next());
+ assertEquals(4,rs.getInt(1));
+ assertTrue (rs.next());
+ assertEquals(1,rs.getInt(1));
+
+ assertFalse(rs.next());
// distinct (limit)
query = "SELECT DISTINCT t.a, t.b FROM (SELECT a_string a, b_string b FROM aTable LIMIT 2) AS t";
- try {
- conn.createStatement().executeQuery(query);
- fail("Should have got SQLFeatureNotSupportedException");
- } catch (SQLFeatureNotSupportedException e) {
- assertEquals(MSG, e.getMessage());
- }
+ statement = conn.prepareStatement(query);
+ rs = statement.executeQuery();
+ assertTrue (rs.next());
+ assertEquals(A_VALUE,rs.getString(1));
+ assertEquals(B_VALUE,rs.getString(2));
+ assertTrue (rs.next());
+ assertEquals(A_VALUE,rs.getString(1));
+ assertEquals(C_VALUE,rs.getString(2));
+
+ assertFalse(rs.next());
} finally {
conn.close();
}
@@ -522,30 +644,30 @@ public class DerivedTableIT extends BaseClientManagedTimeIT {
// count (distinct)
query = "SELECT count(*) FROM (SELECT DISTINCT a_string FROM aTable) AS t";
- try {
- conn.createStatement().executeQuery(query);
- fail("Should have got SQLFeatureNotSupportedException");
- } catch (SQLFeatureNotSupportedException e) {
- assertEquals(MSG, e.getMessage());
- }
+ statement = conn.prepareStatement(query);
+ rs = statement.executeQuery();
+ assertTrue (rs.next());
+ assertEquals(3,rs.getInt(1));
+
+ assertFalse(rs.next());
// count (groupby)
query = "SELECT count(*) FROM (SELECT count(*) c FROM aTable GROUP BY a_string) AS t";
- try {
- conn.createStatement().executeQuery(query);
- fail("Should have got SQLFeatureNotSupportedException");
- } catch (SQLFeatureNotSupportedException e) {
- assertEquals(MSG, e.getMessage());
- }
+ statement = conn.prepareStatement(query);
+ rs = statement.executeQuery();
+ assertTrue (rs.next());
+ assertEquals(3,rs.getInt(1));
+
+ assertFalse(rs.next());
// count (limit)
query = "SELECT count(*) FROM (SELECT entity_id FROM aTable LIMIT 2) AS t";
- try {
- conn.createStatement().executeQuery(query);
- fail("Should have got SQLFeatureNotSupportedException");
- } catch (SQLFeatureNotSupportedException e) {
- assertEquals(MSG, e.getMessage());
- }
+ statement = conn.prepareStatement(query);
+ rs = statement.executeQuery();
+ assertTrue (rs.next());
+ assertEquals(2,rs.getInt(1));
+
+ assertFalse(rs.next());
} finally {
conn.close();
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
index f0b8cc1..7c6c342 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SubqueryIT.java
@@ -901,6 +901,18 @@ public class SubqueryIT extends BaseHBaseManagedTimeIT {
assertEquals(rs.getString(2), "T6");
assertFalse(rs.next());
+
+ query = "SELECT \"order_id\", name FROM " + JOIN_ORDER_TABLE_FULL_NAME + " o JOIN " + JOIN_ITEM_TABLE_FULL_NAME + " i ON o.\"item_id\" = i.\"item_id\" WHERE quantity != ANY(SELECT quantity FROM " + JOIN_ORDER_TABLE_FULL_NAME + " q WHERE o.\"item_id\" = q.\"item_id\" GROUP BY quantity)";
+ statement = conn.prepareStatement(query);
+ rs = statement.executeQuery();
+ assertTrue (rs.next());
+ assertEquals(rs.getString(1), "000000000000002");
+ assertEquals(rs.getString(2), "T6");
+ assertTrue (rs.next());
+ assertEquals(rs.getString(1), "000000000000004");
+ assertEquals(rs.getString(2), "T6");
+
+ assertFalse(rs.next());
} finally {
conn.close();
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index f290f75..bc997d3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.phoenix.coprocessor.MetaDataProtocol;
import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
+import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.parse.AliasedNode;
import org.apache.phoenix.parse.BindTableNode;
@@ -174,6 +175,23 @@ public class FromCompiler {
SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, statement.getTable(), true);
return visitor;
}
+
+ public static ColumnResolver getResolverForCompiledDerivedTable(PhoenixConnection connection, TableRef tableRef, RowProjector projector)
+ throws SQLException {
+ List<PColumn> projectedColumns = new ArrayList<PColumn>();
+ List<Expression> sourceExpressions = new ArrayList<Expression>();
+ PTable table = tableRef.getTable();
+ for (PColumn column : table.getColumns()) {
+ Expression sourceExpression = projector.getColumnProjector(column.getPosition()).getExpression();
+ PColumnImpl projectedColumn = new PColumnImpl(column.getName(), column.getFamilyName(),
+ sourceExpression.getDataType(), sourceExpression.getMaxLength(), sourceExpression.getScale(), sourceExpression.isNullable(),
+ column.getPosition(), sourceExpression.getSortOrder(), column.getArraySize(), column.getViewConstant(), column.isViewReferenced());
+ projectedColumns.add(projectedColumn);
+ sourceExpressions.add(sourceExpression);
+ }
+ PTable t = PTableImpl.makePTable(table, projectedColumns);
+ return new SingleTableColumnResolver(connection, new TableRef(tableRef.getTableAlias(), t, tableRef.getLowerBoundTimeStamp(), tableRef.hasDynamicCols()));
+ }
public static ColumnResolver getResolverForMutation(DMLStatement statement, PhoenixConnection connection)
throws SQLException {
@@ -214,6 +232,12 @@ public class FromCompiler {
TableRef tableRef = createTableRef(tableNode, updateCacheImmediately);
tableRefs = ImmutableList.of(tableRef);
}
+
+ public SingleTableColumnResolver(PhoenixConnection connection, TableRef tableRef) {
+ super(connection, 0);
+ alias = tableRef.getTableAlias();
+ tableRefs = ImmutableList.of(tableRef);
+ }
@Override
public List<TableRef> getTables() {
@@ -365,8 +389,7 @@ public class FromCompiler {
}
}
- // TODO: unused, but should be used for joins - make private once used
- public static class MultiTableColumnResolver extends BaseColumnResolver implements TableNodeVisitor<Void> {
+ private static class MultiTableColumnResolver extends BaseColumnResolver implements TableNodeVisitor<Void> {
private final ListMultimap<String, TableRef> tableMap;
private final List<TableRef> tables;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
index dda27aa..a561a47 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
@@ -30,6 +30,7 @@ import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.coprocessor.GroupedAggregateRegionObserver;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.apache.phoenix.execute.TupleProjector;
import org.apache.phoenix.expression.CoerceExpression;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.parse.AliasedNode;
@@ -135,7 +136,7 @@ public class GroupByCompiler {
* @throws ColumnNotFoundException if column name could not be resolved
* @throws AmbiguousColumnException if an unaliased column name is ambiguous across multiple tables
*/
- public static GroupBy compile(StatementContext context, SelectStatement statement) throws SQLException {
+ public static GroupBy compile(StatementContext context, SelectStatement statement, TupleProjector tupleProjector) throws SQLException {
List<ParseNode> groupByNodes = statement.getGroupBy();
/**
* Distinct can use an aggregate plan if there's no group by.
@@ -160,7 +161,7 @@ public class GroupByCompiler {
TrackOrderPreservingExpressionCompiler groupByVisitor =
new TrackOrderPreservingExpressionCompiler(context,
GroupBy.EMPTY_GROUP_BY, groupByNodes.size(),
- Ordering.UNORDERED);
+ Ordering.UNORDERED, tupleProjector);
for (ParseNode node : groupByNodes) {
Expression expression = node.accept(groupByVisitor);
if (groupByVisitor.isAggregate()) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
index 64e44b3..3899179 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
@@ -35,12 +35,12 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.apache.phoenix.execute.TupleProjector;
import org.apache.phoenix.expression.AndExpression;
import org.apache.phoenix.expression.CoerceExpression;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.function.CountAggregateFunction;
import org.apache.phoenix.jdbc.PhoenixStatement;
-import org.apache.phoenix.join.TupleProjector;
import org.apache.phoenix.parse.AliasedNode;
import org.apache.phoenix.parse.AndParseNode;
import org.apache.phoenix.parse.BindTableNode;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
index df9ce2c..d33d93a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/OrderByCompiler.java
@@ -84,7 +84,7 @@ public class OrderByCompiler {
// accumulate columns in ORDER BY
TrackOrderPreservingExpressionCompiler visitor =
new TrackOrderPreservingExpressionCompiler(context, groupBy,
- orderByNodes.size(), Ordering.ORDERED);
+ orderByNodes.size(), Ordering.ORDERED, null);
LinkedHashSet<OrderByExpression> orderByExpressions = Sets.newLinkedHashSetWithExpectedSize(orderByNodes.size());
for (OrderByNode node : orderByNodes) {
boolean isAscending = node.isAscending();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
index d82ac02..214330c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
@@ -34,12 +34,16 @@ import org.apache.phoenix.compile.JoinCompiler.ProjectedPTableWrapper;
import org.apache.phoenix.compile.JoinCompiler.Table;
import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
import org.apache.phoenix.execute.AggregatePlan;
+import org.apache.phoenix.execute.ClientAggregatePlan;
+import org.apache.phoenix.execute.ClientScanPlan;
import org.apache.phoenix.execute.HashJoinPlan;
import org.apache.phoenix.execute.HashJoinPlan.HashSubPlan;
import org.apache.phoenix.execute.HashJoinPlan.WhereClauseSubPlan;
import org.apache.phoenix.execute.ScanPlan;
import org.apache.phoenix.execute.TupleProjectionPlan;
+import org.apache.phoenix.execute.TupleProjector;
import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.LiteralExpression;
import org.apache.phoenix.expression.RowValueConstructorExpression;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.iterate.ParallelIterators.ParallelIteratorFactory;
@@ -47,7 +51,6 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.join.HashJoinInfo;
-import org.apache.phoenix.join.TupleProjector;
import org.apache.phoenix.parse.HintNode.Hint;
import org.apache.phoenix.parse.JoinTableNode.JoinType;
import org.apache.phoenix.parse.ParseNode;
@@ -59,11 +62,11 @@ import org.apache.phoenix.schema.AmbiguousColumnException;
import org.apache.phoenix.schema.ColumnNotFoundException;
import org.apache.phoenix.schema.PDatum;
import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.TableRef;
import org.apache.phoenix.util.ScanUtil;
import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
@@ -349,30 +352,49 @@ public class QueryCompiler {
}
protected QueryPlan compileSingleQuery(StatementContext context, SelectStatement select, List<Object> binds, boolean asSubquery, boolean allowPageFilter) throws SQLException{
+ SelectStatement innerSelect = select.getInnerSelectStatement();
+ if (innerSelect == null) {
+ return compileSingleFlatQuery(context, select, binds, asSubquery, allowPageFilter, null, null);
+ }
+
+ QueryPlan innerPlan = compileSubquery(innerSelect);
+ TupleProjector tupleProjector = new TupleProjector(innerPlan.getProjector());
+ innerPlan = new TupleProjectionPlan(innerPlan, tupleProjector, null);
+
+ // Replace the original resolver and table with those having compiled type info.
+ TableRef tableRef = context.getResolver().getTables().get(0);
+ ColumnResolver resolver = FromCompiler.getResolverForCompiledDerivedTable(statement.getConnection(), tableRef, innerPlan.getProjector());
+ context.setResolver(resolver);
+ tableRef = resolver.getTables().get(0);
+ context.setCurrentTable(tableRef);
+
+ return compileSingleFlatQuery(context, select, binds, asSubquery, allowPageFilter, innerPlan, innerPlan.getOrderBy().getOrderByExpressions().isEmpty() ? tupleProjector : null);
+ }
+
+ protected QueryPlan compileSingleFlatQuery(StatementContext context, SelectStatement select, List<Object> binds, boolean asSubquery, boolean allowPageFilter, QueryPlan innerPlan, TupleProjector innerPlanTupleProjector) throws SQLException{
PhoenixConnection connection = statement.getConnection();
ColumnResolver resolver = context.getResolver();
TableRef tableRef = context.getCurrentTable();
PTable table = tableRef.getTable();
- // TODO PHOENIX-944. See DerivedTableIT for a list of unsupported cases.
- if (table.getType() == PTableType.SUBQUERY)
- throw new SQLFeatureNotSupportedException("Complex nested queries not supported.");
-
ParseNode viewWhere = null;
if (table.getViewStatement() != null) {
viewWhere = new SQLParser(table.getViewStatement()).parseQuery().getWhere();
}
Integer limit = LimitCompiler.compile(context, select);
- GroupBy groupBy = GroupByCompiler.compile(context, select);
+ GroupBy groupBy = GroupByCompiler.compile(context, select, innerPlanTupleProjector);
// Optimize the HAVING clause by finding any group by expressions that can be moved
// to the WHERE clause
select = HavingCompiler.rewrite(context, select, groupBy);
Expression having = HavingCompiler.compile(context, select, groupBy);
// Don't pass groupBy when building where clause expression, because we do not want to wrap these
// expressions as group by key expressions since they're pre, not post filtered.
- context.setResolver(FromCompiler.getResolverForQuery(select, connection));
- Set<SubqueryParseNode> subqueries = WhereCompiler.compile(context, select, viewWhere);
+ if (innerPlan == null) {
+ context.setResolver(FromCompiler.getResolverForQuery(select, connection));
+ }
+ Set<SubqueryParseNode> subqueries = Sets.<SubqueryParseNode> newHashSet();
+ Expression where = WhereCompiler.compile(context, select, viewWhere, subqueries);
context.setResolver(resolver); // recover resolver
OrderBy orderBy = OrderByCompiler.compile(context, select, groupBy, limit);
RowProjector projector = ProjectionCompiler.compile(context, select, groupBy, asSubquery ? Collections.<PDatum>emptyList() : targetColumns);
@@ -386,10 +408,14 @@ public class QueryCompiler {
limit = maxRows;
}
}
- ParallelIteratorFactory parallelIteratorFactory = asSubquery ? null : this.parallelIteratorFactory;
- QueryPlan plan = select.isAggregate() || select.isDistinct() ?
- new AggregatePlan(context, select, tableRef, projector, limit, orderBy, parallelIteratorFactory, groupBy, having)
- : new ScanPlan(context, select, tableRef, projector, limit, orderBy, parallelIteratorFactory, allowPageFilter);
+
+ QueryPlan plan = innerPlan;
+ if (plan == null) {
+ ParallelIteratorFactory parallelIteratorFactory = asSubquery ? null : this.parallelIteratorFactory;
+ plan = select.isAggregate() || select.isDistinct() ?
+ new AggregatePlan(context, select, tableRef, projector, limit, orderBy, parallelIteratorFactory, groupBy, having)
+ : new ScanPlan(context, select, tableRef, projector, limit, orderBy, parallelIteratorFactory, allowPageFilter);
+ }
if (!subqueries.isEmpty()) {
int count = subqueries.size();
WhereClauseSubPlan[] subPlans = new WhereClauseSubPlan[count];
@@ -401,6 +427,16 @@ public class QueryCompiler {
plan = HashJoinPlan.create(select, plan, null, subPlans);
}
+ if (innerPlan != null) {
+ if (LiteralExpression.isTrue(where)) {
+ where = null; // we do not pass "true" as filter
+ }
+ plan = select.isAggregate() || select.isDistinct() ?
+ new ClientAggregatePlan(context, select, tableRef, projector, limit, where, orderBy, groupBy, having, plan)
+ : new ClientScanPlan(context, select, tableRef, projector, limit, where, orderBy, plan);
+
+ }
+
return plan;
}
}
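Taken together, the QueryCompiler hunks above split compilation in two; a comment-only sketch of the resulting control flow (simplified, not the literal method bodies):

    // compileSingleQuery():
    //   inner = select.getInnerSelectStatement();
    //   if (inner == null)  -> compileSingleFlatQuery(..., null, null);   // old flat path
    //   innerPlan = compileSubquery(inner);                               // plan the derived table
    //   innerPlan = new TupleProjectionPlan(innerPlan, tupleProjector, null);
    //   swap in FromCompiler.getResolverForCompiledDerivedTable(...) so the
    //   outer query resolves columns against the compiled inner types;
    //   -> compileSingleFlatQuery(..., innerPlan, tupleProjector);
    //
    // compileSingleFlatQuery():
    //   innerPlan == null  -> AggregatePlan / ScanPlan as before;
    //   innerPlan != null  -> wrap it in ClientAggregatePlan (aggregate/distinct)
    //                         or ClientScanPlan, carrying the residual WHERE,
    //                         ORDER BY and LIMIT to the client side.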
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java
index 4b37259..3e470ce 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/SubqueryRewriter.java
@@ -238,7 +238,7 @@ public class SubqueryRewriter extends ParseNodeRewriter {
groupbyNodes.add(aliasedNode.getNode());
}
groupbyNodes.addAll(subquery.getGroupBy());
- subquery = NODE_FACTORY.select(subquery, selectNodes, where, groupbyNodes, true);
+ subquery = NODE_FACTORY.select(subquery, subquery.isDistinct(), selectNodes, where, groupbyNodes, true);
}
ParseNode onNode = conditionExtractor.getJoinCondition();
@@ -323,11 +323,11 @@ public class SubqueryRewriter extends ParseNodeRewriter {
}
if (derivedTableAlias == null) {
- subquery = NODE_FACTORY.select(subquery, selectNodes, where, groupbyNodes, true);
+ subquery = NODE_FACTORY.select(subquery, false, selectNodes, where, groupbyNodes, true);
} else {
List<ParseNode> derivedTableGroupBy = Lists.newArrayListWithExpectedSize(subquery.getGroupBy().size() + groupbyNodes.size());
- derivedTableGroupBy.addAll(subquery.getGroupBy());
derivedTableGroupBy.addAll(groupbyNodes);
+ derivedTableGroupBy.addAll(subquery.getGroupBy());
List<AliasedNode> derivedTableSelect = Lists.newArrayListWithExpectedSize(aliasedNodes.size() + selectNodes.size() - 1);
derivedTableSelect.addAll(aliasedNodes);
for (int i = 1; i < selectNodes.size(); i++) {
@@ -338,8 +338,8 @@ public class SubqueryRewriter extends ParseNodeRewriter {
selectNodes.set(i, aliasedNode);
groupbyNodes.set(i - 1, aliasedNode.getNode());
}
- SelectStatement derivedTableStmt = NODE_FACTORY.select(subquery, derivedTableSelect, where, derivedTableGroupBy, true);
- subquery = NODE_FACTORY.select(Collections.singletonList(NODE_FACTORY.derivedTable(derivedTableAlias, derivedTableStmt)), subquery.getHint(), false, selectNodes, null, groupbyNodes, null, Collections.<OrderByNode> emptyList(), null, subquery.getBindCount(), true, subquery.hasSequence());
+ SelectStatement derivedTableStmt = NODE_FACTORY.select(subquery, subquery.isDistinct(), derivedTableSelect, where, derivedTableGroupBy, true);
+ subquery = NODE_FACTORY.select(Collections.singletonList(NODE_FACTORY.derivedTable(derivedTableAlias, derivedTableStmt)), subquery.getHint(), false, selectNodes, null, groupbyNodes, null, Collections.<OrderByNode> emptyList(), null, subquery.getBindCount(), true, false);
}
ParseNode onNode = conditionExtractor.getJoinCondition();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/compile/TrackOrderPreservingExpressionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/TrackOrderPreservingExpressionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/TrackOrderPreservingExpressionCompiler.java
index ebf117d..c17d3f0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/TrackOrderPreservingExpressionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/TrackOrderPreservingExpressionCompiler.java
@@ -24,8 +24,10 @@ import java.util.Comparator;
import java.util.List;
import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
+import org.apache.phoenix.execute.TupleProjector;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.LiteralExpression;
+import org.apache.phoenix.expression.RowKeyColumnExpression;
import org.apache.phoenix.expression.function.FunctionExpression;
import org.apache.phoenix.expression.function.FunctionExpression.OrderPreserving;
import org.apache.phoenix.parse.CaseParseNode;
@@ -35,8 +37,8 @@ import org.apache.phoenix.parse.MultiplyParseNode;
import org.apache.phoenix.parse.SubtractParseNode;
import org.apache.phoenix.schema.ColumnRef;
import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.SortOrder;
-import org.apache.phoenix.util.SchemaUtil;
import com.google.common.collect.Lists;
@@ -56,12 +58,13 @@ public class TrackOrderPreservingExpressionCompiler extends ExpressionCompiler {
private final List<Entry> entries;
private final Ordering ordering;
private final int positionOffset;
+ private final TupleProjector tupleProjector; // for derived-table query compilation
private OrderPreserving orderPreserving = OrderPreserving.YES;
private ColumnRef columnRef;
private boolean isOrderPreserving = true;
private Boolean isReverse;
- TrackOrderPreservingExpressionCompiler(StatementContext context, GroupBy groupBy, int expectedEntrySize, Ordering ordering) {
+ TrackOrderPreservingExpressionCompiler(StatementContext context, GroupBy groupBy, int expectedEntrySize, Ordering ordering, TupleProjector tupleProjector) {
super(context, groupBy);
PTable table = context.getResolver().getTables().get(0).getTable();
boolean isSalted = table.getBucketNum() != null;
@@ -71,6 +74,7 @@ public class TrackOrderPreservingExpressionCompiler extends ExpressionCompiler {
positionOffset = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0);
entries = Lists.newArrayListWithExpectedSize(expectedEntrySize);
this.ordering = ordering;
+ this.tupleProjector = tupleProjector;
}
public Boolean isReverse() {
@@ -158,7 +162,7 @@ public class TrackOrderPreservingExpressionCompiler extends ExpressionCompiler {
ColumnRef ref = super.resolveColumn(node);
// If we encounter any non PK column, then we can't aggregate on-the-fly
// because the distinct groups have no correlation to the KV column value
- if (!SchemaUtil.isPKColumn(ref.getColumn())) {
+ if (getColumnPKPosition(ref) < 0) {
orderPreserving = OrderPreserving.NO;
}
@@ -172,6 +176,17 @@ public class TrackOrderPreservingExpressionCompiler extends ExpressionCompiler {
}
return ref;
}
+
+ private int getColumnPKPosition(ColumnRef ref) {
+ if (tupleProjector != null && ref.getTable().getType() == PTableType.SUBQUERY) {
+ Expression expression = tupleProjector.getExpressions()[ref.getColumnPosition()];
+ if (expression instanceof RowKeyColumnExpression) {
+ return ((RowKeyColumnExpression) expression).getPosition();
+ }
+ }
+
+ return ref.getPKSlotPosition();
+ }
public boolean addEntry(Expression expression) {
if (expression instanceof LiteralExpression) {
@@ -205,7 +220,7 @@ public class TrackOrderPreservingExpressionCompiler extends ExpressionCompiler {
return entries;
}
- public static class Entry {
+ public class Entry {
private final Expression expression;
private final ColumnRef columnRef;
private final OrderPreserving orderPreserving;
@@ -221,7 +236,7 @@ public class TrackOrderPreservingExpressionCompiler extends ExpressionCompiler {
}
public int getPkPosition() {
- return columnRef.getPKSlotPosition();
+ return getColumnPKPosition(columnRef);
}
public int getColumnPosition() {
@@ -232,4 +247,4 @@ public class TrackOrderPreservingExpressionCompiler extends ExpressionCompiler {
return orderPreserving;
}
}
-}
\ No newline at end of file
+}
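A hedged note on the getColumnPKPosition() addition above (fixture names from DerivedTableIT; behavior inferred from the hunk):

    // entity_id is part of aTable's row key, so for
    //   SELECT t.eid FROM (SELECT entity_id eid FROM aTable) AS t ORDER BY t.eid
    // the column reference resolves against a PTableType.SUBQUERY table, the inner
    // TupleProjector maps t.eid to a RowKeyColumnExpression, and
    // getColumnPKPosition() returns that row-key slot instead of -1, letting the
    // outer GROUP BY / ORDER BY stay order-preserving rather than forcing a client sort.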
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
index b5f120f..1bf26f2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
@@ -76,8 +76,8 @@ public class WhereCompiler {
private WhereCompiler() {
}
- public static Set<SubqueryParseNode> compile(StatementContext context, FilterableStatement statement) throws SQLException {
- return compile(context, statement, null);
+ public static Expression compile(StatementContext context, FilterableStatement statement) throws SQLException {
+ return compile(context, statement, null, null);
}
/**
@@ -90,8 +90,8 @@ public class WhereCompiler {
* @throws ColumnNotFoundException if column name could not be resolved
* @throws AmbiguousColumnException if an unaliased column name is ambiguous across multiple tables
*/
- public static Set<SubqueryParseNode> compile(StatementContext context, FilterableStatement statement, ParseNode viewWhere) throws SQLException {
- return compile(context, statement, viewWhere, Collections.<Expression>emptyList(), false);
+ public static Expression compile(StatementContext context, FilterableStatement statement, ParseNode viewWhere, Set<SubqueryParseNode> subqueryNodes) throws SQLException {
+ return compile(context, statement, viewWhere, Collections.<Expression>emptyList(), false, subqueryNodes);
}
/**
@@ -104,18 +104,20 @@ public class WhereCompiler {
* @throws ColumnNotFoundException if column name could not be resolved
* @throws AmbiguousColumnException if an unaliased column name is ambiguous across multiple tables
*/
- public static Set<SubqueryParseNode> compile(StatementContext context, FilterableStatement statement, ParseNode viewWhere, List<Expression> dynamicFilters, boolean hashJoinOptimization) throws SQLException {
+ public static Expression compile(StatementContext context, FilterableStatement statement, ParseNode viewWhere, List<Expression> dynamicFilters, boolean hashJoinOptimization, Set<SubqueryParseNode> subqueryNodes) throws SQLException {
ParseNode where = statement.getWhere();
- Set<SubqueryParseNode> subqueryNodes = Sets.<SubqueryParseNode> newHashSet();
- SubqueryParseNodeVisitor subqueryVisitor = new SubqueryParseNodeVisitor(context, subqueryNodes);
- if (where != null) {
- where.accept(subqueryVisitor);
- }
- if (viewWhere != null) {
- viewWhere.accept(subqueryVisitor);
+ if (subqueryNodes != null) { // if the subqueryNodes passed in is null, we assume there will be no sub-queries in the WHERE clause.
+ SubqueryParseNodeVisitor subqueryVisitor = new SubqueryParseNodeVisitor(context, subqueryNodes);
+ if (where != null) {
+ where.accept(subqueryVisitor);
+ }
+ if (viewWhere != null) {
+ viewWhere.accept(subqueryVisitor);
+ }
+ if (!subqueryNodes.isEmpty()) {
+ return null;
+ }
}
- if (!subqueryNodes.isEmpty())
- return subqueryNodes;
Set<Expression> extractedNodes = Sets.<Expression>newHashSet();
WhereExpressionCompiler whereCompiler = new WhereExpressionCompiler(context);
@@ -140,7 +142,7 @@ public class WhereCompiler {
expression = WhereOptimizer.pushKeyExpressionsToScan(context, statement, expression, extractedNodes);
setScanFilter(context, statement, expression, whereCompiler.disambiguateWithFamily, hashJoinOptimization);
- return subqueryNodes;
+ return expression;
}
private static class WhereExpressionCompiler extends ExpressionCompiler {
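The reworked contract is easiest to read from the call site in QueryCompiler above; a short sketch of the caller-side pattern (statements only, not a complete method):

    // The caller owns the sub-query set and receives the compiled expression back:
    Set<SubqueryParseNode> subqueries = Sets.<SubqueryParseNode> newHashSet();
    Expression where = WhereCompiler.compile(context, select, viewWhere, subqueries);
    // If sub-queries were found, compile() returns null and fills `subqueries`;
    // the caller compiles them into WhereClauseSubPlans before finishing the plan.
    // Passing null for the set (as the two-argument overload does) asserts that
    // the WHERE clause contains no sub-queries.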
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index 3654d03..00870f0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -52,13 +52,13 @@ import org.apache.hadoop.io.WritableUtils;
import org.apache.phoenix.cache.GlobalCache;
import org.apache.phoenix.cache.TenantCache;
import org.apache.phoenix.cache.aggcache.SpillableGroupByCache;
+import org.apache.phoenix.execute.TupleProjector;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.ExpressionType;
import org.apache.phoenix.expression.aggregator.Aggregator;
import org.apache.phoenix.expression.aggregator.ServerAggregators;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.join.HashJoinInfo;
-import org.apache.phoenix.join.TupleProjector;
import org.apache.phoenix.memory.MemoryManager.MemoryChunk;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.schema.PDataType;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
index 4caefe7..1aecb7a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
@@ -34,11 +34,11 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.cache.GlobalCache;
import org.apache.phoenix.cache.HashCache;
import org.apache.phoenix.cache.TenantCache;
+import org.apache.phoenix.execute.TupleProjector;
+import org.apache.phoenix.execute.TupleProjector.ProjectedValueTuple;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.join.HashJoinInfo;
-import org.apache.phoenix.join.TupleProjector;
-import org.apache.phoenix.join.TupleProjector.ProjectedValueTuple;
import org.apache.phoenix.parse.JoinTableNode.JoinType;
import org.apache.phoenix.schema.IllegalDataException;
import org.apache.phoenix.schema.KeyValueSchema;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
index 4389426..8aced7b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.WritableUtils;
import org.apache.phoenix.cache.GlobalCache;
import org.apache.phoenix.cache.TenantCache;
+import org.apache.phoenix.execute.TupleProjector;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.KeyValueColumnExpression;
import org.apache.phoenix.expression.OrderByExpression;
@@ -50,7 +51,6 @@ import org.apache.phoenix.iterate.OrderedResultIterator;
import org.apache.phoenix.iterate.RegionScannerResultIterator;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.join.HashJoinInfo;
-import org.apache.phoenix.join.TupleProjector;
import org.apache.phoenix.memory.MemoryManager.MemoryChunk;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.schema.KeyValueSchema;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 3966953..556d69d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.io.WritableUtils;
import org.apache.phoenix.exception.ValueTypeIncompatibleException;
+import org.apache.phoenix.execute.TupleProjector;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.ExpressionType;
import org.apache.phoenix.expression.aggregator.Aggregator;
@@ -66,7 +67,6 @@ import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
import org.apache.phoenix.index.PhoenixIndexCodec;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.join.HashJoinInfo;
-import org.apache.phoenix.join.TupleProjector;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.query.QueryServicesOptions;
import org.apache.phoenix.schema.ConstraintViolationException;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java
new file mode 100644
index 0000000..8cd5a40
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java
@@ -0,0 +1,230 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.execute;
+
+import static org.apache.phoenix.query.QueryConstants.*;
+
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.compile.ExplainPlan;
+import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
+import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
+import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.compile.RowProjector;
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.OrderByExpression;
+import org.apache.phoenix.expression.aggregator.Aggregators;
+import org.apache.phoenix.expression.aggregator.ServerAggregators;
+import org.apache.phoenix.iterate.AggregatingResultIterator;
+import org.apache.phoenix.iterate.BaseGroupedAggregatingResultIterator;
+import org.apache.phoenix.iterate.DistinctAggregatingResultIterator;
+import org.apache.phoenix.iterate.FilterAggregatingResultIterator;
+import org.apache.phoenix.iterate.FilterResultIterator;
+import org.apache.phoenix.iterate.GroupedAggregatingResultIterator;
+import org.apache.phoenix.iterate.LimitingResultIterator;
+import org.apache.phoenix.iterate.LookAheadResultIterator;
+import org.apache.phoenix.iterate.OrderedAggregatingResultIterator;
+import org.apache.phoenix.iterate.OrderedResultIterator;
+import org.apache.phoenix.iterate.PeekingResultIterator;
+import org.apache.phoenix.iterate.ResultIterator;
+import org.apache.phoenix.iterate.SequenceResultIterator;
+import org.apache.phoenix.iterate.UngroupedAggregatingResultIterator;
+import org.apache.phoenix.parse.FilterableStatement;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.util.TupleUtil;
+
+import com.google.common.collect.Lists;
+
+public class ClientAggregatePlan extends ClientProcessingPlan {
+ private final GroupBy groupBy;
+ private final Expression having;
+ private final Aggregators serverAggregators;
+ private final Aggregators clientAggregators;
+
+ public ClientAggregatePlan(StatementContext context, FilterableStatement statement, TableRef table, RowProjector projector,
+ Integer limit, Expression where, OrderBy orderBy, GroupBy groupBy, Expression having, QueryPlan delegate) {
+ super(context, statement, table, projector, limit, where, orderBy, delegate);
+ this.groupBy = groupBy;
+ this.having = having;
+ this.serverAggregators =
+ ServerAggregators.deserialize(context.getScan()
+ .getAttribute(BaseScannerRegionObserver.AGGREGATORS), QueryServicesOptions.withDefaults().getConfiguration());
+ this.clientAggregators = context.getAggregationManager().getAggregators();
+ }
+
+ @Override
+ public ResultIterator iterator() throws SQLException {
+ ResultIterator iterator = delegate.iterator();
+ if (where != null) {
+ iterator = new FilterResultIterator(iterator, where);
+ }
+
+ AggregatingResultIterator aggResultIterator;
+ if (groupBy.isEmpty()) {
+ aggResultIterator = new ClientUngroupedAggregatingResultIterator(LookAheadResultIterator.wrap(iterator), serverAggregators);
+ aggResultIterator = new UngroupedAggregatingResultIterator(LookAheadResultIterator.wrap(aggResultIterator), clientAggregators);
+ } else {
+ if (!groupBy.isOrderPreserving()) {
+ int thresholdBytes = context.getConnection().getQueryServices().getProps().getInt(
+ QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES);
+ List<Expression> keyExpressions = groupBy.getKeyExpressions();
+ List<OrderByExpression> keyExpressionOrderBy = Lists.newArrayListWithExpectedSize(keyExpressions.size());
+ for (Expression keyExpression : keyExpressions) {
+ keyExpressionOrderBy.add(new OrderByExpression(keyExpression, false, true));
+ }
+ iterator = new OrderedResultIterator(iterator, keyExpressionOrderBy, thresholdBytes, limit, projector.getEstimatedRowByteSize());
+ }
+ aggResultIterator = new ClientGroupedAggregatingResultIterator(LookAheadResultIterator.wrap(iterator), serverAggregators, groupBy.getExpressions());
+ aggResultIterator = new GroupedAggregatingResultIterator(LookAheadResultIterator.wrap(aggResultIterator), clientAggregators);
+ }
+
+ if (having != null) {
+ aggResultIterator = new FilterAggregatingResultIterator(aggResultIterator, having);
+ }
+
+ if (statement.isDistinct() && statement.isAggregate()) { // Dedup on client if select distinct and aggregation
+ aggResultIterator = new DistinctAggregatingResultIterator(aggResultIterator, getProjector());
+ }
+
+ ResultIterator resultScanner = aggResultIterator;
+ if (orderBy.getOrderByExpressions().isEmpty()) {
+ if (limit != null) {
+ resultScanner = new LimitingResultIterator(aggResultIterator, limit);
+ }
+ } else {
+ int thresholdBytes = context.getConnection().getQueryServices().getProps().getInt(
+ QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES);
+ resultScanner = new OrderedAggregatingResultIterator(aggResultIterator, orderBy.getOrderByExpressions(), thresholdBytes, limit);
+ }
+ if (context.getSequenceManager().getSequenceCount() > 0) {
+ resultScanner = new SequenceResultIterator(resultScanner, context.getSequenceManager());
+ }
+
+ return resultScanner;
+ }
+
+ @Override
+ public ExplainPlan getExplainPlan() throws SQLException {
+ List<String> planSteps = Lists.newArrayList(delegate.getExplainPlan().getPlanSteps());
+ if (where != null) {
+ planSteps.add("CLIENT FILTER BY " + where.toString());
+ }
+ if (!groupBy.isEmpty()) {
+ if (!groupBy.isOrderPreserving()) {
+ planSteps.add("CLIENT SORTED BY " + groupBy.getKeyExpressions().toString());
+ }
+ planSteps.add("CLIENT AGGREGATE INTO DISTINCT ROWS BY " + groupBy.getExpressions().toString());
+ } else {
+ planSteps.add("CLIENT AGGREGATE INTO SINGLE ROW");
+ }
+ if (having != null) {
+ planSteps.add("CLIENT AFTER-AGGREGATION FILTER BY " + having.toString());
+ }
+ if (statement.isDistinct() && statement.isAggregate()) {
+ planSteps.add("CLIENT DISTINCT ON " + projector.toString());
+ }
+ if (orderBy.getOrderByExpressions().isEmpty()) {
+ if (limit != null) {
+ planSteps.add("CLIENT " + limit + " ROW LIMIT");
+ }
+ } else {
+ planSteps.add("CLIENT" + (limit == null ? "" : " TOP " + limit + " ROW" + (limit == 1 ? "" : "S")) + " SORTED BY " + orderBy.getOrderByExpressions().toString());
+ }
+ if (context.getSequenceManager().getSequenceCount() > 0) {
+ int nSequences = context.getSequenceManager().getSequenceCount();
+ planSteps.add("CLIENT RESERVE VALUES FROM " + nSequences + " SEQUENCE" + (nSequences == 1 ? "" : "S"));
+ }
+
+ return new ExplainPlan(planSteps);
+ }
+
+ @Override
+ public GroupBy getGroupBy() {
+ return groupBy;
+ }
+
+ private static class ClientGroupedAggregatingResultIterator extends BaseGroupedAggregatingResultIterator {
+ private final List<Expression> groupByExpressions;
+
+ public ClientGroupedAggregatingResultIterator(PeekingResultIterator iterator, Aggregators aggregators, List<Expression> groupByExpressions) {
+ super(iterator, aggregators);
+ this.groupByExpressions = groupByExpressions;
+ }
+
+ @Override
+ protected ImmutableBytesWritable getGroupingKey(Tuple tuple,
+ ImmutableBytesWritable ptr) throws SQLException {
+ try {
+ ImmutableBytesWritable key = TupleUtil.getConcatenatedValue(tuple, groupByExpressions);
+ ptr.set(key.get(), key.getOffset(), key.getLength());
+ return ptr;
+ } catch (IOException e) {
+ throw new SQLException(e);
+ }
+ }
+
+ @Override
+ protected Tuple wrapKeyValueAsResult(KeyValue keyValue) {
+ return new MultiKeyValueTuple(Collections.<KeyValue> singletonList(keyValue));
+ }
+
+ @Override
+ public String toString() {
+ return "ClientGroupedAggregatingResultIterator [resultIterator="
+ + resultIterator + ", aggregators=" + aggregators + ", groupByExpressions="
+ + groupByExpressions + "]";
+ }
+ }
+
+ private static class ClientUngroupedAggregatingResultIterator extends BaseGroupedAggregatingResultIterator {
+
+ public ClientUngroupedAggregatingResultIterator(PeekingResultIterator iterator, Aggregators aggregators) {
+ super(iterator, aggregators);
+ }
+
+ @Override
+ protected ImmutableBytesWritable getGroupingKey(Tuple tuple,
+ ImmutableBytesWritable ptr) throws SQLException {
+ ptr.set(UNGROUPED_AGG_ROW_KEY);
+ return ptr;
+ }
+
+ @Override
+ protected Tuple wrapKeyValueAsResult(KeyValue keyValue)
+ throws SQLException {
+ return new MultiKeyValueTuple(Collections.<KeyValue> singletonList(keyValue));
+ }
+
+ @Override
+ public String toString() {
+ return "ClientUngroupedAggregatingResultIterator [resultIterator="
+ + resultIterator + ", aggregators=" + aggregators + "]";
+ }
+ }
+}
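An aside on the iterator() implementation above: the plan assembles its result pipeline by wrapping one ResultIterator in the next, in a fixed order (grouped aggregation, then the HAVING filter, then DISTINCT de-duplication, then sort/limit, then sequence reservation). The self-contained sketch below shows that decorator-style chaining in plain Java; the filtering/limiting helpers are simplified stand-ins written for illustration, not Phoenix's actual iterator classes.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.function.Predicate;

public final class IteratorChainSketch {

    // Yields only rows that pass the predicate, analogous to the
    // HAVING filter step applied by FilterAggregatingResultIterator above.
    static <T> Iterator<T> filtering(Iterator<T> delegate, Predicate<T> keep) {
        return new Iterator<T>() {
            private T next;
            private boolean ready;

            @Override
            public boolean hasNext() {
                while (!ready && delegate.hasNext()) {
                    T candidate = delegate.next();
                    if (keep.test(candidate)) {
                        next = candidate;
                        ready = true;
                    }
                }
                return ready;
            }

            @Override
            public T next() {
                if (!hasNext()) throw new NoSuchElementException();
                ready = false;
                return next;
            }
        };
    }

    // Stops after 'limit' rows, analogous to the LimitingResultIterator step.
    static <T> Iterator<T> limiting(Iterator<T> delegate, int limit) {
        return new Iterator<T>() {
            private int seen;

            @Override
            public boolean hasNext() {
                return seen < limit && delegate.hasNext();
            }

            @Override
            public T next() {
                if (!hasNext()) throw new NoSuchElementException();
                seen++;
                return delegate.next();
            }
        };
    }

    public static void main(String[] args) {
        List<Integer> rows = Arrays.asList(1, 2, 3, 4, 5, 6);
        // Chain the decorators in plan order: filter first, then limit.
        Iterator<Integer> it = limiting(filtering(rows.iterator(), n -> n % 2 == 0), 2);
        List<Integer> out = new ArrayList<Integer>();
        while (it.hasNext()) {
            out.add(it.next());
        }
        System.out.println(out); // prints [2, 4]
    }
}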
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientProcessingPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientProcessingPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientProcessingPlan.java
new file mode 100644
index 0000000..8e787b4
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientProcessingPlan.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.execute;
+
+import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.compile.RowProjector;
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.parse.FilterableStatement;
+import org.apache.phoenix.schema.TableRef;
+
+/**
+ * Query plan that performs WHERE filtering, ORDER BY sorting and LIMIT
+ * processing on the client side, for derived-table queries that cannot
+ * be flattened by SubselectRewriter.
+ */
+public abstract class ClientProcessingPlan extends DelegateQueryPlan {
+ protected final StatementContext context;
+ protected final FilterableStatement statement;
+ protected final TableRef table;
+ protected final RowProjector projector;
+ protected final Integer limit;
+ protected final Expression where;
+ protected final OrderBy orderBy;
+
+ public ClientProcessingPlan(StatementContext context, FilterableStatement statement, TableRef table,
+ RowProjector projector, Integer limit, Expression where, OrderBy orderBy, QueryPlan delegate) {
+ super(delegate);
+ this.context = context;
+ this.statement = statement;
+ this.table = table;
+ this.projector = projector;
+ this.limit = limit;
+ this.where = where;
+ this.orderBy = orderBy;
+ }
+
+ @Override
+ public StatementContext getContext() {
+ return context;
+ }
+
+ @Override
+ public TableRef getTableRef() {
+ return table;
+ }
+
+ @Override
+ public RowProjector getProjector() {
+ return projector;
+ }
+
+ @Override
+ public Integer getLimit() {
+ return limit;
+ }
+
+ @Override
+ public OrderBy getOrderBy() {
+ return orderBy;
+ }
+
+ @Override
+ public FilterableStatement getStatement() {
+ return statement;
+ }
+}
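To make "derived-table queries that cannot be flattened" concrete, here is a minimal, hypothetical JDBC example; the table and column names (emp, dept, salary) are invented, and the URL assumes a locally running quorum with the Phoenix client jar on the classpath. The inner GROUP BY executes as the server-side plan, while the outer ORDER BY ... LIMIT over the derived table is the kind of step a subclass of this plan performs on the client.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class DerivedTableExample {
    public static void main(String[] args) throws Exception {
        Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
        try {
            Statement stmt = conn.createStatement();
            // The subquery is the derived table; it cannot be flattened into
            // the outer query because the outer ORDER BY and LIMIT apply to
            // the already-aggregated result.
            ResultSet rs = stmt.executeQuery(
                "SELECT dept, total FROM ("
                + " SELECT dept, SUM(salary) AS total FROM emp GROUP BY dept"
                + ") ORDER BY total DESC LIMIT 3");
            while (rs.next()) {
                System.out.println(rs.getString(1) + " " + rs.getLong(2));
            }
        } finally {
            conn.close();
        }
    }
}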
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientScanPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientScanPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientScanPlan.java
new file mode 100644
index 0000000..01fbd11
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientScanPlan.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.execute;
+
+import java.sql.SQLException;
+import java.util.List;
+
+import org.apache.phoenix.compile.ExplainPlan;
+import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
+import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.compile.RowProjector;
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.iterate.FilterResultIterator;
+import org.apache.phoenix.iterate.LimitingResultIterator;
+import org.apache.phoenix.iterate.OrderedResultIterator;
+import org.apache.phoenix.iterate.ResultIterator;
+import org.apache.phoenix.iterate.SequenceResultIterator;
+import org.apache.phoenix.parse.FilterableStatement;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.TableRef;
+
+import com.google.common.collect.Lists;
+
+public class ClientScanPlan extends ClientProcessingPlan {
+
+ public ClientScanPlan(StatementContext context,
+ FilterableStatement statement, TableRef table,
+ RowProjector projector, Integer limit, Expression where,
+ OrderBy orderBy, QueryPlan delegate) {
+ super(context, statement, table, projector, limit, where, orderBy,
+ delegate);
+ }
+
+ @Override
+ public ResultIterator iterator() throws SQLException {
+ ResultIterator iterator = delegate.iterator();
+ if (where != null) {
+ iterator = new FilterResultIterator(iterator, where);
+ }
+
+ if (!orderBy.getOrderByExpressions().isEmpty()) { // TopN
+ int thresholdBytes = context.getConnection().getQueryServices().getProps().getInt(
+ QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES);
+ iterator = new OrderedResultIterator(iterator, orderBy.getOrderByExpressions(), thresholdBytes, limit, projector.getEstimatedRowByteSize());
+ } else if (limit != null) {
+ iterator = new LimitingResultIterator(iterator, limit);
+ }
+
+ if (context.getSequenceManager().getSequenceCount() > 0) {
+ iterator = new SequenceResultIterator(iterator, context.getSequenceManager());
+ }
+
+ return iterator;
+ }
+
+ @Override
+ public ExplainPlan getExplainPlan() throws SQLException {
+ List<String> planSteps = Lists.newArrayList(delegate.getExplainPlan().getPlanSteps());
+ if (where != null) {
+ planSteps.add("CLIENT FILTER BY " + where.toString());
+ }
+ if (!orderBy.getOrderByExpressions().isEmpty()) {
+ planSteps.add("CLIENT" + (limit == null ? "" : " TOP " + limit + " ROW" + (limit == 1 ? "" : "S")) + " SORTED BY " + orderBy.getOrderByExpressions().toString());
+ } else if (limit != null) {
+ planSteps.add("CLIENT " + limit + " ROW LIMIT");
+ }
+ if (context.getSequenceManager().getSequenceCount() > 0) {
+ int nSequences = context.getSequenceManager().getSequenceCount();
+ planSteps.add("CLIENT RESERVE VALUES FROM " + nSequences + " SEQUENCE" + (nSequences == 1 ? "" : "S"));
+ }
+
+ return new ExplainPlan(planSteps);
+ }
+
+}
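On the spool threshold read above: the ordered iterator buffers rows in memory up to SPOOL_THRESHOLD_BYTES_ATTRIB and spools to disk beyond that. Below is a hedged sketch of overriding it from client code; it assumes the property key is "phoenix.query.spoolThresholdBytes" (the value of SPOOL_THRESHOLD_BYTES_ATTRIB in this codebase, worth verifying against your version) and a local quorum.

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class SpoolThresholdExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Spill client-side ordered results to disk once the in-memory
        // buffer exceeds ~1 MB, rather than the default threshold.
        props.setProperty("phoenix.query.spoolThresholdBytes", "1048576");
        Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
        conn.close();
    }
}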
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
index 866e506..bc689ca 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
@@ -49,6 +49,7 @@ import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.InListExpression;
import org.apache.phoenix.expression.LiteralExpression;
import org.apache.phoenix.expression.RowValueConstructorExpression;
+import org.apache.phoenix.iterate.FilterResultIterator;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.job.JobManager.JobCallable;
import org.apache.phoenix.join.HashCacheClient;
@@ -73,7 +74,7 @@ import com.google.common.collect.Lists;
public class HashJoinPlan extends DelegateQueryPlan {
private static final Log LOG = LogFactory.getLog(HashJoinPlan.class);
- private final FilterableStatement statement;
+ private final SelectStatement statement;
private final HashJoinInfo joinInfo;
private final SubPlan[] subPlans;
private final boolean recompileWhereClause;
@@ -85,14 +86,13 @@ public class HashJoinPlan extends DelegateQueryPlan {
private AtomicLong firstJobEndTime;
private List<Expression> keyRangeExpressions;
- public static HashJoinPlan create(FilterableStatement statement,
+ public static HashJoinPlan create(SelectStatement statement,
QueryPlan plan, HashJoinInfo joinInfo, SubPlan[] subPlans) {
- if (plan instanceof BaseQueryPlan)
+ if (!(plan instanceof HashJoinPlan))
return new HashJoinPlan(statement, plan, joinInfo, subPlans, joinInfo == null);
- assert (plan instanceof HashJoinPlan);
HashJoinPlan hashJoinPlan = (HashJoinPlan) plan;
- assert hashJoinPlan.joinInfo == null;
+ assert (hashJoinPlan.joinInfo == null && hashJoinPlan.delegate instanceof BaseQueryPlan);
SubPlan[] mergedSubPlans = new SubPlan[hashJoinPlan.subPlans.length + subPlans.length];
int i = 0;
for (SubPlan subPlan : hashJoinPlan.subPlans) {
@@ -104,7 +104,7 @@ public class HashJoinPlan extends DelegateQueryPlan {
return new HashJoinPlan(statement, hashJoinPlan.delegate, joinInfo, mergedSubPlans, true);
}
- private HashJoinPlan(FilterableStatement statement,
+ private HashJoinPlan(SelectStatement statement,
QueryPlan plan, HashJoinInfo joinInfo, SubPlan[] subPlans, boolean recompileWhereClause) {
super(plan);
this.statement = statement;
@@ -166,6 +166,7 @@ public class HashJoinPlan extends DelegateQueryPlan {
throw firstException;
}
+ Expression postFilter = null;
boolean hasKeyRangeExpressions = keyRangeExpressions != null && !keyRangeExpressions.isEmpty();
if (recompileWhereClause || hasKeyRangeExpressions) {
StatementContext context = delegate.getContext();
@@ -173,10 +174,10 @@ public class HashJoinPlan extends DelegateQueryPlan {
ParseNode viewWhere = table.getViewStatement() == null ? null : new SQLParser(table.getViewStatement()).parseQuery().getWhere();
context.setResolver(FromCompiler.getResolverForQuery((SelectStatement) (delegate.getStatement()), delegate.getContext().getConnection()));
if (recompileWhereClause) {
- WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere);
+ postFilter = WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere, null);
}
if (hasKeyRangeExpressions) {
- WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere, keyRangeExpressions, true);
+ WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere, keyRangeExpressions, true, null);
}
}
@@ -185,7 +186,12 @@ public class HashJoinPlan extends DelegateQueryPlan {
HashJoinInfo.serializeHashJoinIntoScan(scan, joinInfo);
}
- return ((BaseQueryPlan) delegate).iterator(dependencies);
+ ResultIterator iterator = joinInfo == null ? delegate.iterator() : ((BaseQueryPlan) delegate).iterator(dependencies);
+ if (statement.getInnerSelectStatement() != null && postFilter != null) {
+ iterator = new FilterResultIterator(iterator, postFilter);
+ }
+
+ return iterator;
}
private Expression createKeyRangeExpression(Expression lhsExpression,
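For readers skimming the create() change above: when the passed-in plan is already a HashJoinPlan, its existing sub-plans are concatenated with the new ones before the merged plan is constructed. A minimal sketch of that array concatenation follows, with SubPlan as a stand-in marker type rather than Phoenix's interface.

import java.util.Arrays;

public class MergeSubPlansSketch {
    interface SubPlan {} // stand-in marker type, not Phoenix's interface

    // Concatenates the existing plan's sub-plans with the newly added ones,
    // mirroring the merge loop in HashJoinPlan.create() above.
    static SubPlan[] merge(SubPlan[] existing, SubPlan[] added) {
        SubPlan[] merged = Arrays.copyOf(existing, existing.length + added.length);
        System.arraycopy(added, 0, merged, existing.length, added.length);
        return merged;
    }

    public static void main(String[] args) {
        SubPlan a = new SubPlan() {};
        SubPlan b = new SubPlan() {};
        System.out.println(merge(new SubPlan[] { a }, new SubPlan[] { b }).length); // prints 2
    }
}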
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjectionPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjectionPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjectionPlan.java
index 410d386..c9cbd15 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjectionPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjectionPlan.java
@@ -20,14 +20,12 @@ package org.apache.phoenix.execute;
import java.sql.SQLException;
import java.util.List;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.compile.ExplainPlan;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.iterate.DelegateResultIterator;
+import org.apache.phoenix.iterate.FilterResultIterator;
import org.apache.phoenix.iterate.ResultIterator;
-import org.apache.phoenix.join.TupleProjector;
-import org.apache.phoenix.schema.IllegalDataException;
import org.apache.phoenix.schema.tuple.Tuple;
import com.google.common.collect.Lists;
@@ -49,52 +47,33 @@ public class TupleProjectionPlan extends DelegateQueryPlan {
if (postFilter != null) {
planSteps.add("CLIENT FILTER BY " + postFilter.toString());
}
-
+
return new ExplainPlan(planSteps);
}
@Override
public ResultIterator iterator() throws SQLException {
- final ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
-
- return new DelegateResultIterator(delegate.iterator()) {
+ ResultIterator iterator = new DelegateResultIterator(delegate.iterator()) {
@Override
public Tuple next() throws SQLException {
- Tuple tuple = null;
- while (tuple == null) {
- tuple = super.next();
- if (tuple == null) {
- break;
- }
-
- tuple = tupleProjector.projectResults(tuple);
-
- if (postFilter != null) {
- postFilter.reset();
- try {
- if (postFilter.evaluate(tuple, tempPtr)) {
- Boolean b = (Boolean)postFilter.getDataType().toObject(tempPtr);
- if (!b.booleanValue()) {
- tuple = null;
- }
- } else {
- tuple = null;
- }
- } catch (IllegalDataException e) {
- tuple = null;
- }
- }
- }
+ Tuple tuple = super.next();
+ if (tuple == null)
+ return null;
- return tuple;
+ return tupleProjector.projectResults(tuple);
}
@Override
public String toString() {
- return "TupleProjectionResultIterator [projector=" + tupleProjector + ", postFilter="
- + postFilter + "]";
+ return "TupleProjectionResultIterator [projector=" + tupleProjector + "]";
}
};
+
+ if (postFilter != null) {
+ iterator = new FilterResultIterator(iterator, postFilter);
+ }
+
+ return iterator;
}
}
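The net effect of the rewrite above: the projection iterator now only projects, and post-filtering is delegated to the shared FilterResultIterator rather than an inline skip loop. A small sketch of that project-then-filter composition, using plain Java streams with stand-in types rather than Phoenix's Tuple:

import java.util.Arrays;
import java.util.List;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;

public class ProjectThenFilterSketch {
    public static void main(String[] args) {
        List<String> rows = Arrays.asList("alice:30", "bob:17", "carol:45");
        // Projection stage: analogous to tupleProjector.projectResults(tuple).
        Function<String, Integer> project = r -> Integer.parseInt(r.split(":")[1]);
        // Post-filter stage: analogous to wrapping in FilterResultIterator.
        Predicate<Integer> postFilter = age -> age >= 18;
        List<Integer> out = rows.stream().map(project).filter(postFilter)
                                .collect(Collectors.toList());
        System.out.println(out); // prints [30, 45]
    }
}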
[08/15] git commit: PHOENIX-1382: Phoenix 4.2 RC Issue
Posted by ja...@apache.org.
PHOENIX-1382: Phoenix 4.2 RC Issue
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ca6c08f0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ca6c08f0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ca6c08f0
Branch: refs/heads/3.2
Commit: ca6c08f044626386c6a988906617dfc71310d390
Parents: 0d90e2f
Author: Jeffrey Zhong <je...@apache.org>
Authored: Sun Oct 26 22:07:13 2014 -0700
Committer: Jeffrey Zhong <je...@apache.org>
Committed: Sun Oct 26 22:17:16 2014 -0700
----------------------------------------------------------------------
bin/phoenix_utils.py | 2 +-
.../main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca6c08f0/bin/phoenix_utils.py
----------------------------------------------------------------------
diff --git a/bin/phoenix_utils.py b/bin/phoenix_utils.py
index 4f7d9c3..2331ae9 100755
--- a/bin/phoenix_utils.py
+++ b/bin/phoenix_utils.py
@@ -49,7 +49,7 @@ def findFileInPathWithoutRecursion(pattern, path):
return ""
def setPath():
- PHOENIX_CLIENT_JAR_PATTERN = "phoenix-*-client*.jar"
+ PHOENIX_CLIENT_JAR_PATTERN = "phoenix-*-client.jar"
PHOENIX_TESTS_JAR_PATTERN = "phoenix-*-tests*.jar"
global current_dir
current_dir = os.path.dirname(os.path.abspath(__file__))
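A note on the tightened jar pattern above, illustrated in Java since the surrounding commits are Java (the Python code matches bare file names, where these wildcard semantics coincide): the old pattern also matched any sibling jar whose name merely contained "-client", while the new one matches only the full client jar. The file names below are hypothetical examples.

import java.nio.file.FileSystems;
import java.nio.file.Path;
import java.nio.file.PathMatcher;
import java.nio.file.Paths;

public class ClientJarPatternSketch {
    public static void main(String[] args) {
        PathMatcher loose = FileSystems.getDefault().getPathMatcher("glob:phoenix-*-client*.jar");
        PathMatcher tight = FileSystems.getDefault().getPathMatcher("glob:phoenix-*-client.jar");
        Path full = Paths.get("phoenix-4.2.0-client.jar");          // hypothetical name
        Path sibling = Paths.get("phoenix-4.2.0-client-minimal.jar"); // hypothetical name
        System.out.println(loose.matches(full) + " " + loose.matches(sibling)); // true true
        System.out.println(tight.matches(full) + " " + tight.matches(sibling)); // true false
    }
}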
http://git-wip-us.apache.org/repos/asf/phoenix/blob/ca6c08f0/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index 3944d9e..7979757 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -56,7 +56,7 @@ import com.google.common.collect.Lists;
*/
public interface MetaDataProtocol extends CoprocessorProtocol {
public static final int PHOENIX_MAJOR_VERSION = 3;
- public static final int PHOENIX_MINOR_VERSION = 1;
+ public static final int PHOENIX_MINOR_VERSION = 2;
public static final int PHOENIX_PATCH_NUMBER = 0;
public static final int PHOENIX_VERSION =
VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER);
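The PHOENIX_VERSION constant above is computed by VersionUtil.encodeVersion from the three components. As a hedged sketch of the general technique (the byte-per-component layout below is an assumption for illustration, not copied from VersionUtil), packing one component per byte yields a single int that compares in version order:

public class VersionEncodeSketch {
    // Assumed layout: one byte each for major, minor and patch.
    static int encode(int major, int minor, int patch) {
        return (major << 16) | (minor << 8) | patch;
    }

    public static void main(String[] args) {
        int v = encode(3, 2, 0);
        System.out.printf("0x%06X -> %d.%d.%d%n",
            v, (v >> 16) & 0xFF, (v >> 8) & 0xFF, v & 0xFF); // 0x030200 -> 3.2.0
    }
}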
[11/15] git commit: PHOENIX-1376 java.lang.NullPointerException occurs in JDBC driver
Posted by ja...@apache.org.
PHOENIX-1376 java.lang.NullPointerException occurs in JDBC driver
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/72144f17
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/72144f17
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/72144f17
Branch: refs/heads/3.2
Commit: 72144f17884518b1152b83d294b77f10c2a743a5
Parents: 2681601
Author: James Taylor <jt...@salesforce.com>
Authored: Mon Oct 27 13:40:47 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Mon Oct 27 13:45:38 2014 -0700
----------------------------------------------------------------------
.../src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java | 5 -----
.../main/java/org/apache/phoenix/schema/tuple/ResultTuple.java | 2 +-
2 files changed, 1 insertion(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/72144f17/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
index e662a3f..8a6cf64 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
@@ -1232,9 +1232,4 @@ public class PhoenixResultSet implements ResultSet, SQLCloseable, org.apache.pho
public <T> T getObject(String columnLabel, Class<T> type) throws SQLException {
return (T) getObject(columnLabel); // Just ignore type since we only support built-in types
}
-
- @Override
- public String toString(){
- return "ResultSet:\n"+ "\tclosed: "+this.isClosed+"\n\tcurrent row: "+currentRow;
- }
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/phoenix/blob/72144f17/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
index 02aebf7..f0dbd9e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
@@ -62,7 +62,7 @@ public class ResultTuple extends BaseTuple {
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("keyvalues=");
- if(this.result.isEmpty()) {
+ if(this.result == null || this.result.isEmpty()) {
sb.append("NONE");
return sb.toString();
}
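The one-line change above is the actual NPE fix: toString() must never throw, because debuggers and log statements call it on partially initialized objects. Below is a self-contained illustration of the same null-guard pattern, with List<String> standing in for the HBase Result field.

import java.util.Arrays;
import java.util.List;

public class NullSafeToStringSketch {
    private final List<String> result; // stand-in for the HBase Result field

    NullSafeToStringSketch(List<String> result) {
        this.result = result;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("keyvalues=");
        // Guard against null as well as empty, exactly as the fix above does.
        if (result == null || result.isEmpty()) {
            return sb.append("NONE").toString();
        }
        return sb.append(result).toString();
    }

    public static void main(String[] args) {
        System.out.println(new NullSafeToStringSketch(null));               // keyvalues=NONE
        System.out.println(new NullSafeToStringSketch(Arrays.asList("a"))); // keyvalues=[a]
    }
}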
[03/15] PHOENIX-944 Support derived tables in FROM clause that need extra steps of client-side aggregation or other processing
Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2bdc33bc/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java.orig
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java.orig b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java.orig
deleted file mode 100644
index 8bb91b4..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java.orig
+++ /dev/null
@@ -1,2197 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.schema;
-
-import static com.google.common.collect.Lists.newArrayListWithExpectedSize;
-import static com.google.common.collect.Sets.newLinkedHashSet;
-import static com.google.common.collect.Sets.newLinkedHashSetWithExpectedSize;
-import static org.apache.phoenix.exception.SQLExceptionCode.INSUFFICIENT_MULTI_TENANT_COLUMNS;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_COUNT;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_FAMILY;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_SIZE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DATA_TABLE_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DATA_TYPE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DECIMAL_DIGITS;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DISABLE_WAL;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IMMUTABLE_ROWS;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_STATE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_VIEW_REFERENCED;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.KEY_SEQ;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LAST_STATS_UPDATE_TIME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MULTI_TENANT;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NULLABLE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ORDINAL_POSITION;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PHYSICAL_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PK_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.REGION_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SALT_BUCKETS;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SORT_ORDER;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SEQ_NUM;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_TYPE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TENANT_ID;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE;
-import static org.apache.phoenix.query.QueryServices.DROP_METADATA_ATTRIB;
-import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_DROP_METADATA;
-import static org.apache.phoenix.schema.PDataType.VARCHAR;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.ResultSetMetaData;
-import java.sql.SQLException;
-import java.sql.SQLFeatureNotSupportedException;
-import java.sql.Types;
-import java.util.BitSet;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.Iterator;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.io.TimeRange;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.phoenix.compile.ColumnResolver;
-import org.apache.phoenix.compile.FromCompiler;
-import org.apache.phoenix.compile.MutationPlan;
-import org.apache.phoenix.compile.PostDDLCompiler;
-import org.apache.phoenix.compile.PostIndexDDLCompiler;
-import org.apache.phoenix.compile.QueryPlan;
-import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
-import org.apache.phoenix.coprocessor.MetaDataProtocol;
-import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
-import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
-import org.apache.phoenix.exception.SQLExceptionCode;
-import org.apache.phoenix.exception.SQLExceptionInfo;
-import org.apache.phoenix.execute.MutationState;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
-import org.apache.phoenix.jdbc.PhoenixStatement;
-import org.apache.phoenix.parse.AddColumnStatement;
-import org.apache.phoenix.parse.AlterIndexStatement;
-import org.apache.phoenix.parse.ColumnDef;
-import org.apache.phoenix.parse.ColumnName;
-import org.apache.phoenix.parse.CreateIndexStatement;
-import org.apache.phoenix.parse.CreateSequenceStatement;
-import org.apache.phoenix.parse.CreateTableStatement;
-import org.apache.phoenix.parse.DropColumnStatement;
-import org.apache.phoenix.parse.DropIndexStatement;
-import org.apache.phoenix.parse.DropSequenceStatement;
-import org.apache.phoenix.parse.DropTableStatement;
-import org.apache.phoenix.parse.NamedTableNode;
-import org.apache.phoenix.parse.ParseNodeFactory;
-import org.apache.phoenix.parse.PrimaryKeyConstraint;
-import org.apache.phoenix.parse.TableName;
-import org.apache.phoenix.parse.UpdateStatisticsStatement;
-import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.schema.PTable.LinkType;
-import org.apache.phoenix.schema.PTable.ViewType;
-import org.apache.phoenix.util.ByteUtil;
-import org.apache.phoenix.util.IndexUtil;
-import org.apache.phoenix.util.MetaDataUtil;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.SchemaUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Objects;
-import com.google.common.collect.Iterators;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.primitives.Ints;
-
-public class MetaDataClient {
- private static final Logger logger = LoggerFactory.getLogger(MetaDataClient.class);
-
- private static final ParseNodeFactory FACTORY = new ParseNodeFactory();
- private static final String CREATE_TABLE =
- "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " +
- TENANT_ID + "," +
- TABLE_SCHEM + "," +
- TABLE_NAME + "," +
- TABLE_TYPE + "," +
- TABLE_SEQ_NUM + "," +
- COLUMN_COUNT + "," +
- SALT_BUCKETS + "," +
- PK_NAME + "," +
- DATA_TABLE_NAME + "," +
- INDEX_STATE + "," +
- IMMUTABLE_ROWS + "," +
- DEFAULT_COLUMN_FAMILY_NAME + "," +
- VIEW_STATEMENT + "," +
- DISABLE_WAL + "," +
- MULTI_TENANT + "," +
- VIEW_TYPE + "," +
- VIEW_INDEX_ID +
- ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
- private static final String CREATE_LINK =
- "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " +
- TENANT_ID + "," +
- TABLE_SCHEM + "," +
- TABLE_NAME + "," +
- COLUMN_FAMILY + "," +
- LINK_TYPE +
- ") VALUES (?, ?, ?, ?, ?)";
- private static final String INCREMENT_SEQ_NUM =
- "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " +
- TENANT_ID + "," +
- TABLE_SCHEM + "," +
- TABLE_NAME + "," +
- TABLE_SEQ_NUM +
- ") VALUES (?, ?, ?, ?)";
- private static final String MUTATE_TABLE =
- "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " +
- TENANT_ID + "," +
- TABLE_SCHEM + "," +
- TABLE_NAME + "," +
- TABLE_TYPE + "," +
- TABLE_SEQ_NUM + "," +
- COLUMN_COUNT +
- ") VALUES (?, ?, ?, ?, ?, ?)";
- private static final String MUTATE_MULTI_TENANT =
- "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " +
- TENANT_ID + "," +
- TABLE_SCHEM + "," +
- TABLE_NAME + "," +
- MULTI_TENANT +
- ") VALUES (?, ?, ?, ?)";
- private static final String MUTATE_DISABLE_WAL =
- "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " +
- TENANT_ID + "," +
- TABLE_SCHEM + "," +
- TABLE_NAME + "," +
- DISABLE_WAL +
- ") VALUES (?, ?, ?, ?)";
- private static final String MUTATE_IMMUTABLE_ROWS =
- "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " +
- TENANT_ID + "," +
- TABLE_SCHEM + "," +
- TABLE_NAME + "," +
- IMMUTABLE_ROWS +
- ") VALUES (?, ?, ?, ?)";
- private static final String UPDATE_INDEX_STATE =
- "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " +
- TENANT_ID + "," +
- TABLE_SCHEM + "," +
- TABLE_NAME + "," +
- INDEX_STATE +
- ") VALUES (?, ?, ?, ?)";
- private static final String UPDATE_INDEX_STATE_TO_ACTIVE =
- "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " +
- TENANT_ID + "," +
- TABLE_SCHEM + "," +
- TABLE_NAME + "," +
- INDEX_STATE + "," +
- INDEX_DISABLE_TIMESTAMP +
- ") VALUES (?, ?, ?, ?, ?)";
- private static final String INSERT_COLUMN =
- "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " +
- TENANT_ID + "," +
- TABLE_SCHEM + "," +
- TABLE_NAME + "," +
- COLUMN_NAME + "," +
- COLUMN_FAMILY + "," +
- DATA_TYPE + "," +
- NULLABLE + "," +
- COLUMN_SIZE + "," +
- DECIMAL_DIGITS + "," +
- ORDINAL_POSITION + "," +
- SORT_ORDER + "," +
- DATA_TABLE_NAME + "," + // write this both in the column and table rows for access by metadata APIs
- ARRAY_SIZE + "," +
- VIEW_CONSTANT + "," +
- IS_VIEW_REFERENCED + "," +
- PK_NAME + "," + // write this both in the column and table rows for access by metadata APIs
- KEY_SEQ +
- ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
- private static final String UPDATE_COLUMN_POSITION =
- "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\" ( " +
- TENANT_ID + "," +
- TABLE_SCHEM + "," +
- TABLE_NAME + "," +
- COLUMN_NAME + "," +
- COLUMN_FAMILY + "," +
- ORDINAL_POSITION +
- ") VALUES (?, ?, ?, ?, ?, ?)";
-
- private final PhoenixConnection connection;
-
- public MetaDataClient(PhoenixConnection connection) {
- this.connection = connection;
- }
-
- public PhoenixConnection getConnection() {
- return connection;
- }
-
- public long getCurrentTime(String schemaName, String tableName) throws SQLException {
- MetaDataMutationResult result = updateCache(schemaName, tableName, true);
- return result.getMutationTime();
- }
-
- private MetaDataMutationResult updateCache(String schemaName, String tableName, boolean alwaysHitServer)
- throws SQLException {
- return updateCache(connection.getTenantId(), schemaName, tableName, alwaysHitServer);
- }
-
- public MetaDataMutationResult updateCache(PName tenantId, String schemaName, String tableName) throws SQLException {
- return updateCache(tenantId, schemaName, tableName, false);
- }
-
- /**
- * Update the cache with the latest as of the connection scn.
- * @param schemaName
- * @param tableName
- * @return the timestamp from the server, negative if the table was added to the cache and positive otherwise
- * @throws SQLException
- */
- public MetaDataMutationResult updateCache(String schemaName, String tableName) throws SQLException {
- return updateCache(schemaName, tableName, false);
- }
-
- private MetaDataMutationResult updateCache(PName tenantId, String schemaName, String tableName, boolean alwaysHitServer) throws SQLException {
- Long scn = connection.getSCN();
- boolean systemTable = SYSTEM_CATALOG_SCHEMA.equals(schemaName);
- // System tables must always have a null tenantId
- tenantId = systemTable ? null : tenantId;
- long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
- PTable table = null;
- String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
- long tableTimestamp = HConstants.LATEST_TIMESTAMP;
- try {
- table = connection.getMetaDataCache().getTable(new PTableKey(tenantId, fullTableName));
- tableTimestamp = table.getTimeStamp();
- } catch (TableNotFoundException e) {
- // TODO: Try again on services cache, as we may be looking for
- // a global multi-tenant table
- }
- // Don't bother with server call: we can't possibly find a newer table
- if (table != null && !alwaysHitServer && (systemTable || tableTimestamp == clientTimeStamp - 1)) {
- return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS,QueryConstants.UNSET_TIMESTAMP,table);
- }
-
- int maxTryCount = tenantId == null ? 1 : 2;
- int tryCount = 0;
- MetaDataMutationResult result;
-
- do {
- final byte[] schemaBytes = PDataType.VARCHAR.toBytes(schemaName);
- final byte[] tableBytes = PDataType.VARCHAR.toBytes(tableName);
- result = connection.getQueryServices().getTable(tenantId, schemaBytes, tableBytes, tableTimestamp, clientTimeStamp);
-
- if (SYSTEM_CATALOG_SCHEMA.equals(schemaName)) {
- return result;
- }
- MutationCode code = result.getMutationCode();
- PTable resultTable = result.getTable();
- // We found an updated table, so update our cache
- if (resultTable != null) {
- // Cache table, even if multi-tenant table found for null tenant_id
- // These may be accessed by tenant-specific connections, as the
- // tenant_id will always be added to mask other tenants data.
- // Otherwise, a tenant would be required to create a VIEW first
- // which is not really necessary unless you want to filter or add
- // columns
- connection.addTable(resultTable);
- return result;
- } else {
- // if (result.getMutationCode() == MutationCode.NEWER_TABLE_FOUND) {
- // TODO: No table exists at the clientTimestamp, but a newer one exists.
- // Since we disallow creation or modification of a table earlier than the latest
- // timestamp, we can handle this such that we don't ask the
- // server again.
- // If table was not found at the current time stamp and we have one cached, remove it.
- // Otherwise, we're up to date, so there's nothing to do.
- if (table != null) {
- result.setTable(table);
- if (code == MutationCode.TABLE_ALREADY_EXISTS) {
- return result;
- }
- if (code == MutationCode.TABLE_NOT_FOUND && tryCount + 1 == maxTryCount) {
- connection.removeTable(tenantId, fullTableName, table.getParentName() == null ? null : table.getParentName().getString(), table.getTimeStamp());
- }
- }
- }
- tenantId = null; // Try again with global tenantId
- } while (++tryCount < maxTryCount);
-
- return result;
- }
-
-
- private void addColumnMutation(String schemaName, String tableName, PColumn column, PreparedStatement colUpsert, String parentTableName, String pkName, Short keySeq, boolean isSalted) throws SQLException {
- colUpsert.setString(1, connection.getTenantId() == null ? null : connection.getTenantId().getString());
- colUpsert.setString(2, schemaName);
- colUpsert.setString(3, tableName);
- colUpsert.setString(4, column.getName().getString());
- colUpsert.setString(5, column.getFamilyName() == null ? null : column.getFamilyName().getString());
- colUpsert.setInt(6, column.getDataType().getSqlType());
- colUpsert.setInt(7, column.isNullable() ? ResultSetMetaData.columnNullable : ResultSetMetaData.columnNoNulls);
- if (column.getMaxLength() == null) {
- colUpsert.setNull(8, Types.INTEGER);
- } else {
- colUpsert.setInt(8, column.getMaxLength());
- }
- if (column.getScale() == null) {
- colUpsert.setNull(9, Types.INTEGER);
- } else {
- colUpsert.setInt(9, column.getScale());
- }
- colUpsert.setInt(10, column.getPosition() + (isSalted ? 0 : 1));
- colUpsert.setInt(11, column.getSortOrder().getSystemValue());
- colUpsert.setString(12, parentTableName);
- if (column.getArraySize() == null) {
- colUpsert.setNull(13, Types.INTEGER);
- } else {
- colUpsert.setInt(13, column.getArraySize());
- }
- colUpsert.setBytes(14, column.getViewConstant());
- colUpsert.setBoolean(15, column.isViewReferenced());
- colUpsert.setString(16, pkName);
- if (keySeq == null) {
- colUpsert.setNull(17, Types.SMALLINT);
- } else {
- colUpsert.setShort(17, keySeq);
- }
- colUpsert.execute();
- }
-
- private PColumn newColumn(int position, ColumnDef def, PrimaryKeyConstraint pkConstraint, String defaultColumnFamily, boolean addingToPK) throws SQLException {
- try {
- ColumnName columnDefName = def.getColumnDefName();
- SortOrder sortOrder = def.getSortOrder();
- boolean isPK = def.isPK();
- if (pkConstraint != null) {
- Pair<ColumnName, SortOrder> pkSortOrder = pkConstraint.getColumn(columnDefName);
- if (pkSortOrder != null) {
- isPK = true;
- sortOrder = pkSortOrder.getSecond();
- }
- }
-
- String columnName = columnDefName.getColumnName();
- PName familyName = null;
- if (def.isPK() && !pkConstraint.getColumnNames().isEmpty() ) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_ALREADY_EXISTS)
- .setColumnName(columnName).build().buildException();
- }
- boolean isNull = def.isNull();
- if (def.getColumnDefName().getFamilyName() != null) {
- String family = def.getColumnDefName().getFamilyName();
- if (isPK) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_WITH_FAMILY_NAME)
- .setColumnName(columnName).setFamilyName(family).build().buildException();
- } else if (!def.isNull()) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.KEY_VALUE_NOT_NULL)
- .setColumnName(columnName).setFamilyName(family).build().buildException();
- }
- familyName = PNameFactory.newName(family);
- } else if (!isPK) {
- familyName = PNameFactory.newName(defaultColumnFamily == null ? QueryConstants.DEFAULT_COLUMN_FAMILY : defaultColumnFamily);
- }
-
- if (isPK && !addingToPK && pkConstraint.getColumnNames().size() <= 1) {
- if (def.isNull() && def.isNullSet()) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.SINGLE_PK_MAY_NOT_BE_NULL)
- .setColumnName(columnName).build().buildException();
- }
- isNull = false;
- }
-
- PColumn column = new PColumnImpl(PNameFactory.newName(columnName), familyName, def.getDataType(),
- def.getMaxLength(), def.getScale(), isNull, position, sortOrder, def.getArraySize(), null, false);
- return column;
- } catch (IllegalArgumentException e) { // Based on precondition check in constructor
- throw new SQLException(e);
- }
- }
-
- public MutationState createTable(CreateTableStatement statement, byte[][] splits, PTable parent, String viewStatement, ViewType viewType, byte[][] viewColumnConstants, BitSet isViewColumnReferenced) throws SQLException {
- PTable table = createTableInternal(statement, splits, parent, viewStatement, viewType, viewColumnConstants, isViewColumnReferenced, null);
- if (table == null || table.getType() == PTableType.VIEW) {
- return new MutationState(0,connection);
- }
- // Hack to get around the case when an SCN is specified on the connection.
- // In this case, we won't see the table we just created yet, so we hack
- // around it by forcing the compiler to not resolve anything.
- PostDDLCompiler compiler = new PostDDLCompiler(connection);
- //connection.setAutoCommit(true);
- // Execute any necessary data updates
- Long scn = connection.getSCN();
- long ts = (scn == null ? table.getTimeStamp() : scn);
- // Getting the schema through the current connection doesn't work when the connection has an scn specified
- // Since the table won't be added to the current connection.
- TableRef tableRef = new TableRef(null, table, ts, false);
- byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(table);
- MutationPlan plan = compiler.compile(Collections.singletonList(tableRef), emptyCF, null, null, tableRef.getTimeStamp());
- return connection.getQueryServices().updateData(plan);
- }
-
- public MutationState updateStatistics(UpdateStatisticsStatement updateStatisticsStmt)
- throws SQLException {
- // Check before updating the stats if we have reached the configured time to reupdate the stats once again
- final long msMinBetweenUpdates = connection
- .getQueryServices()
- .getProps()
- .getLong(QueryServices.MIN_STATS_UPDATE_FREQ_MS_ATTRIB,
- QueryServicesOptions.DEFAULT_MIN_STATS_UPDATE_FREQ_MS);
- ColumnResolver resolver = FromCompiler.getResolver(updateStatisticsStmt, connection);
- PTable table = resolver.getTables().get(0).getTable();
- List<PTable> indexes = table.getIndexes();
- List<PTable> tables = Lists.newArrayListWithExpectedSize(1 + indexes.size());
- if (updateStatisticsStmt.updateColumns()) {
- tables.add(table);
- }
- if (updateStatisticsStmt.updateIndex()) {
- tables.addAll(indexes);
- }
- for(PTable pTable : tables) {
- updateStatisticsInternal(msMinBetweenUpdates, pTable);
- }
- return new MutationState(1, connection);
- }
-
- private MutationState updateStatisticsInternal(long msMinBetweenUpdates, PTable table) throws SQLException {
- PName physicalName = table.getPhysicalName();
- byte[] tenantIdBytes = ByteUtil.EMPTY_BYTE_ARRAY;
- Long scn = connection.getSCN();
- // Always invalidate the cache
- long clientTS = connection.getSCN() == null ? HConstants.LATEST_TIMESTAMP : scn;
- String query = "SELECT CURRENT_DATE() - " + LAST_STATS_UPDATE_TIME + " FROM " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME
- + " WHERE " + PHYSICAL_NAME + "='" + physicalName.getString() + "' AND " + COLUMN_FAMILY
- + " IS NULL AND " + REGION_NAME + " IS NULL AND " + LAST_STATS_UPDATE_TIME + " IS NOT NULL";
- ResultSet rs = connection.createStatement().executeQuery(query);
- long msSinceLastUpdate = Long.MAX_VALUE;
- if (rs.next()) {
- msSinceLastUpdate = rs.getLong(1);
- }
- if (msSinceLastUpdate >= msMinBetweenUpdates) {
- // Here create the select query.
- String countQuery = "SELECT /*+ NO_CACHE NO_INDEX */ count(*) FROM " + table.getName().getString();
- PhoenixStatement statement = (PhoenixStatement) connection.createStatement();
- QueryPlan plan = statement.compileQuery(countQuery);
- Scan scan = plan.getContext().getScan();
- // Add all CF in the table
- scan.getFamilyMap().clear();
- for (PColumnFamily family : table.getColumnFamilies()) {
- scan.addFamily(family.getName().getBytes());
- }
- scan.setAttribute(BaseScannerRegionObserver.ANALYZE_TABLE, PDataType.TRUE_BYTES);
- KeyValue kv = plan.iterator().next().getValue(0);
- ImmutableBytesWritable tempPtr = plan.getContext().getTempPtr();
- tempPtr.set(kv.getValue());
- // A single Cell will be returned with the count(*) - we decode that here
- long rowCount = PDataType.LONG.getCodec().decodeLong(tempPtr, SortOrder.getDefault());
- // We need to update the stats table so that client will pull the new one with
- // the updated stats.
- connection.getQueryServices().incrementTableTimeStamp(tenantIdBytes,
- Bytes.toBytes(SchemaUtil.getSchemaNameFromFullName(physicalName.getString())),
- Bytes.toBytes(SchemaUtil.getTableNameFromFullName(physicalName.getString())), clientTS);
- return new MutationState(0, connection, rowCount);
- } else {
- return new MutationState(0, connection);
- }
- }
-
- private MutationState buildIndexAtTimeStamp(PTable index, NamedTableNode dataTableNode) throws SQLException {
- // If our connection is at a fixed point-in-time, we need to open a new
- // connection so that our new index table is visible.
- Properties props = new Properties(connection.getClientInfo());
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(connection.getSCN()+1));
- PhoenixConnection conn = DriverManager.getConnection(connection.getURL(), props).unwrap(PhoenixConnection.class);
- MetaDataClient newClientAtNextTimeStamp = new MetaDataClient(conn);
-
- // Re-resolve the tableRef from the now newer connection
- conn.setAutoCommit(true);
- ColumnResolver resolver = FromCompiler.getResolver(dataTableNode, conn);
- TableRef tableRef = resolver.getTables().get(0);
- boolean success = false;
- SQLException sqlException = null;
- try {
- MutationState state = newClientAtNextTimeStamp.buildIndex(index, tableRef);
- success = true;
- return state;
- } catch (SQLException e) {
- sqlException = e;
- } finally {
- try {
- conn.close();
- } catch (SQLException e) {
- if (sqlException == null) {
- // If we're not in the middle of throwing another exception
- // then throw the exception we got on close.
- if (success) {
- sqlException = e;
- }
- } else {
- sqlException.setNextException(e);
- }
- }
- if (sqlException != null) {
- throw sqlException;
- }
- }
- throw new IllegalStateException(); // impossible
- }
-
- private MutationState buildIndex(PTable index, TableRef dataTableRef) throws SQLException {
- AlterIndexStatement indexStatement = null;
- boolean wasAutoCommit = connection.getAutoCommit();
- connection.rollback();
- try {
- connection.setAutoCommit(true);
- PostIndexDDLCompiler compiler = new PostIndexDDLCompiler(connection, dataTableRef);
- MutationPlan plan = compiler.compile(index);
- try {
- plan.getContext().setScanTimeRange(new TimeRange(dataTableRef.getLowerBoundTimeStamp(), Long.MAX_VALUE));
- } catch (IOException e) {
- throw new SQLException(e);
- }
- MutationState state = connection.getQueryServices().updateData(plan);
- indexStatement = FACTORY.alterIndex(FACTORY.namedTable(null,
- TableName.create(index.getSchemaName().getString(), index.getTableName().getString())),
- dataTableRef.getTable().getTableName().getString(), false, PIndexState.ACTIVE);
- alterIndex(indexStatement);
-
- return state;
- } finally {
- connection.setAutoCommit(wasAutoCommit);
- }
- }
-
- /**
- * Rebuild indexes from a timestamp which is the value from hbase row key timestamp field
- */
- public void buildPartialIndexFromTimeStamp(PTable index, TableRef dataTableRef) throws SQLException {
- boolean needRestoreIndexState = false;
- // Need to change index state from Disable to InActive when build index partially so that
- // new changes will be indexed during index rebuilding
- AlterIndexStatement indexStatement = FACTORY.alterIndex(FACTORY.namedTable(null,
- TableName.create(index.getSchemaName().getString(), index.getTableName().getString())),
- dataTableRef.getTable().getTableName().getString(), false, PIndexState.INACTIVE);
- alterIndex(indexStatement);
- needRestoreIndexState = true;
- try {
- buildIndex(index, dataTableRef);
- needRestoreIndexState = false;
- } finally {
- if(needRestoreIndexState) {
- // reset index state to disable
- indexStatement = FACTORY.alterIndex(FACTORY.namedTable(null,
- TableName.create(index.getSchemaName().getString(), index.getTableName().getString())),
- dataTableRef.getTable().getTableName().getString(), false, PIndexState.DISABLE);
- alterIndex(indexStatement);
- }
- }
- }
-
- /**
- * Create an index table by morphing the CreateIndexStatement into a CreateTableStatement and calling
- * MetaDataClient.createTable. In doing so, we perform the following translations:
- * 1) Change the type of any columns being indexed to types that support null if the column is nullable.
- * For example, a BIGINT type would be coerced to a DECIMAL type, since a DECIMAL type supports null
- * when it's in the row key while a BIGINT does not.
- * 2) Append any row key column from the data table that is not in the indexed column list. Our indexes
- * rely on having a 1:1 correspondence between the index and data rows.
- * 3) Change the name of the columns to include the column family. For example, if you have a column
- * named "B" in a column family named "A", the indexed column name will be "A:B". This makes it easy
- * to translate the column references in a query to the correct column references in an index table
- * regardless of whether the column reference is prefixed with the column family name or not. It also
- * has the side benefit of allowing the same named column in different column families to both be
- * listed as an index column.
- * @param statement
- * @param splits
- * @return MutationState from population of index table from data table
- * @throws SQLException
- */
- public MutationState createIndex(CreateIndexStatement statement, byte[][] splits) throws SQLException {
- PrimaryKeyConstraint pk = statement.getIndexConstraint();
- TableName indexTableName = statement.getIndexTableName();
-
- List<Pair<ColumnName, SortOrder>> indexedPkColumns = pk.getColumnNames();
- List<ColumnName> includedColumns = statement.getIncludeColumns();
- TableRef tableRef = null;
- PTable table = null;
- boolean retry = true;
- Short viewIndexId = null;
- boolean allocateViewIndexId = false;
- while (true) {
- try {
- ColumnResolver resolver = FromCompiler.getResolver(statement, connection);
- tableRef = resolver.getTables().get(0);
- PTable dataTable = tableRef.getTable();
- boolean isTenantConnection = connection.getTenantId() != null;
- if (isTenantConnection) {
- if (dataTable.getType() != PTableType.VIEW) {
- throw new SQLFeatureNotSupportedException("An index may only be created for a VIEW through a tenant-specific connection");
- }
- }
- int hbaseVersion = connection.getQueryServices().getLowestClusterHBaseVersion();
- if (!dataTable.isImmutableRows()) {
- if (hbaseVersion < PhoenixDatabaseMetaData.MUTABLE_SI_VERSION_THRESHOLD) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_MUTABLE_INDEXES).setTableName(indexTableName.getTableName()).build().buildException();
- }
- if (connection.getQueryServices().hasInvalidIndexConfiguration()) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_MUTABLE_INDEX_CONFIG).setTableName(indexTableName.getTableName()).build().buildException();
- }
- }
- int posOffset = 0;
- Set<PColumn> unusedPkColumns;
- if (dataTable.getBucketNum() != null) { // Ignore SALT column
- unusedPkColumns = new LinkedHashSet<PColumn>(dataTable.getPKColumns().subList(1, dataTable.getPKColumns().size()));
- posOffset++;
- } else {
- unusedPkColumns = new LinkedHashSet<PColumn>(dataTable.getPKColumns());
- }
- List<Pair<ColumnName, SortOrder>> allPkColumns = Lists.newArrayListWithExpectedSize(unusedPkColumns.size());
- List<ColumnDef> columnDefs = Lists.newArrayListWithExpectedSize(includedColumns.size() + indexedPkColumns.size());
-
- if (dataTable.isMultiTenant()) {
- // Add tenant ID column as first column in index
- PColumn col = dataTable.getPKColumns().get(posOffset);
- unusedPkColumns.remove(col);
- PDataType dataType = IndexUtil.getIndexColumnDataType(col);
- ColumnName colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
- allPkColumns.add(new Pair<ColumnName, SortOrder>(colName, col.getSortOrder()));
- columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), col.isNullable(), col.getMaxLength(), col.getScale(), false, SortOrder.getDefault()));
- }
- if (dataTable.getType() == PTableType.VIEW && dataTable.getViewType() != ViewType.MAPPED) {
- allocateViewIndexId = true;
- // Next add index ID column
- PDataType dataType = MetaDataUtil.getViewIndexIdDataType();
- ColumnName colName = ColumnName.caseSensitiveColumnName(MetaDataUtil.getViewIndexIdColumnName());
- allPkColumns.add(new Pair<ColumnName, SortOrder>(colName, SortOrder.getDefault()));
- columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), false, null, null, false, SortOrder.getDefault()));
- }
- // First columns are the indexed ones
- for (Pair<ColumnName, SortOrder> pair : indexedPkColumns) {
- ColumnName colName = pair.getFirst();
- PColumn col = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName()).getColumn();
- unusedPkColumns.remove(col);
- // Ignore view constants for updatable views as we don't need these in the index
- if (col.getViewConstant() == null) {
- PDataType dataType = IndexUtil.getIndexColumnDataType(col);
- colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
- allPkColumns.add(new Pair<ColumnName, SortOrder>(colName, pair.getSecond()));
- columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), col.isNullable(), col.getMaxLength(), col.getScale(), false, SortOrder.getDefault()));
- }
- }
-
- // Next all the PK columns from the data table that aren't indexed
- if (!unusedPkColumns.isEmpty()) {
- for (PColumn col : unusedPkColumns) {
- // Don't add columns with constant values from updatable views, as
- // we don't need these in the index
- if (col.getViewConstant() == null) {
- ColumnName colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
- allPkColumns.add(new Pair<ColumnName, SortOrder>(colName, col.getSortOrder()));
- PDataType dataType = IndexUtil.getIndexColumnDataType(col);
- columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), col.isNullable(), col.getMaxLength(), col.getScale(), false, col.getSortOrder()));
- }
- }
- }
- pk = FACTORY.primaryKey(null, allPkColumns);
-
- // Last all the included columns (minus any PK columns)
- for (ColumnName colName : includedColumns) {
- PColumn col = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName()).getColumn();
- if (SchemaUtil.isPKColumn(col)) {
- if (!unusedPkColumns.contains(col)) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_EXIST_IN_DEF).build().buildException();
- }
- } else {
- colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
- // Check for duplicates between indexed and included columns
- if (pk.contains(colName)) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_EXIST_IN_DEF).build().buildException();
- }
- if (!SchemaUtil.isPKColumn(col) && col.getViewConstant() == null) {
- // Need to re-create ColumnName, since the above one won't have the column family name
- colName = ColumnName.caseSensitiveColumnName(col.getFamilyName().getString(), IndexUtil.getIndexColumnName(col));
- columnDefs.add(FACTORY.columnDef(colName, col.getDataType().getSqlTypeName(), col.isNullable(), col.getMaxLength(), col.getScale(), false, col.getSortOrder()));
- }
- }
- }
-
- // Don't re-allocate viewIndexId on ConcurrentTableMutationException,
- // as there's no need to burn another sequence value.
- if (allocateViewIndexId && viewIndexId == null) {
- Long scn = connection.getSCN();
- long timestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
- PName tenantId = connection.getTenantId();
- String tenantIdStr = tenantId == null ? null : connection.getTenantId().getString();
- PName physicalName = dataTable.getPhysicalName();
- SequenceKey key = MetaDataUtil.getViewIndexSequenceKey(tenantIdStr, physicalName);
- // Create at parent timestamp as we know that will be earlier than now
- // and earlier than any SCN if one is set.
- createSequence(key.getTenantId(), key.getSchemaName(), key.getSequenceName(),
- true, Short.MIN_VALUE, 1, 1, false, Long.MIN_VALUE, Long.MAX_VALUE,
- dataTable.getTimeStamp());
- long[] seqValues = new long[1];
- SQLException[] sqlExceptions = new SQLException[1];
- connection.getQueryServices().incrementSequences(Collections.singletonList(key), timestamp, seqValues, sqlExceptions);
- if (sqlExceptions[0] != null) {
- throw sqlExceptions[0];
- }
- long seqValue = seqValues[0];
- if (seqValue > Short.MAX_VALUE) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.TOO_MANY_VIEW_INDEXES)
- .setSchemaName(SchemaUtil.getSchemaNameFromFullName(physicalName.getString())).setTableName(SchemaUtil.getTableNameFromFullName(physicalName.getString())).build().buildException();
- }
- viewIndexId = (short) seqValue;
- }
- // Set DEFAULT_COLUMN_FAMILY_NAME of index to match data table
- // We need this in the props so that the correct column family is created
- if (dataTable.getDefaultFamilyName() != null && dataTable.getType() != PTableType.VIEW) {
- statement.getProps().put("", new Pair<String,Object>(DEFAULT_COLUMN_FAMILY_NAME,dataTable.getDefaultFamilyName().getString()));
- }
- CreateTableStatement tableStatement = FACTORY.createTable(indexTableName, statement.getProps(), columnDefs, pk, statement.getSplitNodes(), PTableType.INDEX, statement.ifNotExists(), null, null, statement.getBindCount());
- table = createTableInternal(tableStatement, splits, dataTable, null, null, null, null, viewIndexId);
- break;
- } catch (ConcurrentTableMutationException e) { // Can happen if parent data table changes while above is in progress
- if (retry) {
- retry = false;
- continue;
- }
- throw e;
- }
- }
- if (table == null) {
- return new MutationState(0,connection);
- }
-
- // If our connection is at a fixed point-in-time, we need to open a new
- // connection so that our new index table is visible.
- if (connection.getSCN() != null) {
- return buildIndexAtTimeStamp(table, statement.getTable());
- }
-
- return buildIndex(table, tableRef);
- }
-
- public MutationState dropSequence(DropSequenceStatement statement) throws SQLException {
- Long scn = connection.getSCN();
- long timestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
- String schemaName = statement.getSequenceName().getSchemaName();
- String sequenceName = statement.getSequenceName().getTableName();
- String tenantId = connection.getTenantId() == null ? null : connection.getTenantId().getString();
- try {
- connection.getQueryServices().dropSequence(tenantId, schemaName, sequenceName, timestamp);
- } catch (SequenceNotFoundException e) {
- if (statement.ifExists()) {
- return new MutationState(0, connection);
- }
- throw e;
- }
- return new MutationState(1, connection);
- }
-
- public MutationState createSequence(CreateSequenceStatement statement, long startWith,
- long incrementBy, long cacheSize, long minValue, long maxValue) throws SQLException {
- Long scn = connection.getSCN();
- long timestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
- String tenantId =
- connection.getTenantId() == null ? null : connection.getTenantId().getString();
- return createSequence(tenantId, statement.getSequenceName().getSchemaName(), statement
- .getSequenceName().getTableName(), statement.ifNotExists(), startWith, incrementBy,
- cacheSize, statement.getCycle(), minValue, maxValue, timestamp);
- }
-
- private MutationState createSequence(String tenantId, String schemaName, String sequenceName,
- boolean ifNotExists, long startWith, long incrementBy, long cacheSize, boolean cycle,
- long minValue, long maxValue, long timestamp) throws SQLException {
- try {
- connection.getQueryServices().createSequence(tenantId, schemaName, sequenceName,
- startWith, incrementBy, cacheSize, minValue, maxValue, cycle, timestamp);
- } catch (SequenceAlreadyExistsException e) {
- if (ifNotExists) {
- return new MutationState(0, connection);
- }
- throw e;
- }
- return new MutationState(1, connection);
- }
-
- private static ColumnDef findColumnDefOrNull(List<ColumnDef> colDefs, ColumnName colName) {
- for (ColumnDef colDef : colDefs) {
- if (colDef.getColumnDefName().getColumnName().equals(colName.getColumnName())) {
- return colDef;
- }
- }
- return null;
- }
-
- private PTable createTableInternal(CreateTableStatement statement, byte[][] splits, final PTable parent, String viewStatement, ViewType viewType, final byte[][] viewColumnConstants, final BitSet isViewColumnReferenced, Short viewIndexId) throws SQLException {
- final PTableType tableType = statement.getTableType();
- boolean wasAutoCommit = connection.getAutoCommit();
- connection.rollback();
- try {
- connection.setAutoCommit(false);
- List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize(statement.getColumnDefs().size() + 3);
-
- TableName tableNameNode = statement.getTableName();
- String schemaName = tableNameNode.getSchemaName();
- String tableName = tableNameNode.getTableName();
- String parentTableName = null;
- PName tenantId = connection.getTenantId();
- String tenantIdStr = tenantId == null ? null : connection.getTenantId().getString();
- boolean multiTenant = false;
- Integer saltBucketNum = null;
- String defaultFamilyName = null;
- boolean isImmutableRows = false;
- List<PName> physicalNames = Collections.emptyList();
- boolean addSaltColumn = false;
- if (parent != null && tableType == PTableType.INDEX) {
- // Index on view
- // TODO: Can we support a multi-tenant index directly on a multi-tenant
- // table instead of only a view? We don't have anywhere to put the link
- // from the table to the index, though.
- if (parent.getType() == PTableType.VIEW && parent.getViewType() != ViewType.MAPPED) {
- PName physicalName = parent.getPhysicalName();
- saltBucketNum = parent.getBucketNum();
- addSaltColumn = (saltBucketNum != null);
- defaultFamilyName = parent.getDefaultFamilyName() == null ? null : parent.getDefaultFamilyName().getString();
- // Set physical name of view index table
- physicalNames = Collections.singletonList(PNameFactory.newName(MetaDataUtil.getViewIndexPhysicalName(physicalName.getBytes())));
- }
-
- multiTenant = parent.isMultiTenant();
- parentTableName = parent.getTableName().getString();
- // Pass through data table sequence number so we can check it hasn't changed
- PreparedStatement incrementStatement = connection.prepareStatement(INCREMENT_SEQ_NUM);
- incrementStatement.setString(1, connection.getTenantId() == null ? null : connection.getTenantId().getString());
- incrementStatement.setString(2, schemaName);
- incrementStatement.setString(3, parentTableName);
- incrementStatement.setLong(4, parent.getSequenceNumber());
- incrementStatement.execute();
- // Get list of mutations and add to table meta data that will be passed to server
- // to guarantee order. This row will always end up last
- tableMetaData.addAll(connection.getMutationState().toMutations().next().getSecond());
- connection.rollback();
-
- // Add row linking from data table row to index table row
- PreparedStatement linkStatement = connection.prepareStatement(CREATE_LINK);
- linkStatement.setString(1, connection.getTenantId() == null ? null : connection.getTenantId().getString());
- linkStatement.setString(2, schemaName);
- linkStatement.setString(3, parentTableName);
- linkStatement.setString(4, tableName);
- linkStatement.setByte(5, LinkType.INDEX_TABLE.getSerializedValue());
- linkStatement.execute();
- }
-
- PrimaryKeyConstraint pkConstraint = statement.getPrimaryKeyConstraint();
- String pkName = null;
- List<Pair<ColumnName,SortOrder>> pkColumnsNames = Collections.<Pair<ColumnName,SortOrder>>emptyList();
- Iterator<Pair<ColumnName,SortOrder>> pkColumnsIterator = Iterators.emptyIterator();
- if (pkConstraint != null) {
- pkColumnsNames = pkConstraint.getColumnNames();
- pkColumnsIterator = pkColumnsNames.iterator();
- pkName = pkConstraint.getName();
- }
-
- Map<String,Object> tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());
- Map<String,Object> commonFamilyProps = Collections.emptyMap();
- // Somewhat hacky way of determining if property is for HColumnDescriptor or HTableDescriptor
- HColumnDescriptor defaultDescriptor = new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
- if (!statement.getProps().isEmpty()) {
- commonFamilyProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());
-
- Collection<Pair<String,Object>> props = statement.getProps().get(QueryConstants.ALL_FAMILY_PROPERTIES_KEY);
- for (Pair<String,Object> prop : props) {
- if (defaultDescriptor.getValue(prop.getFirst()) == null) {
- tableProps.put(prop.getFirst(), prop.getSecond());
- } else {
- commonFamilyProps.put(prop.getFirst(), prop.getSecond());
- }
- }
- }
-
- // Although unusual, it's possible to set a mapped VIEW as having immutable rows.
- // This tells Phoenix that you're managing the index maintenance yourself.
- if (tableType != PTableType.INDEX && (tableType != PTableType.VIEW || viewType == ViewType.MAPPED)) {
- Boolean isImmutableRowsProp = (Boolean) tableProps.remove(PTable.IS_IMMUTABLE_ROWS_PROP_NAME);
- if (isImmutableRowsProp == null) {
- isImmutableRows = connection.getQueryServices().getProps().getBoolean(QueryServices.IMMUTABLE_ROWS_ATTRIB, QueryServicesOptions.DEFAULT_IMMUTABLE_ROWS);
- } else {
- isImmutableRows = isImmutableRowsProp;
- }
- }
-
- // Can't set any of these on views or shared indexes on views
- if (tableType != PTableType.VIEW && viewIndexId == null) {
- saltBucketNum = (Integer) tableProps.remove(PhoenixDatabaseMetaData.SALT_BUCKETS);
- if (saltBucketNum != null && (saltBucketNum < 0 || saltBucketNum > SaltingUtil.MAX_BUCKET_NUM)) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_BUCKET_NUM).build().buildException();
- }
- // Salt the index table if the data table is salted
- if (saltBucketNum == null) {
- if (parent != null) {
- saltBucketNum = parent.getBucketNum();
- }
- } else if (saltBucketNum.intValue() == 0) {
- saltBucketNum = null; // Provides a way for an index to not be salted if its data table is salted
- }
- addSaltColumn = (saltBucketNum != null);
- }
-
- boolean removedProp = false;
- // Can't set MULTI_TENANT or DEFAULT_COLUMN_FAMILY_NAME on an index
- if (tableType != PTableType.INDEX && (tableType != PTableType.VIEW || viewType == ViewType.MAPPED)) {
- Boolean multiTenantProp = (Boolean) tableProps.remove(PhoenixDatabaseMetaData.MULTI_TENANT);
- multiTenant = Boolean.TRUE.equals(multiTenantProp);
- // Remove, but add back after our check below
- defaultFamilyName = (String)tableProps.remove(PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME);
- removedProp = (defaultFamilyName != null);
- }
-
- boolean disableWAL = false;
- Boolean disableWALProp = (Boolean) tableProps.remove(PhoenixDatabaseMetaData.DISABLE_WAL);
- if (disableWALProp != null) {
- disableWAL = disableWALProp;
- }
- // Delay this check as it is supported to have IMMUTABLE_ROWS and SALT_BUCKETS defined on views
- if ((statement.getTableType() == PTableType.VIEW || viewIndexId != null) && !tableProps.isEmpty()) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_PROPERTIES).build().buildException();
- }
- if (removedProp) {
- tableProps.put(PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME, defaultFamilyName);
- }
-
- List<ColumnDef> colDefs = statement.getColumnDefs();
- List<PColumn> columns;
- LinkedHashSet<PColumn> pkColumns;
-
- if (tenantId != null && (tableType != PTableType.VIEW && viewIndexId == null)) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_TENANT_SPECIFIC_TABLE)
- .setSchemaName(schemaName).setTableName(tableName).build().buildException();
- }
-
- if (tableType == PTableType.VIEW) {
- physicalNames = Collections.singletonList(PNameFactory.newName(parent.getPhysicalName().getString()));
- if (viewType == ViewType.MAPPED) {
- columns = newArrayListWithExpectedSize(colDefs.size());
- pkColumns = newLinkedHashSetWithExpectedSize(colDefs.size());
- } else {
- // Propagate property values to VIEW.
- // TODO: formalize the known set of these properties
- multiTenant = parent.isMultiTenant();
- saltBucketNum = parent.getBucketNum();
- isImmutableRows = parent.isImmutableRows();
- disableWAL = (disableWALProp == null ? parent.isWALDisabled() : disableWALProp);
- defaultFamilyName = parent.getDefaultFamilyName() == null ? null : parent.getDefaultFamilyName().getString();
- List<PColumn> allColumns = parent.getColumns();
- if (saltBucketNum != null) { // Don't include salt column in columns, as it should not have it when created
- allColumns = allColumns.subList(1, allColumns.size());
- }
- columns = newArrayListWithExpectedSize(allColumns.size() + colDefs.size());
- columns.addAll(allColumns);
- pkColumns = newLinkedHashSet(parent.getPKColumns());
- }
- } else {
- columns = newArrayListWithExpectedSize(colDefs.size());
- pkColumns = newLinkedHashSetWithExpectedSize(colDefs.size() + 1); // in case salted
- }
-
- // Don't add link for mapped view, as it just points back to itself and causes the drop to
- // fail because it looks like there's always a view associated with it.
- if (!physicalNames.isEmpty()) {
- // Upsert physical name for mapped view only if the full physical table name is different than the full table name
- // Otherwise, we end up with a self-referencing link and then cannot ever drop the view.
- if (viewType != ViewType.MAPPED
- || !physicalNames.get(0).getString().equals(SchemaUtil.getTableName(schemaName, tableName))) {
- // Add row linking from data table row to physical table row
- PreparedStatement linkStatement = connection.prepareStatement(CREATE_LINK);
- for (PName physicalName : physicalNames) {
- linkStatement.setString(1, connection.getTenantId() == null ? null : connection.getTenantId().getString());
- linkStatement.setString(2, schemaName);
- linkStatement.setString(3, tableName);
- linkStatement.setString(4, physicalName.getString());
- linkStatement.setByte(5, LinkType.PHYSICAL_TABLE.getSerializedValue());
- linkStatement.execute();
- }
- }
- }
-
- PreparedStatement colUpsert = connection.prepareStatement(INSERT_COLUMN);
- Map<String, PName> familyNames = Maps.newLinkedHashMap();
- boolean isPK = false;
-
- int positionOffset = columns.size();
- if (saltBucketNum != null) {
- positionOffset++;
- if (addSaltColumn) {
- pkColumns.add(SaltingUtil.SALTING_COLUMN);
- }
- }
- int position = positionOffset;
-
- for (ColumnDef colDef : colDefs) {
- if (colDef.isPK()) {
- if (isPK) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_ALREADY_EXISTS)
- .setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException();
- }
- isPK = true;
- } else {
- // do not allow setting NOT-NULL constraint on non-primary columns.
- if ( Boolean.FALSE.equals(colDef.isNull()) &&
- ( isPK || ( pkConstraint != null && !pkConstraint.contains(colDef.getColumnDefName())))) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_NOT_NULL_CONSTRAINT)
- .setSchemaName(schemaName)
- .setTableName(tableName)
- .setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException();
- }
- }
-
- PColumn column = newColumn(position++, colDef, pkConstraint, defaultFamilyName, false);
- if (SchemaUtil.isPKColumn(column)) {
- // TODO: remove this constraint?
- if (pkColumnsIterator.hasNext() && !column.getName().getString().equals(pkColumnsIterator.next().getFirst().getColumnName())) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_OUT_OF_ORDER)
- .setSchemaName(schemaName)
- .setTableName(tableName)
- .setColumnName(column.getName().getString())
- .build().buildException();
- }
- if (tableType == PTableType.VIEW && viewType != ViewType.MAPPED) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DEFINE_PK_FOR_VIEW)
- .setSchemaName(schemaName)
- .setTableName(tableName)
- .setColumnName(colDef.getColumnDefName().getColumnName())
- .build().buildException();
- }
- if (!pkColumns.add(column)) {
- throw new ColumnAlreadyExistsException(schemaName, tableName, column.getName().getString());
- }
- }
- if (tableType == PTableType.VIEW && hasColumnWithSameNameAndFamily(columns, column)) {
- // we only need to check for dup columns for views because they inherit columns from parent
- throw new ColumnAlreadyExistsException(schemaName, tableName, column.getName().getString());
- }
- columns.add(column);
- if ((colDef.getDataType() == PDataType.VARBINARY || colDef.getDataType().isArrayType())
- && SchemaUtil.isPKColumn(column)
- && pkColumnsIterator.hasNext()) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.VARBINARY_IN_ROW_KEY)
- .setSchemaName(schemaName)
- .setTableName(tableName)
- .setColumnName(column.getName().getString())
- .build().buildException();
- }
- if (column.getFamilyName() != null) {
- familyNames.put(column.getFamilyName().getString(),column.getFamilyName());
- }
- }
- // We need a PK definition for a TABLE or mapped VIEW
- if (!isPK && pkColumnsNames.isEmpty() && tableType != PTableType.VIEW && viewType != ViewType.MAPPED) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_MISSING)
- .setSchemaName(schemaName)
- .setTableName(tableName)
- .build().buildException();
- }
- if (!pkColumnsNames.isEmpty() && pkColumnsNames.size() != pkColumns.size() - positionOffset) { // Then a column name in the primary key constraint wasn't resolved
- Iterator<Pair<ColumnName,SortOrder>> pkColumnNamesIterator = pkColumnsNames.iterator();
- while (pkColumnNamesIterator.hasNext()) {
- ColumnName colName = pkColumnNamesIterator.next().getFirst();
- ColumnDef colDef = findColumnDefOrNull(colDefs, colName);
- if (colDef == null) {
- throw new ColumnNotFoundException(schemaName, tableName, null, colName.getColumnName());
- }
- if (colDef.getColumnDefName().getFamilyName() != null) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_WITH_FAMILY_NAME)
- .setSchemaName(schemaName)
- .setTableName(tableName)
- .setColumnName(colDef.getColumnDefName().getColumnName() )
- .setFamilyName(colDef.getColumnDefName().getFamilyName())
- .build().buildException();
- }
- }
- // The above should actually find the specific one, but just in case...
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_PRIMARY_KEY_CONSTRAINT)
- .setSchemaName(schemaName)
- .setTableName(tableName)
- .build().buildException();
- }
-
- List<Pair<byte[],Map<String,Object>>> familyPropList = Lists.newArrayListWithExpectedSize(familyNames.size());
- if (!statement.getProps().isEmpty()) {
- for (String familyName : statement.getProps().keySet()) {
- if (!familyName.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY)) {
- if (familyNames.get(familyName) == null) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.PROPERTIES_FOR_FAMILY)
- .setFamilyName(familyName).build().buildException();
- } else if (statement.getTableType() == PTableType.VIEW) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_PROPERTIES).build().buildException();
- }
- }
- }
- }
- throwIfInsufficientColumns(schemaName, tableName, pkColumns, saltBucketNum!=null, multiTenant);
-
- for (PName familyName : familyNames.values()) {
- Collection<Pair<String,Object>> props = statement.getProps().get(familyName.getString());
- if (props.isEmpty()) {
- familyPropList.add(new Pair<byte[],Map<String,Object>>(familyName.getBytes(),commonFamilyProps));
- } else {
- Map<String,Object> combinedFamilyProps = Maps.newHashMapWithExpectedSize(props.size() + commonFamilyProps.size());
- combinedFamilyProps.putAll(commonFamilyProps);
- for (Pair<String,Object> prop : props) {
- combinedFamilyProps.put(prop.getFirst(), prop.getSecond());
- }
- familyPropList.add(new Pair<byte[],Map<String,Object>>(familyName.getBytes(),combinedFamilyProps));
- }
- }
-
- if (familyNames.isEmpty()) {
-                //If there are no family names, use the default column family name. This also takes care of the case when
-                //the table DDL has only PK cols present (which means familyNames is empty).
- byte[] cf = defaultFamilyName == null ? QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES : Bytes.toBytes(defaultFamilyName);
- familyPropList.add(new Pair<byte[],Map<String,Object>>(cf, commonFamilyProps));
- }
-
- // Bootstrapping for our SYSTEM.TABLE that creates itself before it exists
- if (SchemaUtil.isMetaTable(schemaName,tableName)) {
- // TODO: what about stats for system catalog?
- PTable table = PTableImpl.makePTable(tenantId,PNameFactory.newName(schemaName), PNameFactory.newName(tableName), tableType,
- null, MetaDataProtocol.MIN_TABLE_TIMESTAMP, PTable.INITIAL_SEQ_NUM,
- PNameFactory.newName(QueryConstants.SYSTEM_TABLE_PK_NAME), null, columns, null, Collections.<PTable>emptyList(),
- isImmutableRows, Collections.<PName>emptyList(),
- defaultFamilyName == null ? null : PNameFactory.newName(defaultFamilyName), null, Boolean.TRUE.equals(disableWAL), false, null, viewIndexId);
- connection.addTable(table);
- } else if (tableType == PTableType.INDEX && viewIndexId == null) {
- if (tableProps.get(HTableDescriptor.MAX_FILESIZE) == null) {
- int nIndexRowKeyColumns = isPK ? 1 : pkColumnsNames.size();
- int nIndexKeyValueColumns = columns.size() - nIndexRowKeyColumns;
- int nBaseRowKeyColumns = parent.getPKColumns().size() - (parent.getBucketNum() == null ? 0 : 1);
- int nBaseKeyValueColumns = parent.getColumns().size() - parent.getPKColumns().size();
- /*
- * Approximate ratio between index table size and data table size:
- * More or less equal to the ratio between the number of key value columns in each. We add one to
- * the key value column count to take into account our empty key value. We add 1/4 for any key
- * value data table column that was moved into the index table row key.
- */
- double ratio = (1+nIndexKeyValueColumns + (nIndexRowKeyColumns - nBaseRowKeyColumns)/4d)/(1+nBaseKeyValueColumns);
- HTableDescriptor descriptor = connection.getQueryServices().getTableDescriptor(parent.getPhysicalName().getBytes());
- if (descriptor != null) { // Is null for connectionless
- long maxFileSize = descriptor.getMaxFileSize();
- if (maxFileSize == -1) { // If unset, use default
- maxFileSize = HConstants.DEFAULT_MAX_FILE_SIZE;
- }
- tableProps.put(HTableDescriptor.MAX_FILESIZE, (long)(maxFileSize * ratio));
- }
- }
- }
-
- short nextKeySeq = 0;
- for (int i = 0; i < columns.size(); i++) {
- PColumn column = columns.get(i);
- final int columnPosition = column.getPosition();
- // For client-side cache, we need to update the column
- if (isViewColumnReferenced != null) {
- if (viewColumnConstants != null && columnPosition < viewColumnConstants.length) {
- columns.set(i, column = new DelegateColumn(column) {
- @Override
- public byte[] getViewConstant() {
- return viewColumnConstants[columnPosition];
- }
- @Override
- public boolean isViewReferenced() {
- return isViewColumnReferenced.get(columnPosition);
- }
- });
- } else {
- columns.set(i, column = new DelegateColumn(column) {
- @Override
- public boolean isViewReferenced() {
- return isViewColumnReferenced.get(columnPosition);
- }
- });
- }
- }
- Short keySeq = SchemaUtil.isPKColumn(column) ? ++nextKeySeq : null;
- addColumnMutation(schemaName, tableName, column, colUpsert, parentTableName, pkName, keySeq, saltBucketNum != null);
- }
-
- tableMetaData.addAll(connection.getMutationState().toMutations().next().getSecond());
- connection.rollback();
-
- String dataTableName = parent == null || tableType == PTableType.VIEW ? null : parent.getTableName().getString();
- PIndexState indexState = parent == null || tableType == PTableType.VIEW ? null : PIndexState.BUILDING;
- PreparedStatement tableUpsert = connection.prepareStatement(CREATE_TABLE);
- tableUpsert.setString(1, tenantIdStr);
- tableUpsert.setString(2, schemaName);
- tableUpsert.setString(3, tableName);
- tableUpsert.setString(4, tableType.getSerializedValue());
- tableUpsert.setLong(5, PTable.INITIAL_SEQ_NUM);
- tableUpsert.setInt(6, position);
- if (saltBucketNum != null) {
- tableUpsert.setInt(7, saltBucketNum);
- } else {
- tableUpsert.setNull(7, Types.INTEGER);
- }
- tableUpsert.setString(8, pkName);
- tableUpsert.setString(9, dataTableName);
- tableUpsert.setString(10, indexState == null ? null : indexState.getSerializedValue());
- tableUpsert.setBoolean(11, isImmutableRows);
- tableUpsert.setString(12, defaultFamilyName);
- tableUpsert.setString(13, viewStatement);
- tableUpsert.setBoolean(14, disableWAL);
- tableUpsert.setBoolean(15, multiTenant);
- if (viewType == null) {
- tableUpsert.setNull(16, Types.TINYINT);
- } else {
- tableUpsert.setByte(16, viewType.getSerializedValue());
- }
- if (viewIndexId == null) {
- tableUpsert.setNull(17, Types.SMALLINT);
- } else {
- tableUpsert.setShort(17, viewIndexId);
- }
- tableUpsert.execute();
-
- tableMetaData.addAll(connection.getMutationState().toMutations().next().getSecond());
- connection.rollback();
-
- /*
- * The table metadata must be in the following order:
- * 1) table header row
- * 2) everything else
- * 3) parent table header row
- */
- Collections.reverse(tableMetaData);
-
- splits = SchemaUtil.processSplits(splits, pkColumns, saltBucketNum, connection.getQueryServices().getProps().getBoolean(
- QueryServices.ROW_KEY_ORDER_SALTED_TABLE_ATTRIB, QueryServicesOptions.DEFAULT_ROW_KEY_ORDER_SALTED_TABLE));
- MetaDataMutationResult result = connection.getQueryServices().createTable(
- tableMetaData,
- viewType == ViewType.MAPPED || viewIndexId != null ? physicalNames.get(0).getBytes() : null,
- tableType, tableProps, familyPropList, splits);
- MutationCode code = result.getMutationCode();
- switch(code) {
- case TABLE_ALREADY_EXISTS:
- connection.addTable(result.getTable());
- if (!statement.ifNotExists()) {
- throw new TableAlreadyExistsException(schemaName, tableName);
- }
- return null;
- case PARENT_TABLE_NOT_FOUND:
- throw new TableNotFoundException(schemaName, parent.getName().getString());
- case NEWER_TABLE_FOUND:
- throw new NewerTableAlreadyExistsException(schemaName, tableName);
- case UNALLOWED_TABLE_MUTATION:
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE)
- .setSchemaName(schemaName).setTableName(tableName).build().buildException();
- case CONCURRENT_TABLE_MUTATION:
- connection.addTable(result.getTable());
- throw new ConcurrentTableMutationException(schemaName, tableName);
- default:
- PTable table = PTableImpl.makePTable(
- tenantId, PNameFactory.newName(schemaName), PNameFactory.newName(tableName), tableType, indexState, result.getMutationTime(),
- PTable.INITIAL_SEQ_NUM, pkName == null ? null : PNameFactory.newName(pkName), saltBucketNum, columns,
- dataTableName == null ? null : PNameFactory.newName(dataTableName), Collections.<PTable>emptyList(), isImmutableRows, physicalNames,
- defaultFamilyName == null ? null : PNameFactory.newName(defaultFamilyName), viewStatement, Boolean.TRUE.equals(disableWAL), multiTenant, viewType, viewIndexId);
- connection.addTable(table);
- return table;
- }
- } finally {
- connection.setAutoCommit(wasAutoCommit);
- }
- }
-
- private static boolean hasColumnWithSameNameAndFamily(Collection<PColumn> columns, PColumn column) {
- for (PColumn currColumn : columns) {
- if (Objects.equal(currColumn.getFamilyName(), column.getFamilyName()) &&
- Objects.equal(currColumn.getName(), column.getName())) {
- return true;
- }
- }
- return false;
- }
-
- /**
- * A table can be a parent table to tenant-specific tables if all of the following conditions are true:
- * <p>
- * FOR TENANT-SPECIFIC TABLES WITH TENANT_TYPE_ID SPECIFIED:
- * <ol>
- * <li>It has 3 or more PK columns AND
-     * <li>First PK (tenant id) column is not nullable AND
-     * <li>First PK column's data type is either VARCHAR or CHAR AND
-     * <li>Second PK (tenant type id) column is not nullable AND
-     * <li>Second PK column's data type is either VARCHAR or CHAR
- * </ol>
- * FOR TENANT-SPECIFIC TABLES WITH NO TENANT_TYPE_ID SPECIFIED:
- * <ol>
- * <li>It has 2 or more PK columns AND
-     * <li>First PK (tenant id) column is not nullable AND
-     * <li>First PK column's data type is either VARCHAR or CHAR
- * </ol>
- */
- private static void throwIfInsufficientColumns(String schemaName, String tableName, Collection<PColumn> columns, boolean isSalted, boolean isMultiTenant) throws SQLException {
- if (!isMultiTenant) {
- return;
- }
- int nPKColumns = columns.size() - (isSalted ? 1 : 0);
- if (nPKColumns < 2) {
- throw new SQLExceptionInfo.Builder(INSUFFICIENT_MULTI_TENANT_COLUMNS).setSchemaName(schemaName).setTableName(tableName).build().buildException();
- }
- Iterator<PColumn> iterator = columns.iterator();
- if (isSalted) {
- iterator.next();
- }
- // Tenant ID must be VARCHAR or CHAR and be NOT NULL
- // NOT NULL is a requirement, since otherwise the table key would conflict
- // potentially with the global table definition.
- PColumn tenantIdCol = iterator.next();
- if (!tenantIdCol.getDataType().isCoercibleTo(VARCHAR) || tenantIdCol.isNullable()) {
- throw new SQLExceptionInfo.Builder(INSUFFICIENT_MULTI_TENANT_COLUMNS).setSchemaName(schemaName).setTableName(tableName).build().buildException();
- }
- }
-
- public MutationState dropTable(DropTableStatement statement) throws SQLException {
- String schemaName = statement.getTableName().getSchemaName();
- String tableName = statement.getTableName().getTableName();
- return dropTable(schemaName, tableName, null, statement.getTableType(), statement.ifExists(), statement.cascade());
- }
-
- public MutationState dropIndex(DropIndexStatement statement) throws SQLException {
- String schemaName = statement.getTableName().getSchemaName();
- String tableName = statement.getIndexName().getName();
- String parentTableName = statement.getTableName().getTableName();
- return dropTable(schemaName, tableName, parentTableName, PTableType.INDEX, statement.ifExists(), false);
- }
-
- private MutationState dropTable(String schemaName, String tableName, String parentTableName, PTableType tableType,
- boolean ifExists, boolean cascade) throws SQLException {
- connection.rollback();
- boolean wasAutoCommit = connection.getAutoCommit();
- try {
- PName tenantId = connection.getTenantId();
- String tenantIdStr = tenantId == null ? null : tenantId.getString();
- byte[] key = SchemaUtil.getTableKey(tenantIdStr, schemaName, tableName);
- Long scn = connection.getSCN();
- long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
- List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize(2);
- @SuppressWarnings("deprecation") // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
- Delete tableDelete = new Delete(key, clientTimeStamp, null);
- tableMetaData.add(tableDelete);
- if (parentTableName != null) {
- byte[] linkKey = MetaDataUtil.getParentLinkKey(tenantIdStr, schemaName, parentTableName, tableName);
- @SuppressWarnings("deprecation") // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
- Delete linkDelete = new Delete(linkKey, clientTimeStamp, null);
- tableMetaData.add(linkDelete);
- }
-
- MetaDataMutationResult result = connection.getQueryServices().dropTable(tableMetaData, tableType, cascade);
- MutationCode code = result.getMutationCode();
- switch(code) {
- case TABLE_NOT_FOUND:
- if (!ifExists) {
- throw new TableNotFoundException(schemaName, tableName);
- }
- break;
- case NEWER_TABLE_FOUND:
- throw new NewerTableAlreadyExistsException(schemaName, tableName);
- case UNALLOWED_TABLE_MUTATION:
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE)
- .setSchemaName(schemaName).setTableName(tableName).build().buildException();
- default:
- connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName), parentTableName, result.getMutationTime());
-
- // TODO: we need to drop the index data when a view is dropped
- boolean dropMetaData = connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
-
- if (result.getTable() != null && tableType != PTableType.VIEW) {
- connection.setAutoCommit(true);
- PTable table = result.getTable();
- long ts = (scn == null ? result.getMutationTime() : scn);
- // Create empty table and schema - they're only used to get the name from
- // PName name, PTableType type, long timeStamp, long sequenceNumber, List<PColumn> columns
- List<TableRef> tableRefs = Lists.newArrayListWithExpectedSize(2 + table.getIndexes().size());
- // All multi-tenant tables have a view index table, so no need to check in that case
- if (tableType == PTableType.TABLE && (table.isMultiTenant() || MetaDataUtil.hasViewIndexTable(connection, table.getPhysicalName()))) {
- MetaDataUtil.deleteViewIndexSequences(connection, table.getPhysicalName());
- // TODO: consider removing this, as the DROP INDEX done for each DROP VIEW command
- // would have deleted all the rows already
- if (!dropMetaData) {
- String viewIndexSchemaName = MetaDataUtil.getViewIndexSchemaName(schemaName);
- String viewIndexTableName = MetaDataUtil.getViewIndexTableName(tableName);
- PTable viewIndexTable = new PTableImpl(null, viewIndexSchemaName, viewIndexTableName, ts, table.getColumnFamilies());
- tableRefs.add(new TableRef(null, viewIndexTable, ts, false));
- }
- }
- // Delete everything in the column. You'll still be able to do queries at earlier timestamps
- tableRefs.add(new TableRef(null, table, ts, false));
- // TODO: Let the standard mutable secondary index maintenance handle this?
- for (PTable index: table.getIndexes()) {
- tableRefs.add(new TableRef(null, index, ts, false));
- }
- deleteFromStatsTable(tableRefs, ts);
- if (!dropMetaData) {
- MutationPlan plan = new PostDDLCompiler(connection).compile(tableRefs, null, null, Collections.<PColumn>emptyList(), ts);
- return connection.getQueryServices().updateData(plan);
- }
- }
- break;
- }
- return new MutationState(0,connection);
- } finally {
- connection.setAutoCommit(wasAutoCommit);
- }
- }
-
- private void deleteFromStatsTable(List<TableRef> tableRefs, long ts) throws SQLException {
- Properties props = new Properties(connection.getClientInfo());
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts));
- Connection conn = DriverManager.getConnection(connection.getURL(), props);
- conn.setAutoCommit(true);
- boolean success = false;
- SQLException sqlException = null;
- try {
- StringBuilder buf = new StringBuilder("DELETE FROM SYSTEM.STATS WHERE PHYSICAL_NAME IN (");
- for (TableRef ref : tableRefs) {
- buf.append("'" + ref.getTable().getName().getString() + "',");
- }
- buf.setCharAt(buf.length() - 1, ')');
- conn.createStatement().execute(buf.toString());
- success = true;
- } catch (SQLException e) {
- sqlException = e;
- } finally {
- try {
- conn.close();
- } catch (SQLException e) {
- if (sqlException == null) {
- // If we're not in the middle of throwing another exception
- // then throw the exception we got on close.
- if (success) {
- sqlException = e;
- }
- } else {
- sqlException.setNextException(e);
- }
- }
- if (sqlException != null) { throw sqlException; }
- }
- }
-
- private MutationCode processMutationResult(String schemaName, String tableName, MetaDataMutationResult result) throws SQLException {
- final MutationCode mutationCode = result.getMutationCode();
- PName tenantId = connection.getTenantId();
- switch (mutationCode) {
- case TABLE_NOT_FOUND:
- // Only called for add/remove column so parentTableName will always be null
- connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName), null, HConstants.LATEST_TIMESTAMP);
- throw new TableNotFoundException(schemaName, tableName);
- case UNALLOWED_TABLE_MUTATION:
- Str
<TRUNCATED>
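For context on the index MAX_FILESIZE sizing heuristic in the (truncated) MetaDataClient change above, a worked example with illustrative column counts; none of these numbers come from the patch:

    // Illustrative numbers only: a data table with 5 PK columns and 10 other
    // columns, and an index that pulls 2 of those columns into its row key.
    int nIndexRowKeyColumns = 7, nIndexKeyValueColumns = 1;
    int nBaseRowKeyColumns = 5, nBaseKeyValueColumns = 10;
    double ratio = (1 + nIndexKeyValueColumns
            + (nIndexRowKeyColumns - nBaseRowKeyColumns) / 4d)
            / (1 + nBaseKeyValueColumns);            // (1 + 1 + 0.5) / 11 ~= 0.23
    long dataMaxFileSize = 10L * 1024 * 1024 * 1024; // e.g. a 10 GB region size
    long indexMaxFileSize = (long) (dataMaxFileSize * ratio); // ~= 2.4 GB

An index that keeps most of its data in the row key is therefore given a proportionally smaller max file size, which roughly keeps it splitting into a similar number of regions as its data table.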
[06/15] git commit: PHOENIX-1366 ORDINAL_POSITION incorrect for multi-tenant table over tenant-specific connection (Bruno Dumon)
Posted by ja...@apache.org.
PHOENIX-1366 ORDINAL_POSITION incorrect for multi-tenant table over tenant-specific connection (Bruno Dumon)
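For context, a minimal sketch of the scenario being fixed; the table name, tenant id, and JDBC URL below are illustrative, not taken from the patch. Over a tenant-specific connection the tenant id PK column is hidden from metadata, so ORDINAL_POSITION and KEY_SEQ should be renumbered as if that column did not exist:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.util.Properties;

    public class OrdinalPositionSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.setProperty("TenantId", "acme"); // makes this a tenant-specific connection
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
                 ResultSet rs = conn.getMetaData().getColumns(null, null, "MT_TABLE", null)) {
                while (rs.next()) {
                    // With the fix, the hidden tenant id column no longer counts:
                    // the first visible column reports ORDINAL_POSITION 1, not 2.
                    System.out.println(rs.getString("COLUMN_NAME")
                            + " pos=" + rs.getInt("ORDINAL_POSITION"));
                }
            }
        }
    }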
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/60fee11b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/60fee11b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/60fee11b
Branch: refs/heads/3.2
Commit: 60fee11be5e9968d7922215c24aaeb29cdaf416b
Parents: 2bdc33b
Author: James Taylor <jt...@salesforce.com>
Authored: Sun Oct 26 20:46:03 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Sun Oct 26 20:51:21 2014 -0700
----------------------------------------------------------------------
.../end2end/TenantSpecificTablesDDLIT.java | 23 ++++++++++-----
.../phoenix/jdbc/PhoenixDatabaseMetaData.java | 31 ++++++++++++++++----
2 files changed, 42 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/60fee11b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
index 589e963..42fe5b8 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
@@ -22,6 +22,8 @@ import static org.apache.phoenix.exception.SQLExceptionCode.CANNOT_DROP_PK;
import static org.apache.phoenix.exception.SQLExceptionCode.CANNOT_MODIFY_VIEW_PK;
import static org.apache.phoenix.exception.SQLExceptionCode.CANNOT_MUTATE_TABLE;
import static org.apache.phoenix.exception.SQLExceptionCode.TABLE_UNDEFINED;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.KEY_SEQ;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ORDINAL_POSITION;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE_SEQUENCE;
@@ -554,21 +556,22 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
// make sure tenants see parent table's columns and their own
rs = meta.getColumns(null, null, StringUtil.escapeLike(TENANT_TABLE_NAME) + "%", null);
assertTrue(rs.next());
- assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "user");
+ assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "user", 1);
assertTrue(rs.next());
// (tenant_id column is not visible in tenant-specific connection)
- assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "tenant_type_id");
+ assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "tenant_type_id", 2);
+ assertEquals(1, rs.getInt(KEY_SEQ));
assertTrue(rs.next());
- assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "id");
+ assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "id", 3);
assertTrue(rs.next());
- assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "tenant_col");
+ assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "tenant_col", 4);
assertTrue(rs.next());
- assertColumnMetaData(rs, null, TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, "user");
+ assertColumnMetaData(rs, null, TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, "user", 1);
assertTrue(rs.next());
// (tenant_id column is not visible in tenant-specific connection)
- assertColumnMetaData(rs, null, TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, "id");
+ assertColumnMetaData(rs, null, TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, "id", 2);
assertTrue(rs.next());
- assertColumnMetaData(rs, null, TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, "tenant_col");
+ assertColumnMetaData(rs, null, TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, "tenant_col", 3);
assertFalse(rs.next());
}
finally {
@@ -587,4 +590,10 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
assertEquals(table, rs.getString("TABLE_NAME"));
assertEquals(SchemaUtil.normalizeIdentifier(column), rs.getString("COLUMN_NAME"));
}
+
+ private void assertColumnMetaData(ResultSet rs, String schema, String table, String column, int ordinalPosition)
+ throws SQLException {
+ assertColumnMetaData(rs, schema, table, column);
+ assertEquals(ordinalPosition, rs.getInt(ORDINAL_POSITION));
+ }
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/phoenix/blob/60fee11b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index f0b709c..0cf34dc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -26,8 +26,12 @@ import java.sql.Statement;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
+import java.util.List;
import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.compile.ColumnProjector;
import org.apache.phoenix.compile.ExpressionProjector;
@@ -55,6 +59,7 @@ import org.apache.phoenix.schema.PTable.LinkType;
import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.RowKeyValueAccessor;
import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.tuple.ResultTuple;
import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
import org.apache.phoenix.schema.tuple.Tuple;
import org.apache.phoenix.util.ByteUtil;
@@ -398,7 +403,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
SQL_DATA_TYPE + "," +
SQL_DATETIME_SUB + "," +
CHAR_OCTET_LENGTH + "," +
- ORDINAL_POSITION + "," +
+ "CASE WHEN TENANT_POS_SHIFT THEN ORDINAL_POSITION-1 ELSE ORDINAL_POSITION END AS " + ORDINAL_POSITION + "," +
"CASE " + NULLABLE + " WHEN " + DatabaseMetaData.attributeNoNulls + " THEN '" + Boolean.FALSE.toString() + "' WHEN " + DatabaseMetaData.attributeNullable + " THEN '" + Boolean.TRUE.toString() + "' END AS " + IS_NULLABLE + "," +
SCOPE_CATALOG + "," +
SCOPE_SCHEMA + "," +
@@ -410,8 +415,8 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
DATA_TYPE + " " + TYPE_ID + "," +// raw type id for potential internal consumption
VIEW_CONSTANT + "," +
MULTI_TENANT + "," +
- KEY_SEQ +
- " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS);
+ "CASE WHEN TENANT_POS_SHIFT THEN KEY_SEQ-1 ELSE KEY_SEQ END AS " + KEY_SEQ +
+ " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS + "(TENANT_POS_SHIFT BOOLEAN)");
StringBuilder where = new StringBuilder();
addTenantIdFilter(where, catalog);
if (schemaPattern != null) {
@@ -456,7 +461,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
} else {
buf.append(" where " + where);
}
- buf.append(" order by " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + ORDINAL_POSITION);
+ buf.append(" order by " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + SYSTEM_CATALOG_ALIAS + "." + ORDINAL_POSITION);
Statement stmt;
if (isTenantSpecificConnection) {
@@ -492,6 +497,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
private final int multiTenantIndex;
private final int keySeqIndex;
private boolean inMultiTenantTable;
+ private boolean tenantColumnSkipped;
private TenantColumnFilteringIterator(ResultIterator delegate, RowProjector rowProjector) throws SQLException {
super(delegate);
@@ -510,15 +516,30 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
&& getColumn(tuple, columnFamilyIndex) == null && getColumn(tuple, columnNameIndex) == null) {
// new table, check if it is multitenant
inMultiTenantTable = getColumn(tuple, multiTenantIndex) == Boolean.TRUE;
+ tenantColumnSkipped = false;
// skip row representing table
tuple = super.next();
}
- if (tuple != null && inMultiTenantTable && new Short((short)1).equals(getColumn(tuple, keySeqIndex))) {
+ if (tuple != null && inMultiTenantTable && !tenantColumnSkipped
+ && new Long(1L).equals(getColumn(tuple, keySeqIndex))) {
+ tenantColumnSkipped = true;
// skip tenant id primary key column
return next();
}
+ if (tuple != null && tenantColumnSkipped) {
+ ResultTuple resultTuple = (ResultTuple)tuple;
+ List<Cell> cells = resultTuple.getResult().listCells();
+ KeyValue kv = new KeyValue(resultTuple.getResult().getRow(), QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES,
+ Bytes.toBytes("TENANT_POS_SHIFT"), PDataType.TRUE_BYTES);
+ List<Cell> newCells = Lists.newArrayListWithCapacity(cells.size() + 1);
+ newCells.addAll(cells);
+ newCells.add(kv);
+ Collections.sort(newCells, KeyValue.COMPARATOR);
+ resultTuple.setResult(Result.create(newCells));
+ }
+
return tuple;
}
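In short, the patch surfaces the shift through a Phoenix dynamic column: TENANT_POS_SHIFT is not a real SYSTEM.CATALOG column, so it is declared inline in the FROM clause, and TenantColumnFilteringIterator injects a cell for it into each row that follows a skipped tenant id column. A simplified sketch of the rewritten metadata query (the projection is trimmed and the alias name is illustrative; the full statement is assembled in getColumns()):

    SELECT COLUMN_NAME,
           CASE WHEN TENANT_POS_SHIFT THEN ORDINAL_POSITION - 1
                ELSE ORDINAL_POSITION END AS ORDINAL_POSITION,
           CASE WHEN TENANT_POS_SHIFT THEN KEY_SEQ - 1
                ELSE KEY_SEQ END AS KEY_SEQ
    FROM SYSTEM."CATALOG" A(TENANT_POS_SHIFT BOOLEAN)
    ORDER BY TENANT_ID, TABLE_SCHEM, TABLE_NAME, A.ORDINAL_POSITION

Rows without the injected cell evaluate TENANT_POS_SHIFT as null, so their positions pass through unchanged.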
[12/15] git commit: PHOENIX-1390 Stats not updated on client after major compaction
Posted by ja...@apache.org.
PHOENIX-1390 Stats not updated on client after major compaction
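In outline, the updated integration tests below all follow the same pattern; ts, props, getUrl() and analyzeTable() come from the Phoenix test harness, and the SCN offsets are illustrative. Stats collection now runs on a connection opened at its own, later SCN, so the client observes the refreshed guideposts:

    // Sketch of the client-managed-time pattern used in the tests below.
    String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 15);
    Connection conn = DriverManager.getConnection(url, props);
    analyzeTable(conn, "ATABLE");   // collect stats at ts + 15
    conn.close();
    // Queries issued at SCN >= ts + 15 then see the updated statistics.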
Conflicts:
phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
Conflicts:
phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStatsImpl.java
phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6c94dc6e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6c94dc6e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6c94dc6e
Branch: refs/heads/3.2
Commit: 6c94dc6eb62c1cb4e255d9817af8c985be739785
Parents: 72144f1
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Oct 28 12:44:37 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Oct 28 15:18:39 2014 -0700
----------------------------------------------------------------------
.../end2end/BaseClientManagedTimeIT.java | 15 +-
.../org/apache/phoenix/end2end/BaseQueryIT.java | 3 +-
.../end2end/ClientTimeArithmeticQueryIT.java | 43 ++++++
.../phoenix/end2end/InMemoryOrderByIT.java | 4 +-
.../org/apache/phoenix/end2end/QueryIT.java | 24 ++-
.../org/apache/phoenix/end2end/SequenceIT.java | 7 +-
.../phoenix/end2end/SpooledOrderByIT.java | 4 +-
.../phoenix/end2end/StatsCollectorIT.java | 55 ++++++-
.../apache/phoenix/end2end/UpsertSelectIT.java | 4 +-
.../phoenix/compile/ExpressionCompiler.java | 2 +-
.../coprocessor/MetaDataEndpointImpl.java | 35 +----
.../UngroupedAggregateRegionObserver.java | 21 ++-
.../org/apache/phoenix/query/QueryServices.java | 1 +
.../phoenix/query/QueryServicesOptions.java | 7 +-
.../apache/phoenix/schema/MetaDataClient.java | 4 +-
.../phoenix/schema/stats/PTableStats.java | 7 +
.../phoenix/schema/stats/PTableStatsImpl.java | 12 +-
.../schema/stats/StatisticsCollector.java | 154 +++++++++++--------
.../phoenix/schema/stats/StatisticsScanner.java | 1 -
.../phoenix/schema/stats/StatisticsUtil.java | 6 +-
.../phoenix/schema/stats/StatisticsWriter.java | 39 +++--
21 files changed, 297 insertions(+), 151 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseClientManagedTimeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseClientManagedTimeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseClientManagedTimeIT.java
index 14dffcb..1acd5b3 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseClientManagedTimeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseClientManagedTimeIT.java
@@ -17,16 +17,21 @@
*/
package org.apache.phoenix.end2end;
+import java.util.Map;
+
import javax.annotation.concurrent.NotThreadSafe;
import org.apache.hadoop.conf.Configuration;
import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.util.ReadOnlyProps;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.experimental.categories.Category;
+import com.google.common.collect.Maps;
+
/**
* Base class for tests that manage their own time stamps
* We need to separate these from tests that rely on hbase to set
@@ -54,9 +59,17 @@ public abstract class BaseClientManagedTimeIT extends BaseTest {
deletePriorTables(ts - 1, getUrl());
}
+ public static Map<String,String> getDefaultProps() {
+ Map<String,String> props = Maps.newHashMapWithExpectedSize(5);
+ // Must update config before starting server
+ props.put(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB, Boolean.FALSE.toString());
+ return props;
+ }
+
@BeforeClass
public static void doSetup() throws Exception {
- setUpTestDriver(ReadOnlyProps.EMPTY_PROPS);
+ Map<String,String> props = getDefaultProps();
+ setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
@AfterClass
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
index 25b947d..2b00096 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
@@ -52,7 +52,6 @@ import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
@@ -75,7 +74,7 @@ public abstract class BaseQueryIT extends BaseClientManagedTimeIT {
@BeforeClass
@Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
public static void doSetup() throws Exception {
- Map<String,String> props = Maps.newHashMapWithExpectedSize(5);
+ Map<String,String> props = getDefaultProps();
props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(5000));
props.put(IndexWriterUtils.HTABLE_THREAD_KEY, Integer.toString(100));
// Make a small batch size to test multiple calls to reserve sequences
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientTimeArithmeticQueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientTimeArithmeticQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientTimeArithmeticQueryIT.java
index 98b233c..d709b9c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientTimeArithmeticQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientTimeArithmeticQueryIT.java
@@ -49,6 +49,7 @@ import java.util.Properties;
import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.TestUtil;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
@@ -596,5 +597,47 @@ public class ClientTimeArithmeticQueryIT extends BaseQueryIT {
}
}
+ @Test
+ public void testDateDateSubtract() throws Exception {
+ String url;
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+
+ url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 15);
+ Connection conn = DriverManager.getConnection(url, props);
+ PreparedStatement statement = conn.prepareStatement("UPSERT INTO ATABLE(organization_id,entity_id,a_time) VALUES(?,?,?)");
+ statement.setString(1, getOrganizationId());
+ statement.setString(2, ROW2);
+ statement.setDate(3, date);
+ statement.execute();
+ statement.setString(2, ROW3);
+ statement.setDate(3, date);
+ statement.execute();
+ statement.setString(2, ROW4);
+ statement.setDate(3, new Date(date.getTime() + TestUtil.MILLIS_IN_DAY - 1));
+ statement.execute();
+ statement.setString(2, ROW6);
+ statement.setDate(3, new Date(date.getTime() + TestUtil.MILLIS_IN_DAY - 1));
+ statement.execute();
+ statement.setString(2, ROW9);
+ statement.setDate(3, date);
+ statement.execute();
+ conn.commit();
+ conn.close();
+
+ url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 25);
+ conn = DriverManager.getConnection(url, props);
+ try {
+ statement = conn.prepareStatement("SELECT entity_id, b_string FROM ATABLE WHERE a_date - a_time > 1");
+ ResultSet rs = statement.executeQuery();
+ @SuppressWarnings("unchecked")
+ List<List<Object>> expectedResults = Lists.newArrayList(
+ Arrays.<Object>asList(ROW3, E_VALUE),
+ Arrays.<Object>asList( ROW6, E_VALUE),
+ Arrays.<Object>asList(ROW9, E_VALUE));
+ assertValuesEqualsResultSet(rs, expectedResults);
+ } finally {
+ conn.close();
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/it/java/org/apache/phoenix/end2end/InMemoryOrderByIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/InMemoryOrderByIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/InMemoryOrderByIT.java
index 48a0581..533143c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/InMemoryOrderByIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/InMemoryOrderByIT.java
@@ -24,8 +24,6 @@ import org.apache.phoenix.util.ReadOnlyProps;
import org.junit.BeforeClass;
import org.junit.experimental.categories.Category;
-import com.google.common.collect.Maps;
-
@Category(ClientManagedTimeTest.class)
public class InMemoryOrderByIT extends OrderByIT {
@@ -35,7 +33,7 @@ public class InMemoryOrderByIT extends OrderByIT {
@BeforeClass
@Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
public static void doSetup() throws Exception {
- Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
+ Map<String,String> props = getDefaultProps();
props.put(QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, Integer.toString(1024*1024));
// Must update config before starting server
setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java
index a537087..36d800a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java
@@ -248,7 +248,7 @@ public class QueryIT extends BaseQueryIT {
@Test
public void testPointInTimeScan() throws Exception {
// Override value that was set at creation time
- String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 1); // Run query at timestamp 5
+ String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 10);
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection upsertConn = DriverManager.getConnection(url, props);
String upsertStmt =
@@ -265,13 +265,15 @@ public class QueryIT extends BaseQueryIT {
stmt.setString(2, ROW4);
stmt.setInt(3, 5);
stmt.execute(); // should commit too
- Connection conn1 = DriverManager.getConnection(getUrl(), props);
+
+ url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 15);
+ Connection conn1 = DriverManager.getConnection(url, props);
analyzeTable(conn1, "ATABLE");
conn1.close();
upsertConn.close();
// Override value again, but should be ignored since it's past the SCN
- url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 3); // Run query at timestamp 5
+ url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 30);
upsertConn = DriverManager.getConnection(url, props);
upsertConn.setAutoCommit(true); // Test auto commit
// Insert all rows at ts
@@ -283,7 +285,7 @@ public class QueryIT extends BaseQueryIT {
upsertConn.close();
String query = "SELECT organization_id, a_string AS a FROM atable WHERE organization_id=? and a_integer = 5";
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2));
+ props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 20));
Connection conn = DriverManager.getConnection(getUrl(), props);
PreparedStatement statement = conn.prepareStatement(query);
statement.setString(1, tenantId);
@@ -392,7 +394,7 @@ public class QueryIT extends BaseQueryIT {
" A_TIMESTAMP) " +
"VALUES (?, ?, ?)";
// Override value that was set at creation time
- String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 1);
+ String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 10);
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection upsertConn = DriverManager.getConnection(url, props);
upsertConn.setAutoCommit(true); // Test auto commit
@@ -403,9 +405,12 @@ public class QueryIT extends BaseQueryIT {
byte[] ts1 = PDataType.TIMESTAMP.toBytes(tsValue1);
stmt.setTimestamp(3, tsValue1);
stmt.execute();
- Connection conn1 = DriverManager.getConnection(getUrl(), props);
+
+ url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 15);
+ Connection conn1 = DriverManager.getConnection(url, props);
analyzeTable(conn1, "ATABLE");
conn1.close();
+
updateStmt =
"upsert into " +
"ATABLE(" +
@@ -424,15 +429,18 @@ public class QueryIT extends BaseQueryIT {
stmt.setTime(4, new Time(tsValue2.getTime()));
stmt.execute();
upsertConn.close();
- conn1 = DriverManager.getConnection(getUrl(), props);
+
+ url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 20);
+ conn1 = DriverManager.getConnection(url, props);
analyzeTable(conn1, "ATABLE");
conn1.close();
+
analyzeTable(upsertConn, "ATABLE");
assertTrue(compare(CompareOp.GREATER, new ImmutableBytesWritable(ts2), new ImmutableBytesWritable(ts1)));
assertFalse(compare(CompareOp.GREATER, new ImmutableBytesWritable(ts1), new ImmutableBytesWritable(ts1)));
String query = "SELECT entity_id, a_timestamp, a_time FROM aTable WHERE organization_id=? and a_timestamp > ?";
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 3)); // Execute at timestamp 2
+ props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 30)); // Execute query at ts + 30, after all prior upserts
Connection conn = DriverManager.getConnection(getUrl(), props);
try {
PreparedStatement statement = conn.prepareStatement(query);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
index d610633..acaa1bb 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
@@ -51,7 +51,6 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;
import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
@Category(ClientManagedTimeTest.class)
public class SequenceIT extends BaseClientManagedTimeIT {
@@ -63,11 +62,9 @@ public class SequenceIT extends BaseClientManagedTimeIT {
@BeforeClass
@Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
public static void doSetup() throws Exception {
-
- Map<String,String> props = Maps.newHashMapWithExpectedSize(1);
- // Make a small batch size to test multiple calls to reserve sequences
- props.put(QueryServices.SEQUENCE_CACHE_SIZE_ATTRIB, Long.toString(BATCH_SIZE));
+ Map<String,String> props = getDefaultProps();
// Must update config before starting server
+ props.put(QueryServices.SEQUENCE_CACHE_SIZE_ATTRIB, Long.toString(BATCH_SIZE));
setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpooledOrderByIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpooledOrderByIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpooledOrderByIT.java
index 2533a29..c35ecab 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpooledOrderByIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpooledOrderByIT.java
@@ -24,15 +24,13 @@ import org.apache.phoenix.util.ReadOnlyProps;
import org.junit.BeforeClass;
import org.junit.experimental.categories.Category;
-import com.google.common.collect.Maps;
-
@Category(ClientManagedTimeTest.class)
public class SpooledOrderByIT extends OrderByIT {
@BeforeClass
@Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
public static void doSetup() throws Exception {
- Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
+ Map<String,String> props = getDefaultProps();
props.put(QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, Integer.toString(100));
// Must update config before starting server
setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
index 51ad543..b9a0e88 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
@@ -18,6 +18,8 @@
package org.apache.phoenix.end2end;
import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.apache.phoenix.util.TestUtil.getAllSplits;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
@@ -27,9 +29,15 @@ import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
+import java.util.List;
import java.util.Map;
import java.util.Properties;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.ReadOnlyProps;
@@ -41,7 +49,8 @@ import com.google.common.collect.Maps;
@Category(NeedsOwnMiniClusterTest.class)
public class StatsCollectorIT extends BaseOwnClusterHBaseManagedTimeIT {
-
+ private static final String STATS_TEST_TABLE_NAME = "S";
+
@BeforeClass
public static void doSetup() throws Exception {
Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
@@ -222,4 +231,48 @@ public class StatsCollectorIT extends BaseOwnClusterHBaseManagedTimeIT {
return stmt;
}
+ private void compactTable(Connection conn) throws IOException, InterruptedException, SQLException {
+ ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
+ HBaseAdmin admin = services.getAdmin();
+ try {
+ admin.flush(STATS_TEST_TABLE_NAME);
+ admin.majorCompact(STATS_TEST_TABLE_NAME);
+ Thread.sleep(10000); // FIXME: how do we know when compaction is done?
+ } finally {
+ admin.close();
+ }
+ services.clearCache();
+ }
+
+ @Test
+ public void testCompactUpdatesStats() throws Exception {
+ int nRows = 10;
+ Connection conn;
+ PreparedStatement stmt;
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ conn = DriverManager.getConnection(getUrl(), props);
+ conn.createStatement().execute("CREATE TABLE " + STATS_TEST_TABLE_NAME + "(k CHAR(1) PRIMARY KEY, v INTEGER) " + HColumnDescriptor.KEEP_DELETED_CELLS + "=" + Boolean.FALSE);
+ stmt = conn.prepareStatement("UPSERT INTO " + STATS_TEST_TABLE_NAME + " VALUES(?,?)");
+ for (int i = 0; i < nRows; i++) {
+ stmt.setString(1, Character.toString((char) ('a' + i)));
+ stmt.setInt(2, i);
+ stmt.executeUpdate();
+ }
+ conn.commit();
+
+ compactTable(conn);
+ conn = DriverManager.getConnection(getUrl(), props);
+ List<KeyRange> keyRanges = getAllSplits(conn, STATS_TEST_TABLE_NAME);
+ assertEquals(nRows + 1, keyRanges.size());
+
+ int nDeletedRows = conn.createStatement().executeUpdate("DELETE FROM " + STATS_TEST_TABLE_NAME + " WHERE V < 5");
+ conn.commit();
+ assertEquals(5, nDeletedRows);
+
+ compactTable(conn);
+
+ keyRanges = getAllSplits(conn, STATS_TEST_TABLE_NAME);
+ assertEquals(nRows / 2 + 1, keyRanges.size());
+
+ }
}
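The nRows + 1 assertion only holds if each row produces its own guidepost, which requires a very small guidepost width in the mini-cluster config (set in doSetup, not shown in this hunk). A hedged sketch of such a setup; the width value is illustrative:

    Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
    // A tiny width forces roughly one guidepost per row in this test.
    props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));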
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
index 642ba62..ac54fe4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
@@ -55,8 +55,6 @@ import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
-import com.google.common.collect.Maps;
-
@Category(ClientManagedTimeTest.class)
public class UpsertSelectIT extends BaseClientManagedTimeIT {
@@ -64,7 +62,7 @@ public class UpsertSelectIT extends BaseClientManagedTimeIT {
@BeforeClass
@Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
public static void doSetup() throws Exception {
- Map<String,String> props = Maps.newHashMapWithExpectedSize(5);
+ Map<String,String> props = getDefaultProps();
props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(500));
props.put(QueryServices.THREAD_POOL_SIZE_ATTRIB, Integer.toString(64));
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
index 135ef01..409950c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
@@ -871,7 +871,7 @@ public class ExpressionCompiler extends UnsupportedAllParseNodeVisitor<Expressio
if (isType1Date || isType2Date) {
if (isType1Date && isType2Date) {
i = 2;
- theType = PDataType.LONG;
+ theType = PDataType.DECIMAL;
} else if (isType1Date && type2 != null
&& type2.isCoercibleTo(PDataType.DECIMAL)) {
i = 2;
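Promoting a date-minus-date result to DECIMAL matters because the difference is expressed in fractional days, which a LONG would silently truncate. A worked sketch of the arithmetic (values are illustrative):

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class DateDiffArithmetic {
        public static void main(String[] args) {
            long millisInDay = 24L * 60 * 60 * 1000;
            long t1 = 0L;                    // epoch
            long t2 = 36L * 60 * 60 * 1000;  // 36 hours later
            BigDecimal days = BigDecimal.valueOf(t2 - t1)
                    .divide(BigDecimal.valueOf(millisInDay), 10, RoundingMode.HALF_UP);
            System.out.println(days);        // 1.5 days; a LONG result would truncate to 1
        }
    }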
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index f7c0aae..38277c5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -465,6 +465,7 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met
try {
statsHTable = getEnvironment().getTable(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES);
stats = StatisticsUtil.readStatistics(statsHTable, physicalTableName.getBytes(), clientTimeStamp);
+ timeStamp = Math.max(timeStamp, stats.getTimestamp());
} catch (org.apache.hadoop.hbase.TableNotFoundException e) {
logger.warn(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + " not online yet?");
} finally {
@@ -1119,33 +1120,6 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met
return null; // impossible
}
}
-
- private PTable incrementTableTimestamp(byte[] key, long clientTimeStamp) throws IOException, SQLException {
- RegionCoprocessorEnvironment env = getEnvironment();
- HRegion region = env.getRegion();
- Integer lid = region.getLock(null, key, true);
- if (lid == null) {
- throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
- }
- try {
- PTable table = doGetTable(key, clientTimeStamp, lid);
- if (table != null) {
- long tableTimeStamp = table.getTimeStamp() + 1;
- List<Mutation> mutations = Lists.newArrayListWithExpectedSize(1);
- Put p = new Put(key);
- p.add(TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, tableTimeStamp, ByteUtil.EMPTY_BYTE_ARRAY);
- mutations.add(p);
- region.mutateRowsWithLocks(mutations, Collections.<byte[]> emptySet());
-
- Cache<ImmutableBytesPtr, PTable> metaDataCache = GlobalCache.getInstance(getEnvironment()).getMetaDataCache();
- ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
- metaDataCache.invalidate(cacheKey);
- }
- return table;
- } finally {
- region.releaseRowLock(lid);
- }
- }
private PTable doGetTable(byte[] key, long clientTimeStamp) throws IOException, SQLException {
return doGetTable(key, clientTimeStamp, null);
@@ -1413,8 +1387,11 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met
public void incrementTableTimeStamp(byte[] tenantId, byte[] schemaName, byte[] tableName, final long clientTimeStamp)
throws IOException {
try {
- byte[] tableKey = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
- incrementTableTimestamp(tableKey, clientTimeStamp);
+ byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
+ ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
+ Cache<ImmutableBytesPtr, PTable> metaDataCache =
+ GlobalCache.getInstance(this.getEnvironment()).getMetaDataCache();
+ metaDataCache.invalidate(cacheKey);
} catch (Throwable t) {
ServerUtil.throwIOException(SchemaUtil.getTableName(schemaName, tableName), t);
}
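The replacement drops the row write entirely and relies on Guava cache invalidation: the next metadata lookup misses and rebuilds the entry from SYSTEM.CATALOG. A standalone sketch of that pattern (cache construction is illustrative):

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;

    public class InvalidateSketch {
        public static void main(String[] args) {
            Cache<String, String> cache = CacheBuilder.newBuilder().maximumSize(100).build();
            cache.put("table-key", "cached-ptable");
            cache.invalidate("table-key");  // evict; no write to the backing store
            // null => the next real lookup repopulates the entry
            System.out.println(cache.getIfPresent("table-key"));
        }
    }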
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 556d69d..e4215e2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -68,6 +68,7 @@ import org.apache.phoenix.index.PhoenixIndexCodec;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.join.HashJoinInfo;
import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
import org.apache.phoenix.schema.ConstraintViolationException;
import org.apache.phoenix.schema.PColumn;
@@ -433,9 +434,13 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
if (!table.equals(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME)
&& scanType.equals(ScanType.MAJOR_COMPACT)) {
try {
- // TODO: for users that manage timestamps themselves, we should provide
- // a means of specifying/getting this.
- long clientTimeStamp = TimeKeeper.SYSTEM.getCurrentTime();
+ boolean useCurrentTime =
+ c.getEnvironment().getConfiguration().getBoolean(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB,
+ QueryServicesOptions.DEFAULT_STATS_USE_CURRENT_TIME);
+ // Lets clients that manage their own timestamps avoid current time
+ // when background tasks update stats; instead we track the max
+ // timestamp of the scanned cells and use that.
+ long clientTimeStamp = useCurrentTime ? TimeKeeper.SYSTEM.getCurrentTime() : StatisticsCollector.NO_TIMESTAMP;
StatisticsCollector stats = new StatisticsCollector(c.getEnvironment(), table, clientTimeStamp);
internalScan =
stats.createCompactionScanner(c.getEnvironment().getRegion(), store, scanners, scanType, earliestPutTs, s);
@@ -459,9 +464,13 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
if (!table.equals(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME)) {
StatisticsCollector stats = null;
try {
- // TODO: for users that manage timestamps themselves, we should provide
- // a means of specifying/getting this.
- long clientTimeStamp = TimeKeeper.SYSTEM.getCurrentTime();
+ boolean useCurrentTime =
+ e.getEnvironment().getConfiguration().getBoolean(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB,
+ QueryServicesOptions.DEFAULT_STATS_USE_CURRENT_TIME);
+ // Lets clients that manage their own timestamps avoid current time
+ // when background tasks update stats; instead we track the max
+ // timestamp of the scanned cells and use that.
+ long clientTimeStamp = useCurrentTime ? TimeKeeper.SYSTEM.getCurrentTime() : StatisticsCollector.NO_TIMESTAMP;
stats = new StatisticsCollector(e.getEnvironment(), table, clientTimeStamp);
stats.collectStatsDuringSplit(e.getEnvironment().getConfiguration(), l, r, region);
} catch (IOException ioe) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index a8536a4..d3faf2e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -124,6 +124,7 @@ public interface QueryServices extends SQLCloseable {
public static final String MIN_STATS_UPDATE_FREQ_MS_ATTRIB = "phoenix.stats.minUpdateFrequency";
public static final String STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB = "phoenix.stats.guidepost.width";
public static final String STATS_GUIDEPOST_PER_REGION_ATTRIB = "phoenix.stats.guidepost.per.region";
+ public static final String STATS_USE_CURRENT_TIME_ATTRIB = "phoenix.stats.useCurrentTime";
public static final String SEQUENCE_SALT_BUCKETS_ATTRIB = "phoenix.sequence.saltBuckets";
/**
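The flag is read server-side from the HBase configuration, so in practice it would be set in hbase-site.xml on the region servers; setting it programmatically is shown here only as a sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StatsTimeConfig {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // false => stats rows are written at the max cell timestamp seen during
            // compaction/split rather than wall-clock time (see UngroupedAggregateRegionObserver)
            conf.setBoolean("phoenix.stats.useCurrentTime", false);
            System.out.println(conf.getBoolean("phoenix.stats.useCurrentTime", true));
        }
    }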
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index ad2b48a..117f285 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -52,6 +52,7 @@ import static org.apache.phoenix.query.QueryServices.SPOOL_DIRECTORY;
import static org.apache.phoenix.query.QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB;
import static org.apache.phoenix.query.QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB;
import static org.apache.phoenix.query.QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB;
+import static org.apache.phoenix.query.QueryServices.STATS_USE_CURRENT_TIME_ATTRIB;
import static org.apache.phoenix.query.QueryServices.THREAD_POOL_SIZE_ATTRIB;
import static org.apache.phoenix.query.QueryServices.THREAD_TIMEOUT_MS_ATTRIB;
import static org.apache.phoenix.query.QueryServices.USE_INDEXES_ATTRIB;
@@ -131,8 +132,9 @@ public class QueryServicesOptions {
public static final long DEFAULT_STATS_HISTOGRAM_DEPTH_BYTE = 1024 * 1024 * 30;
public static final int DEFAULT_STATS_UPDATE_FREQ_MS = 15 * 60000; // 15min
- public static final int DEFAULT_GUIDE_POSTS_PER_REGION = 20;
-
+ public static final long DEFAULT_STATS_GUIDEPOST_WIDTH_BYTES = 100 * 1024 * 1024; // 100MB
+ public static final boolean DEFAULT_STATS_USE_CURRENT_TIME = true;
+
public static final boolean DEFAULT_USE_REVERSE_SCAN = true;
/**
@@ -160,6 +162,7 @@ public class QueryServicesOptions {
public static QueryServicesOptions withDefaults() {
Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration();
QueryServicesOptions options = new QueryServicesOptions(config)
+ .setIfUnset(STATS_USE_CURRENT_TIME_ATTRIB, DEFAULT_STATS_USE_CURRENT_TIME)
.setIfUnset(KEEP_ALIVE_MS_ATTRIB, DEFAULT_KEEP_ALIVE_MS)
.setIfUnset(THREAD_POOL_SIZE_ATTRIB, DEFAULT_THREAD_POOL_SIZE)
.setIfUnset(QUEUE_SIZE_ATTRIB, DEFAULT_QUEUE_SIZE)
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 0efbad6..7421f86 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -594,13 +594,13 @@ public class MetaDataClient {
Long scn = connection.getSCN();
// Always invalidate the cache
long clientTimeStamp = connection.getSCN() == null ? HConstants.LATEST_TIMESTAMP : scn;
- String query = "SELECT CURRENT_DATE() - " + LAST_STATS_UPDATE_TIME + " FROM " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME
+ String query = "SELECT CURRENT_DATE()," + LAST_STATS_UPDATE_TIME + " FROM " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME
+ " WHERE " + PHYSICAL_NAME + "='" + physicalName.getString() + "' AND " + COLUMN_FAMILY
+ " IS NULL AND " + REGION_NAME + " IS NULL AND " + LAST_STATS_UPDATE_TIME + " IS NOT NULL";
ResultSet rs = connection.createStatement().executeQuery(query);
long msSinceLastUpdate = Long.MAX_VALUE;
if (rs.next()) {
- msSinceLastUpdate = rs.getLong(1);
+ msSinceLastUpdate = rs.getLong(1) - rs.getLong(2);
}
if (msSinceLastUpdate < msMinBetweenUpdates) {
return 0;
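Selecting both values and subtracting client-side keeps the comparison in epoch milliseconds; with the ExpressionCompiler change above, a server-side CURRENT_DATE() - LAST_STATS_UPDATE_TIME would now come back as a DECIMAL number of days. A condensed sketch of the client-side computation (the WHERE clause from the patch is omitted for brevity):

    // Hedged sketch; column and table names follow the patch.
    ResultSet rs = connection.createStatement().executeQuery(
        "SELECT CURRENT_DATE(), LAST_STATS_UPDATE_TIME FROM SYSTEM.STATS");
    long msSinceLastUpdate = Long.MAX_VALUE;
    if (rs.next()) {
        // getLong on a DATE yields epoch millis, so the difference is in ms
        msSinceLastUpdate = rs.getLong(1) - rs.getLong(2);
    }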
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStats.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStats.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStats.java
index 2c26739..ce6a9fc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStats.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStats.java
@@ -57,6 +57,11 @@ public interface PTableStats extends Writable {
public int getEstimatedSize() {
return 0;
}
+
+ @Override
+ public long getTimestamp() {
+ return StatisticsCollector.NO_TIMESTAMP;
+ }
};
/**
@@ -67,4 +72,6 @@ public interface PTableStats extends Writable {
SortedMap<byte[], GuidePostsInfo> getGuidePosts();
int getEstimatedSize();
+
+ long getTimestamp();
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStatsImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStatsImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStatsImpl.java
index fab9f52..8531979 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStatsImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStatsImpl.java
@@ -28,6 +28,7 @@ import java.util.TreeMap;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.WritableUtils;
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
import org.apache.phoenix.util.SizedUtil;
import com.google.common.collect.Lists;
@@ -40,13 +41,15 @@ import com.sun.istack.NotNull;
public class PTableStatsImpl implements PTableStats {
private final SortedMap<byte[], GuidePostsInfo> guidePosts;
private final int estimatedSize;
+ private final long timeStamp;
public PTableStatsImpl() {
- this(new TreeMap<byte[], GuidePostsInfo>(Bytes.BYTES_COMPARATOR));
+ this(new TreeMap<byte[], GuidePostsInfo>(Bytes.BYTES_COMPARATOR), MetaDataProtocol.MIN_TABLE_TIMESTAMP);
}
- public PTableStatsImpl(@NotNull SortedMap<byte[], GuidePostsInfo> guidePosts) {
+ public PTableStatsImpl(@NotNull SortedMap<byte[], GuidePostsInfo> guidePosts, long timeStamp) {
this.guidePosts = guidePosts;
+ this.timeStamp = timeStamp;
int estimatedSize = SizedUtil.OBJECT_SIZE + SizedUtil.INT_SIZE + SizedUtil.sizeOfTreeMap(guidePosts.size());
for (Map.Entry<byte[], GuidePostsInfo> entry : guidePosts.entrySet()) {
byte[] cf = entry.getKey();
@@ -119,4 +122,9 @@ public class PTableStatsImpl implements PTableStats {
public int getEstimatedSize() {
return estimatedSize;
}
+
+ @Override
+ public long getTimestamp() {
+ return timeStamp;
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
index 2e7bfd9..3bdb9a3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
@@ -14,8 +14,6 @@ import java.util.Collections;
import java.util.List;
import java.util.Map;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
@@ -33,12 +31,15 @@ import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreScanner;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.TimeKeeper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
@@ -52,35 +53,48 @@ import com.google.common.collect.Maps;
* board for now.
*/
public class StatisticsCollector {
+ private static final Logger logger = LoggerFactory.getLogger(StatisticsCollector.class);
+ public static final long NO_TIMESTAMP = -1;
+
private Map<String, byte[]> minMap = Maps.newHashMap();
private Map<String, byte[]> maxMap = Maps.newHashMap();
private long guidepostDepth;
+ private boolean useCurrentTime;
+ private long maxTimeStamp = MetaDataProtocol.MIN_TABLE_TIMESTAMP;
private Map<String, Pair<Long,GuidePostsInfo>> guidePostsMap = Maps.newHashMap();
// Tracks the bytecount per family if it has reached the guidePostsDepth
private Map<ImmutableBytesPtr, Boolean> familyMap = Maps.newHashMap();
protected StatisticsWriter statsTable;
- // Ensures that either analyze or compaction happens at any point of time.
- private static final Log LOG = LogFactory.getLog(StatisticsCollector.class);
public StatisticsCollector(RegionCoprocessorEnvironment env, String tableName, long clientTimeStamp)
throws IOException {
Configuration config = env.getConfiguration();
HTableInterface statsHTable = env.getTable((PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES));
- long maxFileSize = statsHTable.getTableDescriptor().getMaxFileSize();
- if (maxFileSize <= 0) { // HBase brain dead API doesn't give you the "real" max file size if it's not set...
- maxFileSize = HConstants.DEFAULT_MAX_FILE_SIZE;
+ useCurrentTime =
+ config.getBoolean(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB,
+ QueryServicesOptions.DEFAULT_STATS_USE_CURRENT_TIME);
+ int guidepostPerRegion = config.getInt(QueryServices.STATS_GUIDEPOST_PER_REGION_ATTRIB, 0);
+ if (guidepostPerRegion > 0) {
+ long maxFileSize = statsHTable.getTableDescriptor().getMaxFileSize();
+ if (maxFileSize <= 0) { // HBase brain dead API doesn't give you the "real" max file size if it's not set...
+ maxFileSize = HConstants.DEFAULT_MAX_FILE_SIZE;
+ }
+ guidepostDepth = maxFileSize / guidepostPerRegion;
+ } else {
+ guidepostDepth = config.getLong(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB,
+ QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_WIDTH_BYTES);
}
- guidepostDepth = config.getLong(
- QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB,
- maxFileSize
- / config.getInt(QueryServices.STATS_GUIDEPOST_PER_REGION_ATTRIB,
- QueryServicesOptions.DEFAULT_GUIDE_POSTS_PER_REGION));
// Get the stats table associated with the current table on which the CP is
// triggered
this.statsTable = StatisticsWriter.newWriter(statsHTable, tableName, clientTimeStamp);
}
+ public long getMaxTimeStamp() {
+ return maxTimeStamp;
+ }
+
public void close() throws IOException {
this.statsTable.close();
}
@@ -89,12 +103,12 @@ public class StatisticsCollector {
try {
ArrayList<Mutation> mutations = new ArrayList<Mutation>();
writeStatsToStatsTable(region, true, mutations, TimeKeeper.SYSTEM.getCurrentTime());
- if (LOG.isDebugEnabled()) {
- LOG.debug("Committing new stats for the region " + region.getRegionInfo());
+ if (logger.isDebugEnabled()) {
+ logger.debug("Committing new stats for the region " + region.getRegionInfo());
}
commitStats(mutations);
} catch (IOException e) {
- LOG.error(e);
+ logger.error("Unable to commit new stats", e);
} finally {
clear();
}
@@ -106,20 +120,20 @@ public class StatisticsCollector {
// update the statistics table
for (ImmutableBytesPtr fam : familyMap.keySet()) {
if (delete) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Deleting the stats for the region " + region.getRegionInfo());
+ if (logger.isDebugEnabled()) {
+ logger.debug("Deleting the stats for the region " + region.getRegionInfo());
}
statsTable.deleteStats(region.getRegionInfo().getRegionNameAsString(), this, Bytes.toString(fam.copyBytesIfNecessary()),
mutations);
}
- if (LOG.isDebugEnabled()) {
- LOG.debug("Adding new stats for the region " + region.getRegionInfo());
+ if (logger.isDebugEnabled()) {
+ logger.debug("Adding new stats for the region " + region.getRegionInfo());
}
statsTable.addStats((region.getRegionInfo().getRegionNameAsString()), this, Bytes.toString(fam.copyBytesIfNecessary()),
mutations);
}
} catch (IOException e) {
- LOG.error("Failed to update statistics table!", e);
+ logger.error("Failed to update statistics table!", e);
throw e;
}
}
@@ -138,7 +152,7 @@ public class StatisticsCollector {
mutations);
}
} catch (IOException e) {
- LOG.error("Failed to delete from statistics table!", e);
+ logger.error("Failed to delete from statistics table!", e);
throw e;
}
}
@@ -164,29 +178,58 @@ public class StatisticsCollector {
}
}
- public void collectStatsDuringSplit(Configuration conf, HRegion l, HRegion r, HRegion parent)
+ public InternalScanner createCompactionScanner(HRegion region, Store store,
+ List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs, InternalScanner s)
+ throws IOException {
+ // See if this is for Major compaction
+ InternalScanner internalScan = s;
+ if (scanType.equals(ScanType.MAJOR_COMPACT)) {
+ // this is the first CP accessed, so we need to just create a major
+ // compaction scanner, just
+ // like in the compactor
+ if (s == null) {
+ Scan scan = new Scan();
+ scan.setMaxVersions(store.getFamily().getMaxVersions());
+ long smallestReadPoint = store.getHRegion().getSmallestReadPoint();
+ internalScan = new StoreScanner(store, store.getScanInfo(), scan, scanners, scanType,
+ smallestReadPoint, earliestPutTs);
+ }
+ if (logger.isDebugEnabled()) {
+ logger.debug("Compaction scanner created for stats");
+ }
+ InternalScanner scanner = getInternalScanner(region, store, internalScan, store.getColumnFamilyName());
+ if (scanner != null) {
+ internalScan = scanner;
+ }
+ }
+ return internalScan;
+ }
+
+
+ public void collectStatsDuringSplit(Configuration conf, HRegion l, HRegion r, HRegion region)
throws IOException {
// Invoke collectStat here
try {
// Create a delete operation on the parent region
// Then write the new guide posts for individual regions
List<Mutation> mutations = Lists.newArrayListWithExpectedSize(3);
- long currentTime = TimeKeeper.SYSTEM.getCurrentTime();
- deleteStatsFromStatsTable(parent, mutations, currentTime);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Collecting stats for the daughter region " + l.getRegionInfo());
+
+ long currentTime = useCurrentTime ? TimeKeeper.SYSTEM.getCurrentTime() : NO_TIMESTAMP;
+ deleteStatsFromStatsTable(region, mutations, currentTime);
+ if (logger.isDebugEnabled()) {
+ logger.debug("Collecting stats for the daughter region " + l.getRegionInfo());
}
collectStatsForSplitRegions(conf, l, mutations, currentTime);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Collecting stats for the daughter region " + r.getRegionInfo());
+ if (logger.isDebugEnabled()) {
+ logger.debug("Collecting stats for the daughter region " + r.getRegionInfo());
}
collectStatsForSplitRegions(conf, r, mutations, currentTime);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Committing stats for the daughter regions as part of split " + r.getRegionInfo());
+ if (logger.isDebugEnabled()) {
+ logger.debug("Committing stats for the daughter regions as part of split " + r.getRegionInfo());
}
} catch (IOException e) {
- LOG.error("Error while capturing stats after split of region "
- + parent.getRegionInfo().getRegionNameAsString(), e);
+ logger.error("Error while capturing stats after split of region "
+ + region.getRegionInfo().getRegionNameAsString(), e);
}
}
@@ -202,47 +245,20 @@ public class StatisticsCollector {
count = scanRegion(scanner, count);
writeStatsToStatsTable(daughter, false, mutations, currentTime);
} catch (IOException e) {
- LOG.error(e);
+ logger.error("Unable to collects stats during split", e);
toThrow = e;
} finally {
- try {
- if (scanner != null) scanner.close();
- } catch (IOException e) {
- LOG.error(e);
- if (toThrow != null) toThrow = e;
- } finally {
- if (toThrow != null) throw toThrow;
- }
- }
- }
-
- public InternalScanner createCompactionScanner(HRegion region, Store store,
- List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs, InternalScanner s)
- throws IOException {
- // See if this is for Major compaction
- InternalScanner internalScan = s;
- if (scanType.equals(ScanType.MAJOR_COMPACT)) {
- // this is the first CP accessed, so we need to just create a major
- // compaction scanner, just
- // like in the compactor
- if (s == null) {
- Scan scan = new Scan();
- scan.setMaxVersions(store.getFamily().getMaxVersions());
- long smallestReadPoint = store.getHRegion().getSmallestReadPoint();
- internalScan = new StoreScanner(store, store.getScanInfo(), scan, scanners, scanType,
- smallestReadPoint, earliestPutTs);
- }
- if (LOG.isDebugEnabled()) {
- LOG.debug("Compaction scanner created for stats");
- }
- InternalScanner scanner = getInternalScanner(region, store, internalScan, store.getColumnFamilyName());
- if (scanner != null) {
- internalScan = scanner;
+ try {
+ if (scanner != null) scanner.close();
+ } catch (IOException e) {
+ logger.error("Unable to close scanner after split", e);
+ if (toThrow != null) toThrow = e;
+ } finally {
+ if (toThrow != null) throw toThrow;
}
}
- return internalScan;
}
-
+
private Scan createScan(Configuration conf) {
Scan scan = new Scan();
scan.setCaching(conf.getInt(QueryServices.SCAN_CACHE_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_SCAN_CACHE_SIZE));
@@ -262,6 +278,7 @@ public class StatisticsCollector {
this.minMap.clear();
this.guidePostsMap.clear();
this.familyMap.clear();
+ maxTimeStamp = MetaDataProtocol.MIN_TABLE_TIMESTAMP;
}
public void updateStatistic(KeyValue kv) {
@@ -285,6 +302,7 @@ public class StatisticsCollector {
maxMap.put(fam, row);
}
}
+ maxTimeStamp = Math.max(maxTimeStamp, kv.getTimestamp());
// TODO : This can be moved to an interface so that we could collect guide posts in different ways
Pair<Long,GuidePostsInfo> gps = guidePostsMap.get(fam);
if (gps == null) {
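The constructor now treats phoenix.stats.guidepost.per.region as an override: only when it is positive is the depth derived from the region max file size; otherwise the explicit width (default 100MB) wins. A standalone sketch of that decision, with illustrative values:

    public class GuidepostDepth {
        static long depth(int perRegion, long maxFileSize, long configuredWidth, long defaultWidth) {
            if (perRegion > 0) {
                if (maxFileSize <= 0) {
                    maxFileSize = 10L * 1024 * 1024 * 1024; // stand-in for HConstants.DEFAULT_MAX_FILE_SIZE
                }
                return maxFileSize / perRegion;   // ~perRegion guideposts per region
            }
            return configuredWidth > 0 ? configuredWidth : defaultWidth;
        }

        public static void main(String[] args) {
            long defaultWidth = 100L * 1024 * 1024; // 100MB
            System.out.println(depth(20, -1, -1, defaultWidth)); // per-region override wins
            System.out.println(depth(0, -1, -1, defaultWidth));  // falls back to the width
        }
    }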
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
index 4e28123..239085d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
@@ -33,7 +33,6 @@ public class StatisticsScanner implements InternalScanner {
public StatisticsScanner(StatisticsCollector tracker, StatisticsWriter stats, HRegion region,
InternalScanner delegate, byte[] family) {
- // should there be only one tracker?
this.tracker = tracker;
this.stats = stats;
this.delegate = delegate;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
index 8d7dd00..a48b04a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
@@ -65,6 +65,7 @@ public class StatisticsUtil {
ResultScanner scanner = statsHTable.getScanner(s);
try {
Result result = null;
+ long timeStamp = MetaDataProtocol.MIN_TABLE_TIMESTAMP;
TreeMap<byte[], GuidePostsInfo> guidePostsPerCf = new TreeMap<byte[], GuidePostsInfo>(Bytes.BYTES_COMPARATOR);
while ((result = scanner.next()) != null) {
KeyValue current = result.raw()[0];
@@ -78,9 +79,12 @@ public class StatisticsUtil {
if (oldInfo != null) {
newInfo.combine(oldInfo);
}
+ if (current.getTimestamp() > timeStamp) {
+ timeStamp = current.getTimestamp();
+ }
}
if (!guidePostsPerCf.isEmpty()) {
- return new PTableStatsImpl(guidePostsPerCf);
+ return new PTableStatsImpl(guidePostsPerCf, timeStamp);
}
} finally {
scanner.close();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c94dc6e/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
index 3f391f5..4118bb9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
@@ -44,7 +44,9 @@ public class StatisticsWriter implements Closeable {
clientTimeStamp = TimeKeeper.SYSTEM.getCurrentTime();
}
StatisticsWriter statsTable = new StatisticsWriter(hTable, tableName, clientTimeStamp);
- statsTable.commitLastStatsUpdatedTime();
+ if (clientTimeStamp != StatisticsCollector.NO_TIMESTAMP) { // Otherwise we do this later as we don't know the ts yet
+ statsTable.commitLastStatsUpdatedTime();
+ }
return statsTable;
}
@@ -83,26 +85,31 @@ public class StatisticsWriter implements Closeable {
*/
public void addStats(String regionName, StatisticsCollector tracker, String fam, List<Mutation> mutations) throws IOException {
if (tracker == null) { return; }
-
+ boolean useMaxTimeStamp = clientTimeStamp == StatisticsCollector.NO_TIMESTAMP;
+ long timeStamp = clientTimeStamp;
+ if (useMaxTimeStamp) { // The update-time row was deferred at writer creation; write it now that the max timestamp is known
+ timeStamp = tracker.getMaxTimeStamp();
+ mutations.add(getLastStatsUpdatedTimePut(timeStamp));
+ }
byte[] prefix = StatisticsUtil.getRowKey(tableName, PDataType.VARCHAR.toBytes(fam),
PDataType.VARCHAR.toBytes(regionName));
Put put = new Put(prefix);
GuidePostsInfo gp = tracker.getGuidePosts(fam);
if (gp != null) {
put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_COUNT_BYTES,
- clientTimeStamp, PDataType.LONG.toBytes((gp.getGuidePosts().size())));
+ timeStamp, PDataType.LONG.toBytes((gp.getGuidePosts().size())));
put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_BYTES,
- clientTimeStamp, PDataType.VARBINARY.toBytes(gp.toBytes()));
+ timeStamp, PDataType.VARBINARY.toBytes(gp.toBytes()));
put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES,
- clientTimeStamp, PDataType.LONG.toBytes(gp.getByteCount()));
+ timeStamp, PDataType.LONG.toBytes(gp.getByteCount()));
}
put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.MIN_KEY_BYTES,
- clientTimeStamp, PDataType.VARBINARY.toBytes(tracker.getMinKey(fam)));
+ timeStamp, PDataType.VARBINARY.toBytes(tracker.getMinKey(fam)));
put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.MAX_KEY_BYTES,
- clientTimeStamp, PDataType.VARBINARY.toBytes(tracker.getMaxKey(fam)));
+ timeStamp, PDataType.VARBINARY.toBytes(tracker.getMaxKey(fam)));
// Add our empty column value so queries behave correctly
put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES,
- clientTimeStamp, ByteUtil.EMPTY_BYTE_ARRAY);
+ timeStamp, ByteUtil.EMPTY_BYTE_ARRAY);
mutations.add(put);
}
@@ -115,21 +122,27 @@ public class StatisticsWriter implements Closeable {
}
}
- private void commitLastStatsUpdatedTime() throws IOException {
- // Always use wallclock time for this, as it's a mechanism to prevent
- // stats from being collected too often.
+ private Put getLastStatsUpdatedTimePut(long timeStamp) {
long currentTime = TimeKeeper.SYSTEM.getCurrentTime();
byte[] prefix = tableName;
Put put = new Put(prefix);
- put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.LAST_STATS_UPDATE_TIME_BYTES, clientTimeStamp,
+ put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.LAST_STATS_UPDATE_TIME_BYTES, timeStamp,
PDataType.DATE.toBytes(new Date(currentTime)));
+ return put;
+ }
+
+ private void commitLastStatsUpdatedTime() throws IOException {
+ // Always use wallclock time for this, as it's a mechanism to prevent
+ // stats from being collected too often.
+ Put put = getLastStatsUpdatedTimePut(clientTimeStamp);
statisticsTable.put(put);
}
public void deleteStats(String regionName, StatisticsCollector tracker, String fam, List<Mutation> mutations)
throws IOException {
+ long timeStamp = clientTimeStamp == StatisticsCollector.NO_TIMESTAMP ? tracker.getMaxTimeStamp() : clientTimeStamp;
byte[] prefix = StatisticsUtil.getRowKey(tableName, PDataType.VARCHAR.toBytes(fam),
PDataType.VARCHAR.toBytes(regionName));
- mutations.add(new Delete(prefix, clientTimeStamp - 1));
+ mutations.add(new Delete(prefix, timeStamp - 1));
}
}
\ No newline at end of file
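The writer's rule reduces to one decision: a real client timestamp (e.g. an SCN) is used as-is, while NO_TIMESTAMP defers to the max cell timestamp the collector observed. A minimal sketch of that choice:

    public class StatsTimestampChoice {
        static final long NO_TIMESTAMP = -1; // mirrors StatisticsCollector.NO_TIMESTAMP

        static long effectiveTimestamp(long clientTimeStamp, long trackerMaxTimeStamp) {
            // With externally managed timestamps the stats rows must not be stamped
            // with wall-clock time, or they could be invisible to SCN-based reads.
            return clientTimeStamp == NO_TIMESTAMP ? trackerMaxTimeStamp : clientTimeStamp;
        }

        public static void main(String[] args) {
            System.out.println(effectiveTimestamp(NO_TIMESTAMP, 42L)); // 42
            System.out.println(effectiveTimestamp(1000L, 42L));        // 1000
        }
    }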
[07/15] git commit: PHOENIX-1366 Use static constants
Posted by ja...@apache.org.
PHOENIX-1366 Use static constants
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0d90e2fc
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0d90e2fc
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0d90e2fc
Branch: refs/heads/3.2
Commit: 0d90e2fc92e6013a48d6479fc8208733edab9d52
Parents: 60fee11
Author: James Taylor <jt...@salesforce.com>
Authored: Sun Oct 26 22:13:31 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Sun Oct 26 22:13:31 2014 -0700
----------------------------------------------------------------------
.../phoenix/jdbc/PhoenixDatabaseMetaData.java | 32 +++++++++++---------
1 file changed, 18 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0d90e2fc/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 0cf34dc..5560d48 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -29,7 +29,6 @@ import java.util.Collections;
import java.util.List;
import org.apache.commons.lang.StringEscapeUtils;
-import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
@@ -257,6 +256,9 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
public static final String PARENT_TENANT_ID = "PARENT_TENANT_ID";
public static final byte[] PARENT_TENANT_ID_BYTES = Bytes.toBytes(PARENT_TENANT_ID);
+ private static final String TENANT_POS_SHIFT = "TENANT_POS_SHIFT";
+ private static final byte[] TENANT_POS_SHIFT_BYTES = Bytes.toBytes(TENANT_POS_SHIFT);
+
private final PhoenixConnection connection;
private final ResultSet emptyResultSet;
@@ -403,7 +405,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
SQL_DATA_TYPE + "," +
SQL_DATETIME_SUB + "," +
CHAR_OCTET_LENGTH + "," +
- "CASE WHEN TENANT_POS_SHIFT THEN ORDINAL_POSITION-1 ELSE ORDINAL_POSITION END AS " + ORDINAL_POSITION + "," +
+ "CASE WHEN " + TENANT_POS_SHIFT + " THEN " + ORDINAL_POSITION + "-1 ELSE " + ORDINAL_POSITION + " END AS " + ORDINAL_POSITION + "," +
"CASE " + NULLABLE + " WHEN " + DatabaseMetaData.attributeNoNulls + " THEN '" + Boolean.FALSE.toString() + "' WHEN " + DatabaseMetaData.attributeNullable + " THEN '" + Boolean.TRUE.toString() + "' END AS " + IS_NULLABLE + "," +
SCOPE_CATALOG + "," +
SCOPE_SCHEMA + "," +
@@ -415,8 +417,8 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
DATA_TYPE + " " + TYPE_ID + "," +// raw type id for potential internal consumption
VIEW_CONSTANT + "," +
MULTI_TENANT + "," +
- "CASE WHEN TENANT_POS_SHIFT THEN KEY_SEQ-1 ELSE KEY_SEQ END AS " + KEY_SEQ +
- " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS + "(TENANT_POS_SHIFT BOOLEAN)");
+ "CASE WHEN " + TENANT_POS_SHIFT + " THEN " + KEY_SEQ + "-1 ELSE " + KEY_SEQ + " END AS " + KEY_SEQ +
+ " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS + "(" + TENANT_POS_SHIFT + " BOOLEAN)");
StringBuilder where = new StringBuilder();
addTenantIdFilter(where, catalog);
if (schemaPattern != null) {
@@ -521,23 +523,25 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
tuple = super.next();
}
- if (tuple != null && inMultiTenantTable && !tenantColumnSkipped
- && new Long(1L).equals(getColumn(tuple, keySeqIndex))) {
- tenantColumnSkipped = true;
- // skip tenant id primary key column
- return next();
+ if (tuple != null && inMultiTenantTable && !tenantColumnSkipped) {
+ Object value = getColumn(tuple, keySeqIndex);
+ if (value != null && ((Number)value).longValue() == 1L) {
+ tenantColumnSkipped = true;
+ // skip tenant id primary key column
+ return next();
+ }
}
if (tuple != null && tenantColumnSkipped) {
ResultTuple resultTuple = (ResultTuple)tuple;
- List<Cell> cells = resultTuple.getResult().listCells();
- KeyValue kv = new KeyValue(resultTuple.getResult().getRow(), QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES,
- Bytes.toBytes("TENANT_POS_SHIFT"), PDataType.TRUE_BYTES);
- List<Cell> newCells = Lists.newArrayListWithCapacity(cells.size() + 1);
+ List<KeyValue> cells = resultTuple.getResult().list();
+ KeyValue kv = new KeyValue(resultTuple.getResult().getRow(), TABLE_FAMILY_BYTES,
+ TENANT_POS_SHIFT_BYTES, PDataType.TRUE_BYTES);
+ List<KeyValue> newCells = Lists.newArrayListWithCapacity(cells.size() + 1);
newCells.addAll(cells);
newCells.add(kv);
Collections.sort(newCells, KeyValue.COMPARATOR);
- resultTuple.setResult(Result.create(newCells));
+ resultTuple.setResult(new Result(newCells));
}
return tuple;
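TENANT_POS_SHIFT is a dynamic column: it is declared inline in the FROM clause for this one statement and never added to the table's schema. A hedged illustration of the syntax against a hypothetical table T with columns (k, v):

    // EXTRA_FLAG is not part of T's schema; it exists only for this statement.
    ResultSet rs = conn.createStatement().executeQuery(
        "SELECT k, EXTRA_FLAG FROM T(EXTRA_FLAG BOOLEAN)");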
[02/15] git commit: PHOENIX-1373: Ctrl-C out of sqlline causes terminal to be useless
Posted by ja...@apache.org.
PHOENIX-1373: Ctrl-C out of sqlline causes terminal to be useless
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f8db1d55
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f8db1d55
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f8db1d55
Branch: refs/heads/3.2
Commit: f8db1d55108a26ecd8a85e6431d81955f6c66c93
Parents: f1cbcc4
Author: Jeffrey Zhong <je...@apache.org>
Authored: Thu Oct 23 17:34:43 2014 -0700
Committer: Jeffrey Zhong <je...@apache.org>
Committed: Thu Oct 23 17:36:55 2014 -0700
----------------------------------------------------------------------
bin/sqlline.py | 2 ++
1 file changed, 2 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/f8db1d55/bin/sqlline.py
----------------------------------------------------------------------
diff --git a/bin/sqlline.py b/bin/sqlline.py
index d41c2e7..f48e527 100755
--- a/bin/sqlline.py
+++ b/bin/sqlline.py
@@ -31,6 +31,8 @@ def kill_child():
if childProc is not None:
childProc.terminate()
childProc.kill()
+ if os.name != 'nt':
+ os.system("reset")
atexit.register(kill_child)
phoenix_utils.setPath()
[13/15] git commit: PHOENIX-1391 Remove obsolete hint
Posted by ja...@apache.org.
PHOENIX-1391 Remove obsolete hint
Conflicts:
phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4767f139
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4767f139
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4767f139
Branch: refs/heads/3.2
Commit: 4767f139b2ef125549a4a727bdb33bbf9d54bda2
Parents: 6c94dc6
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Oct 28 14:08:08 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Oct 28 15:28:14 2014 -0700
----------------------------------------------------------------------
.../end2end/SkipScanAfterManualSplitIT.java | 2 +-
.../phoenix/coprocessor/MetaDataEndpointImpl.java | 2 +-
.../phoenix/coprocessor/MetaDataProtocol.java | 2 +-
.../org/apache/phoenix/execute/BaseQueryPlan.java | 6 ++++++
.../org/apache/phoenix/iterate/ExplainTable.java | 17 +++++++++++++++--
.../apache/phoenix/iterate/ParallelIterators.java | 2 +-
.../phoenix/jdbc/PhoenixDatabaseMetaData.java | 15 +++++++--------
.../java/org/apache/phoenix/parse/HintNode.java | 9 ++++-----
.../phoenix/query/ConnectionQueryServices.java | 2 +-
.../phoenix/query/ConnectionQueryServicesImpl.java | 4 ++--
.../query/ConnectionlessQueryServicesImpl.java | 2 +-
.../query/DelegateConnectionQueryServices.java | 4 ++--
.../org/apache/phoenix/schema/MetaDataClient.java | 2 +-
.../java/org/apache/phoenix/util/ScanUtil.java | 13 +++++++++----
.../phoenix/query/BaseConnectionlessQueryTest.java | 2 ++
.../org/apache/phoenix/query/QueryPlanTest.java | 8 ++++++++
16 files changed, 62 insertions(+), 30 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4767f139/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
index a07ad0e..3c0344c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
@@ -128,7 +128,7 @@ public class SkipScanAfterManualSplitIT extends BaseHBaseManagedTimeIT {
assertEquals(nRegions, nInitialRegions);
int nRows = 2;
- String query = "SELECT /*+ NO_INTRA_REGION_PARALLELIZATION */ count(*) FROM S WHERE a IN ('tl','jt',' a',' b',' c',' d')";
+ String query = "SELECT count(*) FROM S WHERE a IN ('tl','jt',' a',' b',' c',' d')";
ResultSet rs1 = conn.createStatement().executeQuery(query);
assertTrue(rs1.next());
nRegions = services.getAllTableRegions(TABLE_NAME_BYTES).size();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4767f139/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 38277c5..0327c35 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -1384,7 +1384,7 @@ public class MetaDataEndpointImpl extends BaseEndpointCoprocessor implements Met
}
@Override
- public void incrementTableTimeStamp(byte[] tenantId, byte[] schemaName, byte[] tableName, final long clientTimeStamp)
+ public void clearTableFromCache(byte[] tenantId, byte[] schemaName, byte[] tableName, final long clientTimeStamp)
throws IOException {
try {
byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4767f139/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index 7979757..76fb129 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -266,7 +266,7 @@ public interface MetaDataProtocol extends CoprocessorProtocol {
*/
void clearCache();
- void incrementTableTimeStamp(byte[] tenantId, byte[] schema, byte[] tableName, long clientTimestamp)
+ void clearTableFromCache(byte[] tenantId, byte[] schema, byte[] tableName, long clientTimestamp)
throws IOException;
/**
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4767f139/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index 3a59828..9053bab 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -36,6 +36,7 @@ import org.apache.phoenix.iterate.ParallelIterators.ParallelIteratorFactory;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.parse.FilterableStatement;
+import org.apache.phoenix.parse.HintNode.Hint;
import org.apache.phoenix.schema.TableRef;
import org.apache.phoenix.util.SQLCloseable;
import org.apache.phoenix.util.SQLCloseables;
@@ -132,6 +133,11 @@ public abstract class BaseQueryPlan implements QueryPlan {
}
Scan scan = context.getScan();
+
+ if (statement.getHint().hasHint(Hint.SMALL)) {
+ ScanUtil.setSmall(scan, true);
+ }
+
// Set producer on scan so HBase server does round robin processing
//setProducer(scan);
// Set the time range on the scan so we don't get back rows newer than when the statement was compiled
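ScanUtil.setSmall presumably maps the hint onto HBase's small-scan flag, which lets a scan that fits in one block complete with fewer RPCs; availability of the flag depends on the HBase release. A standalone illustration with a raw HBase Scan:

    import org.apache.hadoop.hbase.client.Scan;

    public class SmallScanSketch {
        public static void main(String[] args) {
            Scan scan = new Scan();
            // Small scans fold open/next/close into fewer RPCs where supported.
            scan.setSmall(true);
            System.out.println(scan.isSmall());
        }
    }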
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4767f139/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
index 40a0cff..8c04383 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
@@ -31,9 +31,12 @@ import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
+import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
import org.apache.phoenix.compile.ScanRanges;
import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
+import org.apache.phoenix.parse.HintNode;
+import org.apache.phoenix.parse.HintNode.Hint;
import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.query.KeyRange.Bound;
import org.apache.phoenix.schema.PDataType;
@@ -50,15 +53,19 @@ public abstract class ExplainTable {
protected final StatementContext context;
protected final TableRef tableRef;
protected final GroupBy groupBy;
+ protected final OrderBy orderBy;
+ protected final HintNode hint;
public ExplainTable(StatementContext context, TableRef table) {
- this(context,table,GroupBy.EMPTY_GROUP_BY);
+ this(context,table,GroupBy.EMPTY_GROUP_BY, OrderBy.EMPTY_ORDER_BY, HintNode.EMPTY_HINT_NODE);
}
- public ExplainTable(StatementContext context, TableRef table, GroupBy groupBy) {
+ public ExplainTable(StatementContext context, TableRef table, GroupBy groupBy, OrderBy orderBy, HintNode hintNode) {
this.context = context;
this.tableRef = table;
this.groupBy = groupBy;
+ this.orderBy = orderBy;
+ this.hint = hintNode;
}
private boolean explainSkipScan(StringBuilder buf) {
@@ -90,6 +97,12 @@ public abstract class ExplainTable {
StringBuilder buf = new StringBuilder(prefix);
ScanRanges scanRanges = context.getScanRanges();
boolean hasSkipScanFilter = false;
+ if (hint.hasHint(Hint.SMALL)) {
+ buf.append("SMALL ");
+ }
+ if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) {
+ buf.append("REVERSE ");
+ }
if (scanRanges.isEverything()) {
buf.append("FULL SCAN ");
} else {
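(The SMALL and REVERSE markers appended above now surface in EXPLAIN output. A hedged sketch of inspecting the plan, reusing the PTSDB3 test table that appears in QueryPlanTest further down; the connection setup is assumed:)

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class ExplainSmallScan {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                ResultSet rs = conn.createStatement().executeQuery(
                        "EXPLAIN SELECT /*+ SMALL */ host FROM PTSDB3" +
                        " WHERE host IN ('na1','na2','na3')");
                while (rs.next()) {
                    // Expect a line like:
                    // CLIENT PARALLEL 1-WAY SMALL SKIP SCAN ON 3 KEYS OVER PTSDB3 ...
                    System.out.println(rs.getString(1));
                }
            }
        }
    }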
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4767f139/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
index 43fa4f5..f5c4027 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
@@ -110,7 +110,7 @@ public class ParallelIterators extends ExplainTable implements ResultIterators {
public ParallelIterators(QueryPlan plan, Integer perScanLimit, ParallelIteratorFactory iteratorFactory)
throws SQLException {
- super(plan.getContext(), plan.getTableRef(), plan.getGroupBy());
+ super(plan.getContext(), plan.getTableRef(), plan.getGroupBy(), plan.getOrderBy(), plan.getStatement().getHint());
this.plan = plan;
StatementContext context = plan.getContext();
TableRef tableRef = plan.getTableRef();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4767f139/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 5560d48..5f2e8b8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -49,7 +49,6 @@ import org.apache.phoenix.hbase.index.util.VersionUtil;
import org.apache.phoenix.iterate.DelegateResultIterator;
import org.apache.phoenix.iterate.MaterializedResultIterator;
import org.apache.phoenix.iterate.ResultIterator;
-import org.apache.phoenix.parse.HintNode.Hint;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.schema.PDataType;
import org.apache.phoenix.schema.PDatum;
@@ -333,7 +332,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
@Override
public ResultSet getCatalogs() throws SQLException {
- StringBuilder buf = new StringBuilder("select /*+" + Hint.NO_INTRA_REGION_PARALLELIZATION + "*/\n" +
+ StringBuilder buf = new StringBuilder("select \n" +
" DISTINCT " + TENANT_ID + " " + TABLE_CAT +
" from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS +
" where " + COLUMN_NAME + " is null" +
@@ -389,7 +388,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
@Override
public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern)
throws SQLException {
- StringBuilder buf = new StringBuilder("select /*+" + Hint.NO_INTRA_REGION_PARALLELIZATION + "*/\n " +
+ StringBuilder buf = new StringBuilder("select \n " +
TENANT_ID + " " + TABLE_CAT + "," + // use this for tenant id
TABLE_SCHEM + "," +
TABLE_NAME + " ," +
@@ -647,7 +646,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
if (unique) { // No unique indexes
return emptyResultSet;
}
- StringBuilder buf = new StringBuilder("select /*+" + Hint.NO_INTRA_REGION_PARALLELIZATION + "*/\n" +
+ StringBuilder buf = new StringBuilder("select \n" +
TENANT_ID + " " + TABLE_CAT + ",\n" + // use this column for column family name
TABLE_SCHEM + ",\n" +
DATA_TABLE_NAME + " " + TABLE_NAME + ",\n" +
@@ -800,7 +799,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
if (table == null || table.length() == 0) {
return emptyResultSet;
}
- StringBuilder buf = new StringBuilder("select /*+" + Hint.NO_INTRA_REGION_PARALLELIZATION + "*/\n" +
+ StringBuilder buf = new StringBuilder("select \n" +
TENANT_ID + " " + TABLE_CAT + "," + // use catalog for tenant_id
TABLE_SCHEM + "," +
TABLE_NAME + " ," +
@@ -874,7 +873,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
@Override
public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException {
- StringBuilder buf = new StringBuilder("select /*+" + Hint.NO_INTRA_REGION_PARALLELIZATION + "*/\n distinct " +
+ StringBuilder buf = new StringBuilder("select distinct \n" +
TENANT_ID + " " + TABLE_CATALOG + "," + // no catalog for tables
TABLE_SCHEM +
" from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS +
@@ -899,7 +898,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
@Override
public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException {
- StringBuilder buf = new StringBuilder("select /*+" + Hint.NO_INTRA_REGION_PARALLELIZATION + "*/\n" +
+ StringBuilder buf = new StringBuilder("select \n" +
TENANT_ID + " " + TABLE_CAT + "," + // Use tenantId for catalog
TABLE_SCHEM + "," +
TABLE_NAME + "," +
@@ -986,7 +985,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
@Override
public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types)
throws SQLException {
- StringBuilder buf = new StringBuilder("select /*+" + Hint.NO_INTRA_REGION_PARALLELIZATION + "*/\n" +
+ StringBuilder buf = new StringBuilder("select \n" +
TENANT_ID + " " + TABLE_CAT + "," + // tenant_id is the catalog
TABLE_SCHEM + "," +
TABLE_NAME + " ," +
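(All of the rewritten queries above back the standard JDBC DatabaseMetaData surface; dropping the obsolete hint changes the generated SQL but not caller-facing behavior. A small usage sketch, with the connection URL assumed:)

    import java.sql.Connection;
    import java.sql.DatabaseMetaData;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class ListPhoenixTables {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                DatabaseMetaData md = conn.getMetaData();
                // Backed by the getTables() query shown in the diff above.
                try (ResultSet rs = md.getTables(null, null, "%", null)) {
                    while (rs.next()) {
                        System.out.println(rs.getString("TABLE_SCHEM")
                                + "." + rs.getString("TABLE_NAME"));
                    }
                }
            }
        }
    }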
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4767f139/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java
index 0ded0b5..ea20114 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java
@@ -55,11 +55,6 @@ public class HintNode {
*/
SKIP_SCAN_HASH_JOIN,
/**
- * Prevents the spawning of multiple threads during
- * query processing.
- */
- NO_INTRA_REGION_PARALLELIZATION,
- /**
* Prevents the usage of indexes, forcing usage
* of the data table for a query.
*/
@@ -100,6 +95,10 @@ public class HintNode {
* between 2 selected columns this will give better performance.
*/
NO_SEEK_TO_COLUMN,
+ /**
+ * Saves an RPC call on the scan. See Scan.setSmall(true) in HBase documentation.
+ */
+ SMALL,
};
private final Map<Hint,String> hints;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4767f139/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
index 965a515..da61e51 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
@@ -104,7 +104,7 @@ public interface ConnectionQueryServices extends QueryServices, MetaDataMutated
public boolean supportsFeature(Feature feature);
public String getUserName();
- public void incrementTableTimeStamp(final byte[] tenantId, final byte[] schemaName, final byte[] tableName, long clientTS) throws SQLException;
+ public void clearTableFromCache(final byte[] tenantId, final byte[] schemaName, final byte[] tableName, long clientTS) throws SQLException;
public PTableStats getTableStats(byte[] physicalName, long clientTimeStamp) throws SQLException;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4767f139/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 3e46a30..557f6d4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -1846,7 +1846,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
@Override
- public void incrementTableTimeStamp(final byte[] tenantId, final byte[] schemaName, final byte[] tableName,
+ public void clearTableFromCache(final byte[] tenantId, final byte[] schemaName, final byte[] tableName,
final long clientTS) throws SQLException {
// clear the meta data cache for the table here
try {
@@ -1858,7 +1858,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
new Batch.Call<MetaDataProtocol, MetaDataMutationResult>() {
@Override
public MetaDataMutationResult call(MetaDataProtocol instance) throws IOException {
- instance.incrementTableTimeStamp(tenantId, schemaName, tableName, clientTS);
+ instance.clearTableFromCache(tenantId, schemaName, tableName, clientTS);
// TODO: Should this really return a result? Return null?
return new MetaDataMutationResult();
}
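(The rename makes the intent explicit: the server-side metadata cache entry for the table is cleared so clients re-read it, and its stats, on next use. A hedged sketch of the renamed internal call; how the services handle is obtained, e.g. from a PhoenixConnection, is elided, and the schema and table names are hypothetical:)

    import java.sql.SQLException;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.phoenix.query.ConnectionQueryServices;

    public class ClearCacheSketch {
        // Internal API, shown for illustration only.
        public static void clear(ConnectionQueryServices services) throws SQLException {
            services.clearTableFromCache(
                    Bytes.toBytes(""),           // no tenant id
                    Bytes.toBytes("MY_SCHEMA"),  // hypothetical schema
                    Bytes.toBytes("MY_TABLE"),   // hypothetical table
                    System.currentTimeMillis()); // client timestamp
        }
    }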
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4767f139/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index 7d0a109..b29f2b6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -229,7 +229,7 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
}
@Override
- public void incrementTableTimeStamp(byte[] tenantId, byte[] schemaName, byte[] tableName, long clientTS)
+ public void clearTableFromCache(byte[] tenantId, byte[] schemaName, byte[] tableName, long clientTS)
throws SQLException {}
// TODO: share this with ConnectionQueryServicesImpl
@Override
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4767f139/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
index defad5b..ae0b689 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
@@ -229,9 +229,9 @@ public class DelegateConnectionQueryServices extends DelegateQueryServices imple
}
@Override
- public void incrementTableTimeStamp(byte[] tenantId, byte[] schemaName, byte[] tableName, long clientTS)
+ public void clearTableFromCache(byte[] tenantId, byte[] schemaName, byte[] tableName, long clientTS)
throws SQLException {
- getDelegate().incrementTableTimeStamp(tenantId, schemaName, tableName, clientTS);
+ getDelegate().clearTableFromCache(tenantId, schemaName, tableName, clientTS);
}
@Override
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4767f139/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 7421f86..a460f9f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -621,7 +621,7 @@ public class MetaDataClient {
// We need to update the stats table so that client will pull the new one with
// the updated stats.
- connection.getQueryServices().incrementTableTimeStamp(tenantIdBytes,
+ connection.getQueryServices().clearTableFromCache(tenantIdBytes,
Bytes.toBytes(SchemaUtil.getSchemaNameFromFullName(physicalName.getString())),
Bytes.toBytes(SchemaUtil.getTableNameFromFullName(physicalName.getString())), clientTimeStamp);
return rowCount;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4767f139/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
index 90fbba7..bfbaa37 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
@@ -59,7 +59,11 @@ import com.google.common.collect.Lists;
public class ScanUtil {
public static final int[] SINGLE_COLUMN_SLOT_SPAN = new int[1];
private static final byte[] ZERO_BYTE_ARRAY = new byte[1024];
-
+ // Don't use the attribute constants defined on Scan, as they don't exist
+ // in older HBase releases; literal names maintain backward compatibility.
+ private static final String REVERSED_ATTR = "_reversed_";
+ private static final String SMALL_ATTRIB = "_small_";
+
private ScanUtil() {
}
@@ -432,10 +436,7 @@ public class ScanUtil {
return key;
}
- private static final String REVERSED_ATTR = "_reversed_";
-
public static void setReversed(Scan scan) {
- // TODO: set attribute dynamically here to prevent dependency on newer HBase release
scan.setAttribute(REVERSED_ATTR, PDataType.TRUE_BYTES);
}
@@ -538,4 +539,8 @@ public class ScanUtil {
}
return tenantId;
}
+
+ public static void setSmall(Scan scan, boolean b) {
+ scan.setAttribute(SMALL_ATTRIB, Bytes.toBytes(b));
+ }
}
\ No newline at end of file
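(Tagging the Scan with literal attribute names keeps the client compatible with HBase releases that predate Scan.setSmall/setReversed. A sketch of the matching server-side check; the actual consumer inside the coprocessor layer is assumed to do an equivalent lookup, and this standalone helper is illustrative only:)

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanAttrSketch {
        private static final String SMALL_ATTRIB = "_small_";

        // Mirrors ScanUtil.setSmall(): a non-null, true-valued attribute
        // means the server should treat this as a small scan.
        public static boolean isSmall(Scan scan) {
            byte[] attr = scan.getAttribute(SMALL_ATTRIB);
            return attr != null && Bytes.toBoolean(attr);
        }
    }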
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4767f139/phoenix-core/src/test/java/org/apache/phoenix/query/BaseConnectionlessQueryTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseConnectionlessQueryTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseConnectionlessQueryTest.java
index 8ac322f..8f17a7c 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseConnectionlessQueryTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseConnectionlessQueryTest.java
@@ -27,6 +27,7 @@ import static org.apache.phoenix.util.TestUtil.JOIN_ORDER_TABLE_FULL_NAME;
import static org.apache.phoenix.util.TestUtil.JOIN_SUPPLIER_TABLE_FULL_NAME;
import static org.apache.phoenix.util.TestUtil.MULTI_CF_NAME;
import static org.apache.phoenix.util.TestUtil.PHOENIX_CONNECTIONLESS_JDBC_URL;
+import static org.apache.phoenix.util.TestUtil.PTSDB2_NAME;
import static org.apache.phoenix.util.TestUtil.PTSDB3_NAME;
import static org.apache.phoenix.util.TestUtil.PTSDB_NAME;
import static org.apache.phoenix.util.TestUtil.TABLE_WITH_ARRAY;
@@ -104,6 +105,7 @@ public class BaseConnectionlessQueryTest extends BaseTest {
ensureTableCreated(getUrl(), ENTITY_HISTORY_TABLE_NAME);
ensureTableCreated(getUrl(), FUNKY_NAME);
ensureTableCreated(getUrl(), PTSDB_NAME);
+ ensureTableCreated(getUrl(), PTSDB2_NAME);
ensureTableCreated(getUrl(), PTSDB3_NAME);
ensureTableCreated(getUrl(), MULTI_CF_NAME);
ensureTableCreated(getUrl(), JOIN_ORDER_TABLE_FULL_NAME);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4767f139/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java
index fd22e47..1e3df0b 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java
@@ -41,6 +41,14 @@ public class QueryPlanTest extends BaseConnectionlessQueryTest {
"CLIENT PARALLEL 1-WAY SKIP SCAN ON 3 KEYS OVER PTSDB3 [~'na3'] - [~'na1']\n" +
" SERVER FILTER BY FIRST KEY ONLY",
+ "SELECT /*+ SMALL*/ host FROM PTSDB3 WHERE host IN ('na1', 'na2','na3')",
+ "CLIENT PARALLEL 1-WAY SMALL SKIP SCAN ON 3 KEYS OVER PTSDB3 [~'na3'] - [~'na1']\n" +
+ " SERVER FILTER BY FIRST KEY ONLY",
+
+ "SELECT inst,date FROM PTSDB2 WHERE inst = 'na1' ORDER BY inst DESC, date DESC",
+ "CLIENT PARALLEL 1-WAY REVERSE RANGE SCAN OVER PTSDB2 ['na1']\n" +
+ " SERVER FILTER BY FIRST KEY ONLY",
+
"SELECT host FROM PTSDB WHERE inst IS NULL AND host IS NOT NULL AND date >= to_date('2013-01-01')",
"CLIENT PARALLEL 1-WAY RANGE SCAN OVER PTSDB [null,not null]\n" +
" SERVER FILTER BY FIRST KEY ONLY AND DATE >= '2013-01-01 00:00:00.000'",
[15/15] git commit: Update CHANGES
Posted by ja...@apache.org.
Update CHANGES
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8b460b5c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8b460b5c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8b460b5c
Branch: refs/heads/3.2
Commit: 8b460b5c086f46b3e636133df90932c80c1a643a
Parents: 311b5aa
Author: Mujtaba <mu...@apache.org>
Authored: Tue Oct 28 16:28:12 2014 -0700
Committer: Mujtaba <mu...@apache.org>
Committed: Tue Oct 28 16:28:12 2014 -0700
----------------------------------------------------------------------
CHANGES | 9 +++++++++
1 file changed, 9 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b460b5c/CHANGES
----------------------------------------------------------------------
diff --git a/CHANGES b/CHANGES
index bdc6b9a..40157f9 100644
--- a/CHANGES
+++ b/CHANGES
@@ -6,6 +6,7 @@ Release Notes - Phoenix - Version 3.2
* [PHOENIX-943] - Handle pushed down post-filters for subquery in joins with limit and non-groupby aggregation
* [PHOENIX-945] - Support correlated subqueries in comparison without ANY/SOME/ALL
* [PHOENIX-1085] - Commonize logic for adding salt byte and adding region start key in ParallelIterators
+ * [PHOENIX-1168] - Support non-correlated sub-queries in where clause having a comparison operator with no modifier or a comparison operator modified by ANY, SOME or ALL
* [PHOENIX-1259] - Perform partial scan for ANALYZE when table salted or local index
* [PHOENIX-1263] - Only cache guideposts on physical PTable
* [PHOENIX-1264] - Add StatisticsCollector to existing tables on first connection to cluster
@@ -21,7 +22,9 @@ Release Notes - Phoenix - Version 3.2
** Bug
* [PHOENIX-105] - Remove org.apache.commons.csv source once available in Maven repo
+ * [PHOENIX-897] - psql command doesn't allow using certain characters in invocation
* [PHOENIX-941] - Parallelize within regions to prevent rpc timeout
+ * [PHOENIX-944] - Support derived tables in FROM clause that needs extra steps of client-side aggregation or other processing
* [PHOENIX-954] - View index sequences of a non multi tenant table are not getting deleted after main table drop
* [PHOENIX-973] - Lexer skips unexpected characters
* [PHOENIX-1044] - Phoenix-Pig: No results returned unless all used columns are selected
@@ -81,9 +84,15 @@ Release Notes - Phoenix - Version 3.2
* [PHOENIX-1360] - NPE in SpoolingResultIterator
* [PHOENIX-1361] - Sequence value goes backwards if sequence validated before reserved
* [PHOENIX-1365] - Make sequence salt buckets configurable
+ * [PHOENIX-1366] - ORDINAL_POSITION incorrect for multi-tenant table over tenant-specific connection
* [PHOENIX-1368] - Persist link from VIEW back to its child VIEW
* [PHOENIX-1369] - Add back encode/decode methods as deprecated
* [PHOENIX-1370] - Allow query timeout to differ from RPC timeout
+ * [PHOENIX-1376] - java.lang.NullPointerException occurs in JDBC driver
+ * [PHOENIX-1382] - Phoenix 4.2 RC Issue
+ * [PHOENIX-1385] - Adding, dropping and adding columns fails with NPE
+ * [PHOENIX-1390] - Stats not updated on client after major compaction
+ * [PHOENIX-1391] - Remove obsolete hint
** Improvement
* [PHOENIX-619] - Support DELETE over table with immutable index when possible
[14/15] git commit: Update QueryPlanTest to remove reverse test
Posted by ja...@apache.org.
Update QueryPlanTest to remove reverse test
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/311b5aa0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/311b5aa0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/311b5aa0
Branch: refs/heads/3.2
Commit: 311b5aa0ac4cbe6f0db0654340769a4cd4be84eb
Parents: 4767f13
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Oct 28 15:54:46 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Oct 28 15:54:46 2014 -0700
----------------------------------------------------------------------
.../src/test/java/org/apache/phoenix/query/QueryPlanTest.java | 4 ----
1 file changed, 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/311b5aa0/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java
index 1e3df0b..5205c99 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java
@@ -45,10 +45,6 @@ public class QueryPlanTest extends BaseConnectionlessQueryTest {
"CLIENT PARALLEL 1-WAY SMALL SKIP SCAN ON 3 KEYS OVER PTSDB3 [~'na3'] - [~'na1']\n" +
" SERVER FILTER BY FIRST KEY ONLY",
- "SELECT inst,date FROM PTSDB2 WHERE inst = 'na1' ORDER BY inst DESC, date DESC",
- "CLIENT PARALLEL 1-WAY REVERSE RANGE SCAN OVER PTSDB2 ['na1']\n" +
- " SERVER FILTER BY FIRST KEY ONLY",
-
"SELECT host FROM PTSDB WHERE inst IS NULL AND host IS NOT NULL AND date >= to_date('2013-01-01')",
"CLIENT PARALLEL 1-WAY RANGE SCAN OVER PTSDB [null,not null]\n" +
" SERVER FILTER BY FIRST KEY ONLY AND DATE >= '2013-01-01 00:00:00.000'",