You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@phoenix.apache.org by pb...@apache.org on 2018/01/31 21:43:47 UTC
[1/2] phoenix git commit: PHOENIX-4437 Make
QueryPlan.getEstimatedBytesToScan() independent of getExplainPlan() and pull
optimize() out of getExplainPlan()
Repository: phoenix
Updated Branches:
refs/heads/4.x-HBase-1.2 878a264e5 -> afe21dc72
PHOENIX-4437 Make QueryPlan.getEstimatedBytesToScan() independent of getExplainPlan() and pull optimize() out of getExplainPlan()
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/eb9de14b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/eb9de14b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/eb9de14b
Branch: refs/heads/4.x-HBase-1.2
Commit: eb9de14b6b70a465c162b9928c4ae466deea3ee2
Parents: 878a264
Author: maryannxue <ma...@gmail.com>
Authored: Thu Dec 21 18:31:04 2017 +0000
Committer: Pedro Boado <pe...@gmail.com>
Committed: Wed Jan 31 20:55:59 2018 +0000
----------------------------------------------------------------------
.../end2end/ExplainPlanWithStatsEnabledIT.java | 2 +-
.../apache/phoenix/execute/BaseQueryPlan.java | 45 ++++++--------
.../apache/phoenix/execute/HashJoinPlan.java | 59 +++++++++---------
.../phoenix/execute/SortMergeJoinPlan.java | 63 ++++++++++----------
.../org/apache/phoenix/execute/UnionPlan.java | 53 ++++++++--------
.../apache/phoenix/jdbc/PhoenixStatement.java | 9 ++-
6 files changed, 119 insertions(+), 112 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb9de14b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
index 49efa97..f13510b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
@@ -298,7 +298,7 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
try (Connection conn = DriverManager.getConnection(getUrl())) {
conn.setAutoCommit(false);
Estimate info = getByteRowEstimates(conn, sql, binds);
- assertEquals((Long) 200l, info.estimatedBytes);
+ assertEquals((Long) 176l, info.estimatedBytes);
assertEquals((Long) 2l, info.estimatedRows);
assertTrue(info.estimateInfoTs > 0);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb9de14b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index 31f67b7..380037f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -117,7 +117,7 @@ public abstract class BaseQueryPlan implements QueryPlan {
protected Long estimatedRows;
protected Long estimatedSize;
protected Long estimateInfoTimestamp;
- private boolean explainPlanCalled;
+ private boolean getEstimatesCalled;
protected BaseQueryPlan(
@@ -498,32 +498,17 @@ public abstract class BaseQueryPlan implements QueryPlan {
@Override
public ExplainPlan getExplainPlan() throws SQLException {
- explainPlanCalled = true;
if (context.getScanRanges() == ScanRanges.NOTHING) {
return new ExplainPlan(Collections.singletonList("DEGENERATE SCAN OVER " + getTableRef().getTable().getName().getString()));
}
- // If cost-based optimizer is enabled, we need to initialize a dummy iterator to
- // get the stats for computing costs.
- boolean costBased =
- context.getConnection().getQueryServices().getConfiguration().getBoolean(
- QueryServices.COST_BASED_OPTIMIZER_ENABLED, QueryServicesOptions.DEFAULT_COST_BASED_OPTIMIZER_ENABLED);
- if (costBased) {
- ResultIterator iterator = iterator();
- iterator.close();
- }
- // Optimize here when getting explain plan, as queries don't get optimized until after compilation
- QueryPlan plan = context.getConnection().getQueryServices().getOptimizer().optimize(context.getStatement(), this);
- ExplainPlan exp = plan instanceof BaseQueryPlan ? new ExplainPlan(getPlanSteps(plan.iterator())) : plan.getExplainPlan();
- if (!costBased) { // do not override estimates if they are used for cost calculation.
- this.estimatedRows = plan.getEstimatedRowsToScan();
- this.estimatedSize = plan.getEstimatedBytesToScan();
- this.estimateInfoTimestamp = plan.getEstimateInfoTimestamp();
- }
- return exp;
+ ResultIterator iterator = iterator();
+ ExplainPlan explainPlan = new ExplainPlan(getPlanSteps(iterator));
+ iterator.close();
+ return explainPlan;
}
- private List<String> getPlanSteps(ResultIterator iterator){
+ private List<String> getPlanSteps(ResultIterator iterator) {
List<String> planSteps = Lists.newArrayListWithExpectedSize(5);
iterator.explain(planSteps);
return planSteps;
@@ -536,26 +521,32 @@ public abstract class BaseQueryPlan implements QueryPlan {
@Override
public Long getEstimatedRowsToScan() throws SQLException {
- if (!explainPlanCalled) {
- getExplainPlan();
+ if (!getEstimatesCalled) {
+ getEstimates();
}
return estimatedRows;
}
@Override
public Long getEstimatedBytesToScan() throws SQLException {
- if (!explainPlanCalled) {
- getExplainPlan();
+ if (!getEstimatesCalled) {
+ getEstimates();
}
return estimatedSize;
}
@Override
public Long getEstimateInfoTimestamp() throws SQLException {
- if (!explainPlanCalled) {
- getExplainPlan();
+ if (!getEstimatesCalled) {
+ getEstimates();
}
return estimateInfoTimestamp;
}
+ private void getEstimates() throws SQLException {
+ getEstimatesCalled = true;
+ // Initialize a dummy iterator to get the estimates based on stats.
+ ResultIterator iterator = iterator();
+ iterator.close();
+ }
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb9de14b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
index 2d2ff4e..23a0da6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
@@ -99,7 +99,7 @@ public class HashJoinPlan extends DelegateQueryPlan {
private Long estimatedRows;
private Long estimatedBytes;
private Long estimateInfoTs;
- private boolean explainPlanCalled;
+ private boolean getEstimatesCalled;
public static HashJoinPlan create(SelectStatement statement,
QueryPlan plan, HashJoinInfo joinInfo, SubPlan[] subPlans) throws SQLException {
@@ -247,7 +247,6 @@ public class HashJoinPlan extends DelegateQueryPlan {
@Override
public ExplainPlan getExplainPlan() throws SQLException {
- explainPlanCalled = true;
List<String> planSteps = Lists.newArrayList(delegate.getExplainPlan().getPlanSteps());
int count = subPlans.length;
for (int i = 0; i < count; i++) {
@@ -263,26 +262,6 @@ public class HashJoinPlan extends DelegateQueryPlan {
if (joinInfo != null && joinInfo.getLimit() != null) {
planSteps.add(" JOIN-SCANNER " + joinInfo.getLimit() + " ROW LIMIT");
}
- for (SubPlan subPlan : subPlans) {
- if (subPlan.getInnerPlan().getEstimatedBytesToScan() == null
- || subPlan.getInnerPlan().getEstimatedRowsToScan() == null
- || subPlan.getInnerPlan().getEstimateInfoTimestamp() == null) {
- /*
- * If any of the sub plans doesn't have the estimate info available, then we don't
- * provide estimate for the overall plan
- */
- estimatedBytes = null;
- estimatedRows = null;
- estimateInfoTs = null;
- break;
- } else {
- estimatedBytes =
- add(estimatedBytes, subPlan.getInnerPlan().getEstimatedBytesToScan());
- estimatedRows = add(estimatedRows, subPlan.getInnerPlan().getEstimatedRowsToScan());
- estimateInfoTs =
- getMin(estimateInfoTs, subPlan.getInnerPlan().getEstimateInfoTimestamp());
- }
- }
return new ExplainPlan(planSteps);
}
@@ -520,27 +499,51 @@ public class HashJoinPlan extends DelegateQueryPlan {
@Override
public Long getEstimatedRowsToScan() throws SQLException {
- if (!explainPlanCalled) {
- getExplainPlan();
+ if (!getEstimatesCalled) {
+ getEstimates();
}
return estimatedRows;
}
@Override
public Long getEstimatedBytesToScan() throws SQLException {
- if (!explainPlanCalled) {
- getExplainPlan();
+ if (!getEstimatesCalled) {
+ getEstimates();
}
return estimatedBytes;
}
@Override
public Long getEstimateInfoTimestamp() throws SQLException {
- if (!explainPlanCalled) {
- getExplainPlan();
+ if (!getEstimatesCalled) {
+ getEstimates();
}
return estimateInfoTs;
}
+
+ private void getEstimates() throws SQLException {
+ getEstimatesCalled = true;
+ for (SubPlan subPlan : subPlans) {
+ if (subPlan.getInnerPlan().getEstimatedBytesToScan() == null
+ || subPlan.getInnerPlan().getEstimatedRowsToScan() == null
+ || subPlan.getInnerPlan().getEstimateInfoTimestamp() == null) {
+ /*
+ * If any of the sub plans doesn't have the estimate info available, then we don't
+ * provide estimate for the overall plan
+ */
+ estimatedBytes = null;
+ estimatedRows = null;
+ estimateInfoTs = null;
+ break;
+ } else {
+ estimatedBytes =
+ add(estimatedBytes, subPlan.getInnerPlan().getEstimatedBytesToScan());
+ estimatedRows = add(estimatedRows, subPlan.getInnerPlan().getEstimatedRowsToScan());
+ estimateInfoTs =
+ getMin(estimateInfoTs, subPlan.getInnerPlan().getEstimateInfoTimestamp());
+ }
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb9de14b/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
index 3e380da..2436d1e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
@@ -95,7 +95,7 @@ public class SortMergeJoinPlan implements QueryPlan {
private Long estimatedBytes;
private Long estimatedRows;
private Long estimateInfoTs;
- private boolean explainPlanCalled;
+ private boolean getEstimatesCalled;
public SortMergeJoinPlan(StatementContext context, FilterableStatement statement, TableRef table,
JoinType type, QueryPlan lhsPlan, QueryPlan rhsPlan, List<Expression> lhsKeyExpressions, List<Expression> rhsKeyExpressions,
@@ -157,7 +157,6 @@ public class SortMergeJoinPlan implements QueryPlan {
@Override
public ExplainPlan getExplainPlan() throws SQLException {
- explainPlanCalled = true;
List<String> steps = Lists.newArrayList();
steps.add("SORT-MERGE-JOIN (" + type.toString().toUpperCase() + ") TABLES");
for (String step : lhsPlan.getExplainPlan().getPlanSteps()) {
@@ -167,28 +166,6 @@ public class SortMergeJoinPlan implements QueryPlan {
for (String step : rhsPlan.getExplainPlan().getPlanSteps()) {
steps.add(" " + step);
}
- if ((lhsPlan.getEstimatedBytesToScan() == null || rhsPlan.getEstimatedBytesToScan() == null)
- || (lhsPlan.getEstimatedRowsToScan() == null
- || rhsPlan.getEstimatedRowsToScan() == null)
- || (lhsPlan.getEstimateInfoTimestamp() == null
- || rhsPlan.getEstimateInfoTimestamp() == null)) {
- /*
- * If any of the sub plans doesn't have the estimate info available, then we don't
- * provide estimate for the overall plan
- */
- estimatedBytes = null;
- estimatedRows = null;
- estimateInfoTs = null;
- } else {
- estimatedBytes =
- add(add(estimatedBytes, lhsPlan.getEstimatedBytesToScan()),
- rhsPlan.getEstimatedBytesToScan());
- estimatedRows =
- add(add(estimatedRows, lhsPlan.getEstimatedRowsToScan()),
- rhsPlan.getEstimatedRowsToScan());
- estimateInfoTs =
- getMin(lhsPlan.getEstimateInfoTimestamp(), rhsPlan.getEstimateInfoTimestamp());
- }
return new ExplainPlan(steps);
}
@@ -754,25 +731,51 @@ public class SortMergeJoinPlan implements QueryPlan {
@Override
public Long getEstimatedRowsToScan() throws SQLException {
- if (!explainPlanCalled) {
- getExplainPlan();
+ if (!getEstimatesCalled) {
+ getEstimates();
}
return estimatedRows;
}
@Override
public Long getEstimatedBytesToScan() throws SQLException {
- if (!explainPlanCalled) {
- getExplainPlan();
+ if (!getEstimatesCalled) {
+ getEstimates();
}
return estimatedBytes;
}
@Override
public Long getEstimateInfoTimestamp() throws SQLException {
- if (!explainPlanCalled) {
- getExplainPlan();
+ if (!getEstimatesCalled) {
+ getEstimates();
}
return estimateInfoTs;
}
+
+ private void getEstimates() throws SQLException {
+ getEstimatesCalled = true;
+ if ((lhsPlan.getEstimatedBytesToScan() == null || rhsPlan.getEstimatedBytesToScan() == null)
+ || (lhsPlan.getEstimatedRowsToScan() == null
+ || rhsPlan.getEstimatedRowsToScan() == null)
+ || (lhsPlan.getEstimateInfoTimestamp() == null
+ || rhsPlan.getEstimateInfoTimestamp() == null)) {
+ /*
+ * If any of the sub plans doesn't have the estimate info available, then we don't
+ * provide estimate for the overall plan
+ */
+ estimatedBytes = null;
+ estimatedRows = null;
+ estimateInfoTs = null;
+ } else {
+ estimatedBytes =
+ add(add(estimatedBytes, lhsPlan.getEstimatedBytesToScan()),
+ rhsPlan.getEstimatedBytesToScan());
+ estimatedRows =
+ add(add(estimatedRows, lhsPlan.getEstimatedRowsToScan()),
+ rhsPlan.getEstimatedRowsToScan());
+ estimateInfoTs =
+ getMin(lhsPlan.getEstimateInfoTimestamp(), rhsPlan.getEstimateInfoTimestamp());
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb9de14b/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java
index e6bf654..3b5168c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/UnionPlan.java
@@ -69,7 +69,7 @@ public class UnionPlan implements QueryPlan {
private Long estimatedRows;
private Long estimatedBytes;
private Long estimateInfoTs;
- private boolean explainPlanCalled;
+ private boolean getEstimatesCalled;
public UnionPlan(StatementContext context, FilterableStatement statement, TableRef table, RowProjector projector,
Integer limit, Integer offset, OrderBy orderBy, GroupBy groupBy, List<QueryPlan> plans, ParameterMetaData paramMetaData) throws SQLException {
@@ -174,7 +174,6 @@ public class UnionPlan implements QueryPlan {
@Override
public ExplainPlan getExplainPlan() throws SQLException {
- explainPlanCalled = true;
List<String> steps = new ArrayList<String>();
steps.add("UNION ALL OVER " + this.plans.size() + " QUERIES");
ResultIterator iterator = iterator();
@@ -184,23 +183,6 @@ public class UnionPlan implements QueryPlan {
for (int i = 1 ; i < steps.size()-offset; i++) {
steps.set(i, " " + steps.get(i));
}
- for (QueryPlan plan : plans) {
- if (plan.getEstimatedBytesToScan() == null || plan.getEstimatedRowsToScan() == null
- || plan.getEstimateInfoTimestamp() == null) {
- /*
- * If any of the sub plans doesn't have the estimate info available, then we don't
- * provide estimate for the overall plan
- */
- estimatedBytes = null;
- estimatedRows = null;
- estimateInfoTs = null;
- break;
- } else {
- estimatedBytes = add(estimatedBytes, plan.getEstimatedBytesToScan());
- estimatedRows = add(estimatedRows, plan.getEstimatedRowsToScan());
- estimateInfoTs = getMin(estimateInfoTs, plan.getEstimateInfoTimestamp());
- }
- }
return new ExplainPlan(steps);
}
@@ -265,25 +247,46 @@ public class UnionPlan implements QueryPlan {
@Override
public Long getEstimatedRowsToScan() throws SQLException {
- if (!explainPlanCalled) {
- getExplainPlan();
+ if (!getEstimatesCalled) {
+ getEstimates();
}
return estimatedRows;
}
@Override
public Long getEstimatedBytesToScan() throws SQLException {
- if (!explainPlanCalled) {
- getExplainPlan();
+ if (!getEstimatesCalled) {
+ getEstimates();
}
return estimatedBytes;
}
@Override
public Long getEstimateInfoTimestamp() throws SQLException {
- if (!explainPlanCalled) {
- getExplainPlan();
+ if (!getEstimatesCalled) {
+ getEstimates();
}
return estimateInfoTs;
}
+
+ private void getEstimates() throws SQLException {
+ getEstimatesCalled = true;
+ for (QueryPlan plan : plans) {
+ if (plan.getEstimatedBytesToScan() == null || plan.getEstimatedRowsToScan() == null
+ || plan.getEstimateInfoTimestamp() == null) {
+ /*
+ * If any of the sub plans doesn't have the estimate info available, then we don't
+ * provide estimate for the overall plan
+ */
+ estimatedBytes = null;
+ estimatedRows = null;
+ estimateInfoTs = null;
+ break;
+ } else {
+ estimatedBytes = add(estimatedBytes, plan.getEstimatedBytesToScan());
+ estimatedRows = add(estimatedRows, plan.getEstimatedRowsToScan());
+ estimateInfoTs = getMin(estimateInfoTs, plan.getEstimateInfoTimestamp());
+ }
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb9de14b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index c699088..b637173 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -581,7 +581,14 @@ public class PhoenixStatement implements Statement, SQLCloseable {
@Override
public QueryPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException {
CompilableStatement compilableStmt = getStatement();
- final StatementPlan plan = compilableStmt.compilePlan(stmt, Sequence.ValueOp.VALIDATE_SEQUENCE);
+ StatementPlan compilePlan = compilableStmt.compilePlan(stmt, Sequence.ValueOp.VALIDATE_SEQUENCE);
+ // For a QueryPlan, we need to get its optimized plan; for a MutationPlan, its enclosed QueryPlan
+ // has already been optimized during compilation.
+ if (compilePlan instanceof QueryPlan) {
+ QueryPlan dataPlan = (QueryPlan) compilePlan;
+ compilePlan = stmt.getConnection().getQueryServices().getOptimizer().optimize(stmt, dataPlan);
+ }
+ final StatementPlan plan = compilePlan;
List<String> planSteps = plan.getExplainPlan().getPlanSteps();
List<Tuple> tuples = Lists.newArrayListWithExpectedSize(planSteps.size());
Long estimatedBytesToScan = plan.getEstimatedBytesToScan();
[2/2] phoenix git commit: PHOENIX-4488 Cache config parameters for
MetaDataEndpointImpl during initialization
Posted by pb...@apache.org.
PHOENIX-4488 Cache config parameters for MetaDataEndpointImpl during initialization
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/afe21dc7
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/afe21dc7
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/afe21dc7
Branch: refs/heads/4.x-HBase-1.2
Commit: afe21dc72475aebd81a97d347c966dfc69cd5a9a
Parents: eb9de14
Author: James Taylor <jt...@salesforce.com>
Authored: Fri Dec 22 19:36:44 2017 +0000
Committer: Pedro Boado <pe...@gmail.com>
Committed: Wed Jan 31 20:56:21 2018 +0000
----------------------------------------------------------------------
.../coprocessor/MetaDataEndpointImplTest.java | 44 --------------------
.../coprocessor/MetaDataEndpointImpl.java | 30 ++++++-------
2 files changed, 16 insertions(+), 58 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/afe21dc7/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java
deleted file mode 100644
index 2c558d8..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.coprocessor;
-
-import com.google.common.collect.Lists;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.PTableType;
-import org.junit.Test;
-
-import java.util.List;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-public class MetaDataEndpointImplTest {
-
- @Test
- public void testExceededIndexQuota() throws Exception {
- PTable parentTable = mock(PTable.class);
- List<PTable> indexes = Lists.newArrayList(mock(PTable.class), mock(PTable.class));
- when(parentTable.getIndexes()).thenReturn(indexes);
- Configuration configuration = new Configuration();
- assertFalse(MetaDataEndpointImpl.execeededIndexQuota(PTableType.INDEX, parentTable, configuration));
- configuration.setInt(QueryServices.MAX_INDEXES_PER_TABLE, 1);
- assertTrue(MetaDataEndpointImpl.execeededIndexQuota(PTableType.INDEX, parentTable, configuration));
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/phoenix/blob/afe21dc7/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index bf8ba39..47ad7cf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -242,7 +242,6 @@ import org.apache.phoenix.util.UpgradeUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.annotations.VisibleForTesting;
import com.google.common.cache.Cache;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
@@ -472,6 +471,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
private PhoenixMetaDataCoprocessorHost phoenixAccessCoprocessorHost;
private boolean accessCheckEnabled;
+ private boolean blockWriteRebuildIndex;
+ private int maxIndexesPerTable;
+ private boolean isTablesMappingEnabled;
+
/**
* Stores a reference to the coprocessor environment provided by the
@@ -492,8 +495,16 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
phoenixAccessCoprocessorHost = new PhoenixMetaDataCoprocessorHost(this.env);
- this.accessCheckEnabled = env.getConfiguration().getBoolean(QueryServices.PHOENIX_ACLS_ENABLED,
+ Configuration config = env.getConfiguration();
+ this.accessCheckEnabled = config.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED,
QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED);
+ this.blockWriteRebuildIndex = config.getBoolean(QueryServices.INDEX_FAILURE_BLOCK_WRITE,
+ QueryServicesOptions.DEFAULT_INDEX_FAILURE_BLOCK_WRITE);
+ this.maxIndexesPerTable = config.getInt(QueryServices.MAX_INDEXES_PER_TABLE,
+ QueryServicesOptions.DEFAULT_MAX_INDEXES_PER_TABLE);
+ this.isTablesMappingEnabled = SchemaUtil.isNamespaceMappingEnabled(PTableType.TABLE,
+ new ReadOnlyProps(config.iterator()));
+
logger.info("Starting Tracing-Metrics Systems");
// Start the phoenix trace collection
Tracing.addTraceMetricsSource();
@@ -583,8 +594,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
PTable oldTable = (PTable)metaDataCache.getIfPresent(cacheKey);
long tableTimeStamp = oldTable == null ? MIN_TABLE_TIMESTAMP-1 : oldTable.getTimeStamp();
PTable newTable;
- boolean blockWriteRebuildIndex = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_BLOCK_WRITE,
- QueryServicesOptions.DEFAULT_INDEX_FAILURE_BLOCK_WRITE);
newTable = getTable(scanner, clientTimeStamp, tableTimeStamp, clientVersion);
if (newTable == null) {
return null;
@@ -1551,7 +1560,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
return;
}
// make sure we haven't gone over our threshold for indexes on this table.
- if (execeededIndexQuota(tableType, parentTable, env.getConfiguration())) {
+ if (execeededIndexQuota(tableType, parentTable)) {
builder.setReturnCode(MetaDataProtos.MutationCode.TOO_MANY_INDEXES);
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
done.run(builder.build());
@@ -1758,11 +1767,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
}
- @VisibleForTesting
- static boolean execeededIndexQuota(PTableType tableType, PTable parentTable, Configuration configuration) {
- return PTableType.INDEX == tableType && parentTable.getIndexes().size() >= configuration
- .getInt(QueryServices.MAX_INDEXES_PER_TABLE,
- QueryServicesOptions.DEFAULT_MAX_INDEXES_PER_TABLE);
+ private boolean execeededIndexQuota(PTableType tableType, PTable parentTable) {
+ return PTableType.INDEX == tableType && parentTable.getIndexes().size() >= maxIndexesPerTable;
}
private static final byte[] CHILD_TABLE_BYTES = new byte[] {PTable.LinkType.CHILD_TABLE.getSerializedValue()};
@@ -3265,8 +3271,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
* from getting rebuilt too often.
*/
final boolean wasLocked = (rowLock != null);
- boolean blockWriteRebuildIndex = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_BLOCK_WRITE,
- QueryServicesOptions.DEFAULT_INDEX_FAILURE_BLOCK_WRITE);
if (!wasLocked) {
rowLock = acquireLock(region, key, null);
}
@@ -3558,8 +3562,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
GetVersionResponse.Builder builder = GetVersionResponse.newBuilder();
Configuration config = env.getConfiguration();
- boolean isTablesMappingEnabled = SchemaUtil.isNamespaceMappingEnabled(PTableType.TABLE,
- new ReadOnlyProps(config.iterator()));
if (isTablesMappingEnabled
&& PhoenixDatabaseMetaData.MIN_NAMESPACE_MAPPED_PHOENIX_VERSION > request.getClientVersion()) {
logger.error("Old client is not compatible when" + " system tables are upgraded to map to namespace");