Posted to commits@phoenix.apache.org by ja...@apache.org on 2017/11/15 18:47:56 UTC
[01/37] phoenix git commit: PHOENIX-4283 fix a coerceBytes issue which causes nested GROUP BY on BIGINT to return incorrect results
Repository: phoenix
Updated Branches:
refs/heads/4.x-HBase-1.1 7a4a974d3 -> d200b5165
PHOENIX-4283 fix a coerceBytes issue which causes nested GROUP BY on BIGINT to return incorrect results
Signed-off-by: aertoria <ca...@gmail.com>
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7d2c1edd
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7d2c1edd
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7d2c1edd
Branch: refs/heads/4.x-HBase-1.1
Commit: 7d2c1edd66cf7fb2df17a13884bb6b5e4acdbe48
Parents: 7a4a974
Author: aertoria <ca...@gmail.com>
Authored: Sun Oct 15 18:36:44 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:38:41 2017 -0800
----------------------------------------------------------------------
.../org/apache/phoenix/end2end/AggregateIT.java | 21 +++++++++++++++++++-
.../org/apache/phoenix/schema/types/PLong.java | 3 ++-
2 files changed, 22 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7d2c1edd/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateIT.java
index 67a468a..3d0e590 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateIT.java
@@ -936,7 +936,26 @@ public class AggregateIT extends ParallelStatsDisabledIT {
public void testCountNullInNonEncodedNonEmptyKeyValueCF() throws Exception {
testCountNullInNonEmptyKeyValueCF(0);
}
-
+
+ @Test
+ public void testNestedGroupedAggregationWithBigInt() throws Exception {
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ String tableName = generateUniqueName();
+ try(Connection conn = DriverManager.getConnection(getUrl(), props);) {
+ String createQuery="CREATE TABLE "+tableName+" (a BIGINT NOT NULL,c BIGINT NOT NULL CONSTRAINT PK PRIMARY KEY (a, c))";
+ String updateQuery="UPSERT INTO "+tableName+"(a,c) VALUES(4444444444444444444, 5555555555555555555)";
+ String query="SELECT a FROM (SELECT a, c FROM "+tableName+" GROUP BY a, c) GROUP BY a, c";
+ conn.prepareStatement(createQuery).execute();
+ conn.prepareStatement(updateQuery).execute();
+ conn.commit();
+ PreparedStatement statement = conn.prepareStatement(query);
+ ResultSet rs = statement.executeQuery();
+ assertTrue(rs.next());
+ assertEquals(4444444444444444444L,rs.getLong(1));
+ assertFalse(rs.next());
+ }
+ }
+
private void testCountNullInNonEmptyKeyValueCF(int columnEncodedBytes) throws Exception {
try (Connection conn = DriverManager.getConnection(getUrl())) {
//Type is INT
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7d2c1edd/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PLong.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PLong.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PLong.java
index 0402c6e..acd16c5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PLong.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PLong.java
@@ -133,8 +133,9 @@ public class PLong extends PWholeNumber<Long> {
public void coerceBytes(ImmutableBytesWritable ptr, Object object, PDataType actualType,
Integer maxLength, Integer scale, SortOrder actualModifier, Integer desiredMaxLength, Integer desiredScale,
SortOrder expectedModifier) {
+
// Decrease size of TIMESTAMP to size of LONG and continue coerce
- if (ptr.getLength() > getByteSize()) {
+ if (ptr.getLength() > getByteSize() && actualType.isCoercibleTo(PTimestamp.INSTANCE)) {
ptr.set(ptr.get(), ptr.getOffset(), getByteSize());
}
super.coerceBytes(ptr, object, actualType, maxLength, scale, actualModifier, desiredMaxLength,
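For context on the guard added above: Phoenix serializes TIMESTAMP into 12 bytes (an 8-byte epoch-millisecond long followed by a 4-byte nanosecond int), while LONG occupies 8 bytes, so dropping the trailing bytes is only safe when the source value really is timestamp-shaped; an 8-byte BIGINT key sitting in a wider buffer, as in the nested GROUP BY case, must be left alone. A minimal, self-contained sketch of the truncation using plain java.nio rather than Phoenix's ImmutableBytesWritable:

    import java.nio.ByteBuffer;

    public class TimestampToLongSketch {
        static final int LONG_SIZE = 8;       // PLong.getByteSize()
        static final int TIMESTAMP_SIZE = 12; // 8-byte millis + 4-byte nanos

        // Keep only the leading 8 bytes (the epoch millis); valid solely for a
        // TIMESTAMP source, which is why the hunk above now checks
        // actualType.isCoercibleTo(PTimestamp.INSTANCE) first.
        static byte[] truncateTimestampToLong(byte[] timestampBytes) {
            byte[] longBytes = new byte[LONG_SIZE];
            System.arraycopy(timestampBytes, 0, longBytes, 0, LONG_SIZE);
            return longBytes;
        }

        public static void main(String[] args) {
            ByteBuffer ts = ByteBuffer.allocate(TIMESTAMP_SIZE);
            ts.putLong(1510770476000L); // epoch millis
            ts.putInt(123456);          // nanos
            long millis = ByteBuffer.wrap(truncateTimestampToLong(ts.array())).getLong();
            System.out.println(millis); // prints 1510770476000
        }
    }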
[22/37] phoenix git commit: PHOENIX-4335 System catalog snapshot created each time a new connection is created
Posted by ja...@apache.org.
PHOENIX-4335 System catalog snapshot created each time a new connection is created
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ef39feeb
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ef39feeb
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ef39feeb
Branch: refs/heads/4.x-HBase-1.1
Commit: ef39feebe0cf3b59537c0d0261657c090abe039c
Parents: e811218
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Oct 31 15:55:03 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:40 2017 -0800
----------------------------------------------------------------------
.../phoenix/end2end/SystemCatalogUpgradeIT.java | 121 +++++++++++++++++++
.../phoenix/coprocessor/MetaDataProtocol.java | 12 +-
.../query/ConnectionQueryServicesImpl.java | 39 ++++--
.../java/org/apache/phoenix/query/BaseTest.java | 35 ++++--
4 files changed, 190 insertions(+), 17 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/ef39feeb/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogUpgradeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogUpgradeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogUpgradeIT.java
new file mode 100644
index 0000000..e5b1d6e
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogUpgradeIT.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver.ConnectionInfo;
+import org.apache.phoenix.jdbc.PhoenixTestDriver;
+import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.query.ConnectionQueryServicesImpl;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesTestImpl;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.collect.Maps;
+
+public class SystemCatalogUpgradeIT extends BaseTest {
+ private static boolean reinitialize;
+ private static int countUpgradeAttempts;
+ private static long systemTableVersion = MetaDataProtocol.getPriorVersion();
+
+ private static class PhoenixUpgradeCountingServices extends ConnectionQueryServicesImpl {
+ public PhoenixUpgradeCountingServices(QueryServices services, ConnectionInfo connectionInfo, Properties info) {
+ super(services, connectionInfo, info);
+ }
+
+ @Override
+ protected void setUpgradeRequired() {
+ super.setUpgradeRequired();
+ countUpgradeAttempts++;
+ }
+
+ @Override
+ protected long getSystemTableVersion() {
+ return systemTableVersion;
+ }
+
+ @Override
+ protected boolean isInitialized() {
+ return !reinitialize && super.isInitialized();
+ }
+ }
+
+ public static class PhoenixUpgradeCountingDriver extends PhoenixTestDriver {
+ private ConnectionQueryServices cqs;
+ private final ReadOnlyProps overrideProps;
+
+ public PhoenixUpgradeCountingDriver(ReadOnlyProps props) {
+ overrideProps = props;
+ }
+
+ @Override
+ public boolean acceptsURL(String url) throws SQLException {
+ return true;
+ }
+
+ @Override // public for testing
+ public synchronized ConnectionQueryServices getConnectionQueryServices(String url, Properties info) throws SQLException {
+ if (cqs == null) {
+ cqs = new PhoenixUpgradeCountingServices(new QueryServicesTestImpl(getDefaultProps(), overrideProps), ConnectionInfo.create(url), info);
+ cqs.init(url, info);
+ } else if (reinitialize) {
+ cqs.init(url, info);
+ reinitialize = false;
+ }
+ return cqs;
+ }
+ }
+
+ @BeforeClass
+ public static void doSetup() throws Exception {
+ Map<String, String> props = Maps.newConcurrentMap();
+ props.put(BaseTest.DRIVER_CLASS_NAME_ATTRIB, PhoenixUpgradeCountingDriver.class.getName());
+ setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ }
+
+ @Test
+ public void testUpgradeOnlyHappensOnce() throws Exception {
+ ConnectionQueryServices services = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class).getQueryServices();
+ assertTrue(services instanceof PhoenixUpgradeCountingServices);
+ // Check if the timestamp version is changing between the current version and prior version
+ boolean wasTimestampChanged = systemTableVersion != MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP;
+ reinitialize = true;
+ systemTableVersion = MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP;
+ DriverManager.getConnection(getUrl());
+ // Confirm that if the timestamp changed, an upgrade was performed (and that if it
+ // didn't, an upgrade wasn't attempted).
+ assertEquals(wasTimestampChanged ? 1 : 0, countUpgradeAttempts);
+ // Confirm that another connection does not increase the number of times upgrade was attempted
+ DriverManager.getConnection(getUrl());
+ assertEquals(wasTimestampChanged ? 1 : 0, countUpgradeAttempts);
+ }
+
+
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/ef39feeb/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index 655068d..c4ecc3f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.coprocessor;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.Iterator;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;
@@ -89,7 +90,8 @@ public abstract class MetaDataProtocol extends MetaDataService {
public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_9_0 = MIN_TABLE_TIMESTAMP + 20;
public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0 = MIN_TABLE_TIMESTAMP + 25;
public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0 = MIN_TABLE_TIMESTAMP + 27;
- public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_12_0 = MIN_TABLE_TIMESTAMP + 28;
+ // Since there's no upgrade code, keep the version the same as the previous version
+ public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_12_0 = MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0;
// MIN_SYSTEM_TABLE_TIMESTAMP needs to be set to the max of all the MIN_SYSTEM_TABLE_TIMESTAMP_* constants
public static final long MIN_SYSTEM_TABLE_TIMESTAMP = MIN_SYSTEM_TABLE_TIMESTAMP_4_12_0;
@@ -431,6 +433,14 @@ public abstract class MetaDataProtocol extends MetaDataService {
}
}
+ public static long getPriorVersion() {
+ Iterator<Long> iterator = TIMESTAMP_VERSION_MAP.descendingKeySet().iterator();
+ if (!iterator.hasNext()) {
+ return -1;
+ }
+ return iterator.next();
+ }
+
public static String getVersion(long serverTimestamp) {
/*
* It is possible that when clients are trying to run upgrades concurrently, we could be at an intermediate
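The getPriorVersion() method added above relies on TIMESTAMP_VERSION_MAP being a NavigableMap (note the TreeMap/NavigableMap context imports and the newly added java.util.Iterator import), so the first element of descendingKeySet() is the largest registered timestamp. A small standalone sketch of that lookup; the map entries here are illustrative, not Phoenix's real values:

    import java.util.Iterator;
    import java.util.TreeMap;

    public class PriorVersionSketch {
        public static void main(String[] args) {
            // Illustrative timestamp -> release mapping; the real one is
            // MetaDataProtocol.TIMESTAMP_VERSION_MAP.
            TreeMap<Long, String> versions = new TreeMap<>();
            versions.put(25L, "4.10.x");
            versions.put(27L, "4.11.x");

            Iterator<Long> iterator = versions.descendingKeySet().iterator();
            long prior = iterator.hasNext() ? iterator.next() : -1; // highest key, or -1 if empty
            System.out.println(prior); // 27
        }
    }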
http://git-wip-us.apache.org/repos/asf/phoenix/blob/ef39feeb/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 4868551..c65fa7a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2353,13 +2353,38 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
return addColumn(oldMetaConnection, tableName, timestamp, columns, true);
}
+ // Available for testing
+ protected long getSystemTableVersion() {
+ return MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP;
+ }
+
+ // Available for testing
+ protected void setUpgradeRequired() {
+ this.upgradeRequired.set(true);
+ }
+
+ // Available for testing
+ protected boolean isInitialized() {
+ return initialized;
+ }
+
+ // Available for testing
+ protected void setInitialized(boolean isInitialized) {
+ initialized = isInitialized;
+ }
+
+ // Available for testing
+ protected String getSystemCatalogDML() {
+ return QueryConstants.CREATE_TABLE_METADATA;
+ }
+
@Override
public void init(final String url, final Properties props) throws SQLException {
try {
PhoenixContextExecutor.call(new Callable<Void>() {
@Override
public Void call() throws Exception {
- if (initialized) {
+ if (isInitialized()) {
if (initializationException != null) {
// Throw previous initialization exception, as we won't reuse this instance
throw initializationException;
@@ -2367,7 +2392,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
return null;
}
synchronized (ConnectionQueryServicesImpl.this) {
- if (initialized) {
+ if (isInitialized()) {
if (initializationException != null) {
// Throw previous initialization exception, as we won't reuse this instance
throw initializationException;
@@ -2409,7 +2434,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
Properties scnProps = PropertiesUtil.deepCopy(props);
scnProps.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB,
- Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP));
+ Long.toString(getSystemTableVersion()));
scnProps.remove(PhoenixRuntime.TENANT_ID_ATTRIB);
String globalUrl = JDBCUtil.removeProperty(url, PhoenixRuntime.TENANT_ID_ATTRIB);
try (HBaseAdmin hBaseAdmin = getAdmin();
@@ -2417,7 +2442,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
scnProps, newEmptyMetaData())) {
try {
metaConnection.setRunningUpgrade(true);
- metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_TABLE_METADATA);
+ metaConnection.createStatement().executeUpdate(getSystemCatalogDML());
} catch (NewerTableAlreadyExistsException ignore) {
// Ignore, as this will happen if the SYSTEM.CATALOG already exists at this fixed
// timestamp. A TableAlreadyExistsException is not thrown, since the table only exists
@@ -2425,7 +2450,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
} catch (TableAlreadyExistsException e) {
long currentServerSideTableTimeStamp = e.getTable().getTimeStamp();
if (currentServerSideTableTimeStamp < MIN_SYSTEM_TABLE_TIMESTAMP) {
- ConnectionQueryServicesImpl.this.upgradeRequired.set(true);
+ setUpgradeRequired();
}
} catch (PhoenixIOException e) {
if (!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), AccessDeniedException.class))) {
@@ -2484,7 +2509,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
throw initializationException;
}
} finally {
- initialized = true;
+ setInitialized(true);
}
}
}
@@ -2567,7 +2592,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE);
boolean snapshotCreated = false;
try {
- if (!ConnectionQueryServicesImpl.this.upgradeRequired.get()) {
+ if (!isUpgradeRequired()) {
throw new UpgradeNotRequiredException();
}
Properties scnProps = PropertiesUtil.deepCopy(props);
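The isInitialized()/setInitialized() indirection introduced above wraps what is otherwise a standard double-checked initialization: a fast unsynchronized check, a second check under the lock, a stored exception replayed to later callers, and a finally block that marks the instance initialized either way. The protected hooks are what let SystemCatalogUpgradeIT pretend the services are uninitialized and drive init() a second time. A condensed sketch of that control flow, with the Phoenix-specific work stubbed out and field names chosen for illustration:

    public class InitFlowSketch {
        private volatile boolean initialized;
        private Exception initializationException;

        protected boolean isInitialized() { return initialized; }     // test hook
        protected void setInitialized(boolean b) { initialized = b; } // test hook

        public void init() throws Exception {
            if (isInitialized()) {
                if (initializationException != null) throw initializationException;
                return; // already set up, nothing to do
            }
            synchronized (this) {
                if (isInitialized()) { // re-check under the lock
                    if (initializationException != null) throw initializationException;
                    return;
                }
                try {
                    // ... create SYSTEM.CATALOG at getSystemTableVersion(),
                    // ... call setUpgradeRequired() if the server-side timestamp is older
                } catch (Exception e) {
                    initializationException = e; // replayed to later callers
                    throw e;
                } finally {
                    setInitialized(true);
                }
            }
        }
    }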
http://git-wip-us.apache.org/repos/asf/phoenix/blob/ef39feeb/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 5b09cad..b1b4396 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -79,6 +79,7 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
+import java.lang.reflect.Constructor;
import java.math.BigDecimal;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
@@ -163,6 +164,8 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
*/
public abstract class BaseTest {
+ public static final String DRIVER_CLASS_NAME_ATTRIB = "phoenix.driver.class.name";
+
private static final Map<String,String> tableDDLMap;
private static final Logger logger = LoggerFactory.getLogger(BaseTest.class);
protected static final int DEFAULT_TXN_TIMEOUT_SECONDS = 30;
@@ -440,7 +443,7 @@ public abstract class BaseTest {
* @return url to be used by clients to connect to the cluster.
* @throws IOException
*/
- protected static String setUpTestCluster(@Nonnull Configuration conf, ReadOnlyProps overrideProps) throws IOException {
+ protected static String setUpTestCluster(@Nonnull Configuration conf, ReadOnlyProps overrideProps) throws Exception {
boolean isDistributedCluster = isDistributedClusterModeEnabled(conf);
if (!isDistributedCluster) {
return initMiniCluster(conf, overrideProps);
@@ -538,8 +541,9 @@ public abstract class BaseTest {
* Initialize the mini cluster using phoenix-test specific configuration.
* @param overrideProps TODO
* @return url to be used by clients to connect to the mini cluster.
+ * @throws Exception
*/
- private static String initMiniCluster(Configuration conf, ReadOnlyProps overrideProps) {
+ private static String initMiniCluster(Configuration conf, ReadOnlyProps overrideProps) throws Exception {
setUpConfigForMiniCluster(conf, overrideProps);
utility = new HBaseTestingUtility(conf);
try {
@@ -559,8 +563,9 @@ public abstract class BaseTest {
* Initialize the cluster in distributed mode
* @param overrideProps TODO
* @return url to be used by clients to connect to the mini cluster.
+ * @throws Exception
*/
- private static String initClusterDistributedMode(Configuration conf, ReadOnlyProps overrideProps) {
+ private static String initClusterDistributedMode(Configuration conf, ReadOnlyProps overrideProps) throws Exception {
setTestConfigForDistribuedCluster(conf, overrideProps);
try {
IntegrationTestingUtility util = new IntegrationTestingUtility(conf);
@@ -572,13 +577,13 @@ public abstract class BaseTest {
return JDBC_PROTOCOL + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;
}
- private static void setTestConfigForDistribuedCluster(Configuration conf, ReadOnlyProps overrideProps) {
+ private static void setTestConfigForDistribuedCluster(Configuration conf, ReadOnlyProps overrideProps) throws Exception {
setDefaultTestConfig(conf, overrideProps);
}
- private static void setDefaultTestConfig(Configuration conf, ReadOnlyProps overrideProps) {
+ private static void setDefaultTestConfig(Configuration conf, ReadOnlyProps overrideProps) throws Exception {
ConfigUtil.setReplicationConfigIfAbsent(conf);
- QueryServices services = new PhoenixTestDriver().getQueryServices();
+ QueryServices services = newTestDriver(overrideProps).getQueryServices();
for (Entry<String,String> entry : services.getProps()) {
conf.set(entry.getKey(), entry.getValue());
}
@@ -595,11 +600,11 @@ public abstract class BaseTest {
}
}
- public static Configuration setUpConfigForMiniCluster(Configuration conf) {
+ public static Configuration setUpConfigForMiniCluster(Configuration conf) throws Exception {
return setUpConfigForMiniCluster(conf, ReadOnlyProps.EMPTY_PROPS);
}
- public static Configuration setUpConfigForMiniCluster(Configuration conf, ReadOnlyProps overrideProps) {
+ public static Configuration setUpConfigForMiniCluster(Configuration conf, ReadOnlyProps overrideProps) throws Exception {
assertNotNull(conf);
setDefaultTestConfig(conf, overrideProps);
/*
@@ -626,12 +631,24 @@ public abstract class BaseTest {
return conf;
}
+ private static PhoenixTestDriver newTestDriver(ReadOnlyProps props) throws Exception {
+ PhoenixTestDriver newDriver;
+ String driverClassName = props.get(DRIVER_CLASS_NAME_ATTRIB);
+ if (driverClassName == null) {
+ newDriver = new PhoenixTestDriver(props);
+ } else {
+ Class<?> clazz = Class.forName(driverClassName);
+ Constructor constr = clazz.getConstructor(ReadOnlyProps.class);
+ newDriver = (PhoenixTestDriver)constr.newInstance(props);
+ }
+ return newDriver;
+ }
/**
* Create a {@link PhoenixTestDriver} and register it.
* @return an initialized and registered {@link PhoenixTestDriver}
*/
public static PhoenixTestDriver initAndRegisterTestDriver(String url, ReadOnlyProps props) throws Exception {
- PhoenixTestDriver newDriver = new PhoenixTestDriver(props);
+ PhoenixTestDriver newDriver = newTestDriver(props);
DriverManager.registerDriver(newDriver);
Driver oldDriver = DriverManager.getDriver(url);
if (oldDriver != newDriver) {
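Note that newTestDriver() locates the constructor via getConstructor(ReadOnlyProps.class), so any driver plugged in through DRIVER_CLASS_NAME_ATTRIB must be a public class exposing a public single-argument ReadOnlyProps constructor, as PhoenixUpgradeCountingDriver does above. The minimal required shape, with a hypothetical class name:

    import org.apache.phoenix.jdbc.PhoenixTestDriver;
    import org.apache.phoenix.util.ReadOnlyProps;

    public class MyPluggableTestDriver extends PhoenixTestDriver {
        // Required by the reflective lookup in BaseTest.newTestDriver()
        public MyPluggableTestDriver(ReadOnlyProps props) {
            super(props);
        }
    }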
[32/37] phoenix git commit: PHOENIX-4291 Merge release script for mac and linux
Posted by ja...@apache.org.
PHOENIX-4291 Merge release script for mac and linux
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8947624d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8947624d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8947624d
Branch: refs/heads/4.x-HBase-1.1
Commit: 8947624df820a8e5f8821f409c65293373734992
Parents: 07aacc2
Author: Mujtaba <mu...@apache.org>
Authored: Fri Nov 3 11:55:25 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:41 2017 -0800
----------------------------------------------------------------------
dev/make_rc.sh | 26 +++++++---
dev/make_rc_on_mac.sh | 121 ---------------------------------------------
2 files changed, 18 insertions(+), 129 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8947624d/dev/make_rc.sh
----------------------------------------------------------------------
diff --git a/dev/make_rc.sh b/dev/make_rc.sh
index 29227b0..31cb9f9 100755
--- a/dev/make_rc.sh
+++ b/dev/make_rc.sh
@@ -43,7 +43,7 @@ DIR_DOCS=dev/release_files
# Verify no target exists
mvn clean; rm -rf $DIR_REL_BASE;
-RESULT=$(find -iname target)
+RESULT=$(find . -iname target)
if [ -z "$RESULT" ]
then
@@ -73,7 +73,7 @@ mvn clean apache-rat:check package -DskipTests -Dcheckstyle.skip=true -q;
rm -rf $(find . -type d -name archive-tmp);
# Copy all phoenix-*.jars to release dir
-phx_jars=$(find -iwholename "./*/target/phoenix-*.jar")
+phx_jars=$(find . -iwholename "./*/target/phoenix-*.jar")
cp $phx_jars $DIR_REL_BIN_PATH;
# Copy bin
@@ -81,7 +81,7 @@ cp bin/* $DIR_BIN;
cp -R $DIR_PHERF_CONF $DIR_BIN;
# Copy release docs
-
+cp README $DIR_REL_BIN_PATH;
cp $DIR_DOCS/* $DIR_REL_BIN_PATH;
# Copy examples
@@ -97,10 +97,20 @@ echo "Now signing source and binary tars"
# Sign
function_sign() {
phoenix_tar=$(find apache-phoenix-*.gz);
- gpg --armor --output $phoenix_tar.asc --detach-sig $phoenix_tar;
- md5sum -b $phoenix_tar > $phoenix_tar.md5;
- sha512sum -b $phoenix_tar > $phoenix_tar.sha;
- sha256sum -b $phoenix_tar >> $phoenix_tar.sha;
+
+ # if on MAC OS
+ if [[ "$OSTYPE" == "darwin"* ]]; then
+ gpg2 --armor --output $phoenix_tar.asc --detach-sig $phoenix_tar;
+ openssl md5 $phoenix_tar > $phoenix_tar.md5;
+ openssl dgst -sha512 $phoenix_tar > $phoenix_tar.sha;
+ openssl dgst -sha256 $phoenix_tar >> $phoenix_tar.sha;
+ # all other OS
+ else
+ gpg --armor --output $phoenix_tar.asc --detach-sig $phoenix_tar;
+ md5sum -b $phoenix_tar > $phoenix_tar.md5;
+ sha512sum -b $phoenix_tar > $phoenix_tar.sha;
+ sha256sum -b $phoenix_tar >> $phoenix_tar.sha;
+ fi
}
cd $DIR_REL_BIN_TAR_PATH; function_sign;
@@ -111,7 +121,7 @@ read -p "Do you want add tag for this RC in GIT? (Y for yes or any other key to
if [[ $prompt =~ [yY](es)* ]]
then
echo "Tagging..."
- read -p "Enter tag (Example 5.0.0-rc0):" prompt
+ read -p "Enter tag (Example 4.13.0-HBase-0.98-rc0):" prompt
echo "Setting tag: $prompt";sleep 5s
git tag -a $prompt -m "$prompt"; git push origin $prompt
mv $DIR_REL_ROOT $DIR_REL_BASE/phoenix-$prompt
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8947624d/dev/make_rc_on_mac.sh
----------------------------------------------------------------------
diff --git a/dev/make_rc_on_mac.sh b/dev/make_rc_on_mac.sh
deleted file mode 100755
index 0b924f1..0000000
--- a/dev/make_rc_on_mac.sh
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/bin/bash
-############################################################################
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-############################################################################
-set -e
-
-echo "Script that assembles all you need to make an RC."
-echo "It generates source and binary tar in release directory"
-echo "Presumes that you can sign a release as described at https://www.apache.org/dev/release-signing.html"
-echo "Starting...";sleep 2s
-
-# Set directory variables
-DIR_ROOT="$(cd $(dirname $0);pwd)/.."
-cd $DIR_ROOT
-PHOENIX="$(xmllint --xpath "//*[local-name()='project']/*[local-name()='version']/text()" pom.xml)"
-DIR_REL_BASE=$DIR_ROOT/release
-DIR_REL_ROOT=$DIR_REL_BASE/apache-phoenix-$PHOENIX
-DIR_REL_BIN=apache-phoenix-$PHOENIX-bin
-DIR_REL_BIN_PATH=$DIR_REL_ROOT/$DIR_REL_BIN
-REL_SRC=apache-phoenix-$PHOENIX-src
-DIR_REL_SRC_TAR_PATH=$DIR_REL_ROOT/src
-DIR_REL_BIN_TAR_PATH=$DIR_REL_ROOT/bin
-DIR_BIN=$DIR_REL_BIN_PATH/bin
-DIR_PHERF_CONF=phoenix-pherf/config
-DIR_EXAMPLES=$DIR_REL_BIN_PATH/examples
-DIR_DOCS=dev/release_files
-
-# Verify no target exists
-mvn clean; rm -rf $DIR_REL_BASE;
-RESULT=$(find . -iname target)
-
-if [ -z "$RESULT" ]
-then
- echo "Verified target directory does not exist.";
-else
- echo "Target directory exists at: $RESULT. Please use a clean repo.";
- exit -1;
-fi
-
-# Generate src tar
-ln -s . $REL_SRC; tar cvzf $REL_SRC.tar.gz --exclude="$REL_SRC/$REL_SRC" $REL_SRC/*; rm $REL_SRC;
-
-# Generate directory structure
-mkdir $DIR_REL_BASE;
-mkdir $DIR_REL_ROOT;
-mkdir $DIR_REL_BIN_PATH;
-mkdir $DIR_REL_BIN_TAR_PATH;
-mkdir $DIR_REL_SRC_TAR_PATH;
-mkdir $DIR_EXAMPLES;
-mkdir $DIR_BIN;
-
-# Move src tar
-mv $REL_SRC.tar.gz $DIR_REL_SRC_TAR_PATH;
-
-# Copy common jars
-mvn clean apache-rat:check package -DskipTests -Dcheckstyle.skip=true -q;
-rm -rf $(find . -type d -name archive-tmp);
-
-# Copy all phoenix-*.jars to release dir
-phx_jars=$(find . -iwholename "./*/target/phoenix-*.jar")
-cp $phx_jars $DIR_REL_BIN_PATH;
-
-# Copy bin
-cp bin/* $DIR_BIN;
-cp -R $DIR_PHERF_CONF $DIR_BIN;
-
-# Copy release docs
-
-cp $DIR_DOCS/* $DIR_REL_BIN_PATH;
-
-# Copy examples
-cp -r examples/* $DIR_EXAMPLES
-
-# Generate bin tar
-tar cvzf $DIR_REL_BIN_TAR_PATH/$DIR_REL_BIN.tar.gz -C $DIR_REL_ROOT apache-phoenix-$PHOENIX-bin;
-rm -rf $DIR_REL_BIN_PATH;
-
-echo "DONE generating binary and source tars in release directory."
-echo "Now signing source and binary tars"
-
-# Sign
-function_sign() {
- phoenix_tar=$(find apache-phoenix-*.gz);
- gpg2 --armor --output $phoenix_tar.asc --detach-sig $phoenix_tar;
- openssl md5 $phoenix_tar > $phoenix_tar.md5;
- openssl dgst -sha512 $phoenix_tar > $phoenix_tar.sha;
- openssl dgst -sha256 $phoenix_tar >> $phoenix_tar.sha;
-}
-
-cd $DIR_REL_BIN_TAR_PATH; function_sign;
-cd $DIR_REL_SRC_TAR_PATH; function_sign;
-
-# Tag
-read -p "Do you want add tag for this RC in GIT? (Y for yes or any other key to continue)" prompt
-if [[ $prompt =~ [yY](es)* ]]
-then
- echo "Tagging..."
- read -p "Enter tag (Example 5.0.0-rc0):" prompt
- echo "Setting tag: $prompt";sleep 5s
- git tag -a $prompt -m "$prompt"; git push origin $prompt
- mv $DIR_REL_ROOT $DIR_REL_BASE/phoenix-$prompt
-fi
-
-echo "DONE."
-echo "If all looks good in release directory then commit RC at https://dist.apache.org/repos/dist/dev/phoenix"
[05/37] phoenix git commit: Revert "PHOENIX-4198 Remove the need for users to have access to the Phoenix SYSTEM tables to create tables"
Posted by ja...@apache.org.
Revert "PHOENIX-4198 Remove the need for users to have access to the Phoenix SYSTEM tables to create tables"
This reverts commit 7a4a974d3e82292b5b5ce94868d8d57c5272d114.
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5003ac30
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5003ac30
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5003ac30
Branch: refs/heads/4.x-HBase-1.1
Commit: 5003ac304eaa3ff27a3c5199f56e9954835ddc87
Parents: 1c3116f
Author: James Taylor <jt...@salesforce.com>
Authored: Wed Nov 15 10:40:36 2017 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:40:36 2017 -0800
----------------------------------------------------------------------
.../phoenix/end2end/TableDDLPermissionsIT.java | 692 -------------------
.../org/apache/hadoop/hbase/ipc/RpcUtil.java | 32 -
.../BaseMetaDataEndpointObserver.java | 111 ---
.../coprocessor/MetaDataEndpointImpl.java | 338 ++-------
.../coprocessor/MetaDataEndpointObserver.java | 68 --
.../coprocessor/MetaDataRegionObserver.java | 17 +-
.../coprocessor/PhoenixAccessController.java | 628 -----------------
.../PhoenixMetaDataCoprocessorHost.java | 236 -------
.../index/PhoenixIndexFailurePolicy.java | 109 ++-
.../query/ConnectionQueryServicesImpl.java | 15 +-
.../org/apache/phoenix/query/QueryServices.java | 4 -
.../phoenix/query/QueryServicesOptions.java | 14 +-
.../phoenix/schema/stats/StatisticsWriter.java | 42 +-
.../org/apache/phoenix/util/MetaDataUtil.java | 18 -
.../org/apache/phoenix/util/SchemaUtil.java | 12 -
15 files changed, 140 insertions(+), 2196 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5003ac30/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableDDLPermissionsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableDDLPermissionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableDDLPermissionsIT.java
deleted file mode 100644
index 971383b..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableDDLPermissionsIT.java
+++ /dev/null
@@ -1,692 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.lang.reflect.UndeclaredThrowableException;
-import java.security.PrivilegedExceptionAction;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.AuthUtil;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.security.AccessDeniedException;
-import org.apache.hadoop.hbase.security.access.AccessControlClient;
-import org.apache.hadoop.hbase.security.access.Permission.Action;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.phoenix.exception.PhoenixIOException;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.MetaDataUtil;
-import org.apache.phoenix.util.SchemaUtil;
-import org.junit.After;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
-
-import com.google.common.collect.Maps;
-
-/**
- * Test that verifies a user can read Phoenix tables with a minimal set of permissions.
- */
-@Category(NeedsOwnMiniClusterTest.class)
-@RunWith(Parameterized.class)
-public class TableDDLPermissionsIT{
- private static String SUPERUSER;
-
- private static HBaseTestingUtility testUtil;
-
- private static final Set<String> PHOENIX_SYSTEM_TABLES = new HashSet<>(Arrays.asList(
- "SYSTEM.CATALOG", "SYSTEM.SEQUENCE", "SYSTEM.STATS", "SYSTEM.FUNCTION",
- "SYSTEM.MUTEX"));
- // PHOENIX-XXXX SYSTEM.MUTEX isn't being created in the SYSTEM namespace as it should be.
- private static final Set<String> PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES = new HashSet<>(
- Arrays.asList("SYSTEM:CATALOG", "SYSTEM:SEQUENCE", "SYSTEM:STATS", "SYSTEM:FUNCTION",
- "SYSTEM.MUTEX"));
- private static final String GROUP_SYSTEM_ACCESS = "group_system_access";
- final UserGroupInformation superUser = UserGroupInformation.createUserForTesting(SUPERUSER, new String[0]);
- final UserGroupInformation superUser2 = UserGroupInformation.createUserForTesting("superuser", new String[0]);
- final UserGroupInformation regularUser = UserGroupInformation.createUserForTesting("user", new String[0]);
- final UserGroupInformation groupUser = UserGroupInformation.createUserForTesting("user2", new String[] { GROUP_SYSTEM_ACCESS });
- final UserGroupInformation unprivilegedUser = UserGroupInformation.createUserForTesting("unprivilegedUser",
- new String[0]);
-
-
- private static final int NUM_RECORDS = 5;
-
- private boolean isNamespaceMapped;
-
- public TableDDLPermissionsIT(final boolean isNamespaceMapped) throws Exception {
- this.isNamespaceMapped = isNamespaceMapped;
- Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(1);
- clientProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
- }
-
- private void startNewMiniCluster(Configuration overrideConf) throws Exception{
- if (null != testUtil) {
- testUtil.shutdownMiniCluster();
- testUtil = null;
- }
- testUtil = new HBaseTestingUtility();
-
- Configuration config = testUtil.getConfiguration();
-
- config.set("hbase.coprocessor.master.classes",
- "org.apache.hadoop.hbase.security.access.AccessController");
- config.set("hbase.coprocessor.region.classes",
- "org.apache.hadoop.hbase.security.access.AccessController");
- config.set("hbase.coprocessor.regionserver.classes",
- "org.apache.hadoop.hbase.security.access.AccessController");
- config.set("hbase.security.exec.permission.checks", "true");
- config.set("hbase.security.authorization", "true");
- config.set("hbase.superuser", SUPERUSER+","+superUser2.getShortUserName());
- config.set("hbase.regionserver.wal.codec", "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec");
- config.set(QueryServices.PHOENIX_ACLS_ENABLED,"true");
- config.set(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(isNamespaceMapped));
- // Avoid multiple clusters trying to bind the master's info port (16010)
- config.setInt(HConstants.MASTER_INFO_PORT, -1);
-
- if (overrideConf != null) {
- config.addResource(overrideConf);
- }
- testUtil.startMiniCluster(1);
- }
-
- private void grantSystemTableAccess() throws Exception{
- try (Connection conn = getConnection()) {
- if (isNamespaceMapped) {
- grantPermissions(regularUser.getShortUserName(), PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES, Action.READ,
- Action.EXEC);
- grantPermissions(unprivilegedUser.getShortUserName(), PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES,
- Action.READ, Action.EXEC);
- grantPermissions(AuthUtil.toGroupEntry(GROUP_SYSTEM_ACCESS), PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES,
- Action.READ, Action.EXEC);
- // Local Index requires WRITE permission on SYSTEM.SEQUENCE TABLE.
- grantPermissions(regularUser.getShortUserName(), Collections.singleton("SYSTEM:SEQUENCE"), Action.WRITE,
- Action.READ, Action.EXEC);
- grantPermissions(unprivilegedUser.getShortUserName(), Collections.singleton("SYSTEM:SEQUENCE"), Action.WRITE,
- Action.READ, Action.EXEC);
-
- } else {
- grantPermissions(regularUser.getShortUserName(), PHOENIX_SYSTEM_TABLES, Action.READ, Action.EXEC);
- grantPermissions(unprivilegedUser.getShortUserName(), PHOENIX_SYSTEM_TABLES, Action.READ, Action.EXEC);
- grantPermissions(AuthUtil.toGroupEntry(GROUP_SYSTEM_ACCESS), PHOENIX_SYSTEM_TABLES, Action.READ, Action.EXEC);
- // Local Index requires WRITE permission on SYSTEM.SEQUENCE TABLE.
- grantPermissions(regularUser.getShortUserName(), Collections.singleton("SYSTEM.SEQUENCE"), Action.WRITE,
- Action.READ, Action.EXEC);
- grantPermissions(unprivilegedUser.getShortUserName(), Collections.singleton("SYSTEM:SEQUENCE"), Action.WRITE,
- Action.READ, Action.EXEC);
- }
- } catch (Throwable e) {
- if (e instanceof Exception) {
- throw (Exception)e;
- } else {
- throw new Exception(e);
- }
- }
- }
-
- @Parameters(name = "isNamespaceMapped={0}") // name is used by failsafe as file name in reports
- public static Collection<Boolean> data() {
- return Arrays.asList(true, false);
- }
-
- @BeforeClass
- public static void doSetup() throws Exception {
- SUPERUSER = System.getProperty("user.name");
- //setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet().iterator()));
- }
-
- protected static String getUrl() {
- return "jdbc:phoenix:localhost:" + testUtil.getZkCluster().getClientPort() + ":/hbase";
- }
-
- public Connection getConnection() throws SQLException{
- Properties props = new Properties();
- props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(isNamespaceMapped));
- return DriverManager.getConnection(getUrl(),props);
- }
-
- @Test
- public void testSchemaPermissions() throws Throwable{
-
- if (!isNamespaceMapped) { return; }
- try {
- startNewMiniCluster(null);
- grantSystemTableAccess();
- final String schemaName = "TEST_SCHEMA_PERMISSION";
- superUser.doAs(new PrivilegedExceptionAction<Void>() {
- @Override
- public Void run() throws Exception {
- try {
- AccessControlClient.grant(getUtility().getConnection(), regularUser.getShortUserName(),
- Action.ADMIN);
- } catch (Throwable e) {
- if (e instanceof Exception) {
- throw (Exception)e;
- } else {
- throw new Exception(e);
- }
- }
- return null;
- }
- });
- verifyAllowed(createSchema(schemaName), regularUser);
- // Unprivileged user cannot drop a schema
- verifyDenied(dropSchema(schemaName), unprivilegedUser);
- verifyDenied(createSchema(schemaName), unprivilegedUser);
-
- verifyAllowed(dropSchema(schemaName), regularUser);
- } finally {
- revokeAll();
- }
- }
-
- @Test
- public void testAutomaticGrantDisabled() throws Throwable{
- testIndexAndView(false);
- }
-
- public void testIndexAndView(boolean isAutomaticGrant) throws Throwable {
- Configuration conf = new Configuration();
- conf.set(QueryServices.PHOENIX_AUTOMATIC_GRANT_ENABLED, Boolean.toString(isAutomaticGrant));
- startNewMiniCluster(conf);
- final String schema = "TEST_INDEX_VIEW";
- final String tableName = "TABLE_DDL_PERMISSION_IT";
- final String phoenixTableName = schema + "." + tableName;
- final String indexName1 = tableName + "_IDX1";
- final String indexName2 = tableName + "_IDX2";
- final String lIndexName1 = tableName + "_LIDX1";
- final String viewName1 = schema+"."+tableName + "_V1";
- final String viewName2 = schema+"."+tableName + "_V2";
- final String viewName3 = schema+"."+tableName + "_V3";
- final String viewName4 = schema+"."+tableName + "_V4";
- final String viewIndexName1 = tableName + "_VIDX1";
- final String viewIndexName2 = tableName + "_VIDX2";
- grantSystemTableAccess();
- try {
- superUser.doAs(new PrivilegedExceptionAction<Void>() {
- @Override
- public Void run() throws Exception {
- try {
- verifyAllowed(createSchema(schema), superUser);
- if (isNamespaceMapped) {
- grantPermissions(regularUser.getShortUserName(), schema, Action.CREATE);
- grantPermissions(AuthUtil.toGroupEntry(GROUP_SYSTEM_ACCESS), schema, Action.CREATE);
-
- } else {
- grantPermissions(regularUser.getShortUserName(),
- NamespaceDescriptor.DEFAULT_NAMESPACE.getName(), Action.CREATE);
- grantPermissions(AuthUtil.toGroupEntry(GROUP_SYSTEM_ACCESS),
- NamespaceDescriptor.DEFAULT_NAMESPACE.getName(), Action.CREATE);
-
- }
- } catch (Throwable e) {
- if (e instanceof Exception) {
- throw (Exception)e;
- } else {
- throw new Exception(e);
- }
- }
- return null;
- }
- });
-
- verifyAllowed(createTable(phoenixTableName), regularUser);
- verifyAllowed(createIndex(indexName1, phoenixTableName), regularUser);
- verifyAllowed(createView(viewName1, phoenixTableName), regularUser);
- verifyAllowed(createLocalIndex(lIndexName1, phoenixTableName), regularUser);
- verifyAllowed(createIndex(viewIndexName1, viewName1), regularUser);
- verifyAllowed(createIndex(viewIndexName2, viewName1), regularUser);
- verifyAllowed(createView(viewName4, viewName1), regularUser);
- verifyAllowed(readTable(phoenixTableName), regularUser);
-
- verifyDenied(createIndex(indexName2, phoenixTableName), unprivilegedUser);
- verifyDenied(createView(viewName2, phoenixTableName), unprivilegedUser);
- verifyDenied(createView(viewName3, viewName1), unprivilegedUser);
- verifyDenied(dropView(viewName1), unprivilegedUser);
-
- verifyDenied(dropIndex(indexName1, phoenixTableName), unprivilegedUser);
- verifyDenied(dropTable(phoenixTableName), unprivilegedUser);
- verifyDenied(rebuildIndex(indexName1, phoenixTableName), unprivilegedUser);
- verifyDenied(addColumn(phoenixTableName, "val1"), unprivilegedUser);
- verifyDenied(dropColumn(phoenixTableName, "val"), unprivilegedUser);
- verifyDenied(addProperties(phoenixTableName, "GUIDE_POSTS_WIDTH", "100"), unprivilegedUser);
-
- // Granting read permission to unprivileged user, now he should be able to create view but not index
- grantPermissions(unprivilegedUser.getShortUserName(),
- Collections.singleton(
- SchemaUtil.getPhysicalHBaseTableName(schema, tableName, isNamespaceMapped).getString()),
- Action.READ, Action.EXEC);
- grantPermissions(AuthUtil.toGroupEntry(GROUP_SYSTEM_ACCESS),
- Collections.singleton(
- SchemaUtil.getPhysicalHBaseTableName(schema, tableName, isNamespaceMapped).getString()),
- Action.READ, Action.EXEC);
- verifyDenied(createIndex(indexName2, phoenixTableName), unprivilegedUser);
- if (!isAutomaticGrant) {
- // Automatic grant will read access for all indexes
- verifyDenied(createView(viewName2, phoenixTableName), unprivilegedUser);
-
- // Granting read permission to unprivileged user on index so that a new view can read a index as well,
- // now
- // he should be able to create view but not index
- grantPermissions(unprivilegedUser.getShortUserName(),
- Collections.singleton(SchemaUtil
- .getPhysicalHBaseTableName(schema, indexName1, isNamespaceMapped).getString()),
- Action.READ, Action.EXEC);
- verifyDenied(createView(viewName3, viewName1), unprivilegedUser);
- }
-
- verifyAllowed(createView(viewName2, phoenixTableName), unprivilegedUser);
-
- if (!isAutomaticGrant) {
- // Grant access to view index for parent view
- grantPermissions(unprivilegedUser.getShortUserName(),
- Collections.singleton(Bytes.toString(MetaDataUtil.getViewIndexPhysicalName(SchemaUtil
- .getPhysicalHBaseTableName(schema, tableName, isNamespaceMapped).getBytes()))),
- Action.READ, Action.EXEC);
- }
- verifyAllowed(createView(viewName3, viewName1), unprivilegedUser);
-
- // Grant create permission in namespace
- if (isNamespaceMapped) {
- grantPermissions(unprivilegedUser.getShortUserName(), schema, Action.CREATE);
- } else {
- grantPermissions(unprivilegedUser.getShortUserName(), NamespaceDescriptor.DEFAULT_NAMESPACE.getName(),
- Action.CREATE);
- }
- if (!isAutomaticGrant) {
- verifyDenied(createIndex(indexName2, phoenixTableName), unprivilegedUser);
- // Give user of data table access to index table which will be created by unprivilegedUser
- grantPermissions(regularUser.getShortUserName(),
- Collections.singleton(SchemaUtil
- .getPhysicalHBaseTableName(schema, indexName2, isNamespaceMapped).getString()),
- Action.WRITE);
- verifyDenied(createIndex(indexName2, phoenixTableName), unprivilegedUser);
- grantPermissions(regularUser.getShortUserName(),
- Collections.singleton(SchemaUtil
- .getPhysicalHBaseTableName(schema, indexName2, isNamespaceMapped).getString()),
- Action.WRITE, Action.READ, Action.CREATE, Action.EXEC, Action.ADMIN);
- }
- // we should be able to read the data from another index as well to which we have not given any access to
- // this user
- verifyAllowed(createIndex(indexName2, phoenixTableName), unprivilegedUser);
- verifyAllowed(readTable(phoenixTableName, indexName1), unprivilegedUser);
- verifyAllowed(readTable(phoenixTableName, indexName2), unprivilegedUser);
- verifyAllowed(rebuildIndex(indexName2, phoenixTableName), unprivilegedUser);
-
- // data table user should be able to read new index
- verifyAllowed(rebuildIndex(indexName2, phoenixTableName), regularUser);
- verifyAllowed(readTable(phoenixTableName, indexName2), regularUser);
-
- verifyAllowed(readTable(phoenixTableName), regularUser);
- verifyAllowed(rebuildIndex(indexName1, phoenixTableName), regularUser);
- verifyAllowed(addColumn(phoenixTableName, "val1"), regularUser);
- verifyAllowed(addProperties(phoenixTableName, "GUIDE_POSTS_WIDTH", "100"), regularUser);
- verifyAllowed(dropView(viewName1), regularUser);
- verifyAllowed(dropView(viewName2), regularUser);
- verifyAllowed(dropColumn(phoenixTableName, "val1"), regularUser);
- verifyAllowed(dropIndex(indexName2, phoenixTableName), regularUser);
- verifyAllowed(dropIndex(indexName1, phoenixTableName), regularUser);
- verifyAllowed(dropTable(phoenixTableName), regularUser);
-
- // check again with super users
- verifyAllowed(createTable(phoenixTableName), superUser2);
- verifyAllowed(createIndex(indexName1, phoenixTableName), superUser2);
- verifyAllowed(createView(viewName1, phoenixTableName), superUser2);
- verifyAllowed(readTable(phoenixTableName), superUser2);
- verifyAllowed(dropView(viewName1), superUser2);
- verifyAllowed(dropTable(phoenixTableName), superUser2);
-
- } finally {
- revokeAll();
- }
- }
-
-
- @Test
- public void testAutomaticGrantEnabled() throws Throwable{
- testIndexAndView(true);
- }
-
- private void revokeAll() throws IOException, Throwable {
- AccessControlClient.revoke(getUtility().getConnection(), AuthUtil.toGroupEntry(GROUP_SYSTEM_ACCESS),Action.values() );
- AccessControlClient.revoke(getUtility().getConnection(), regularUser.getShortUserName(),Action.values() );
- AccessControlClient.revoke(getUtility().getConnection(), unprivilegedUser.getShortUserName(),Action.values() );
-
- }
-
- protected void grantPermissions(String groupEntry, Action... actions) throws IOException, Throwable {
- AccessControlClient.grant(getUtility().getConnection(), groupEntry, actions);
- }
-
- private AccessTestAction dropTable(final String tableName) throws SQLException {
- return new AccessTestAction() {
- @Override
- public Object run() throws Exception {
- try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
- assertFalse(stmt.execute("DROP TABLE IF EXISTS " + tableName));
- }
- return null;
- }
- };
-
- }
-
- private AccessTestAction createTable(final String tableName) throws SQLException {
- return new AccessTestAction() {
- @Override
- public Object run() throws Exception {
- try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
- assertFalse(stmt.execute("CREATE TABLE " + tableName + "(pk INTEGER not null primary key, data VARCHAR,val integer)"));
- try (PreparedStatement pstmt = conn.prepareStatement("UPSERT INTO " + tableName + " values(?, ?, ?)")) {
- for (int i = 0; i < NUM_RECORDS; i++) {
- pstmt.setInt(1, i);
- pstmt.setString(2, Integer.toString(i));
- pstmt.setInt(3, i);
- assertEquals(1, pstmt.executeUpdate());
- }
- }
- conn.commit();
- }
- return null;
- }
- };
- }
-
- private AccessTestAction readTable(final String tableName) throws SQLException {
- return readTable(tableName,null);
- }
- private AccessTestAction readTable(final String tableName, final String indexName) throws SQLException {
- return new AccessTestAction() {
- @Override
- public Object run() throws Exception {
- try (Connection conn = getConnection(); Statement stmt = conn.createStatement()) {
- ResultSet rs = stmt.executeQuery("SELECT "+(indexName!=null?"/*+ INDEX("+tableName+" "+indexName+")*/":"")+" pk, data,val FROM " + tableName +" where data>='0'");
- assertNotNull(rs);
- int i = 0;
- while (rs.next()) {
- assertEquals(i, rs.getInt(1));
- assertEquals(Integer.toString(i), rs.getString(2));
- assertEquals(i, rs.getInt(3));
- i++;
- }
- assertEquals(NUM_RECORDS, i);
- }
- return null;
- }
- };
- }
-
- public static HBaseTestingUtility getUtility(){
- return testUtil;
- }
-
- private void grantPermissions(String toUser, Set<String> tablesToGrant, Action... actions) throws Throwable {
- for (String table : tablesToGrant) {
- AccessControlClient.grant(getUtility().getConnection(), TableName.valueOf(table), toUser, null, null,
- actions);
- }
- }
-
- private void grantPermissions(String toUser, String namespace, Action... actions) throws Throwable {
- AccessControlClient.grant(getUtility().getConnection(), namespace, toUser, actions);
- }
-
-
- private AccessTestAction dropColumn(final String tableName, final String columnName) throws SQLException {
- return new AccessTestAction() {
- @Override
- public Object run() throws Exception {
- try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
- assertFalse(stmt.execute("ALTER TABLE " + tableName + " DROP COLUMN "+columnName));
- }
- return null;
- }
- };
- }
-
- private AccessTestAction addColumn(final String tableName, final String columnName) throws SQLException {
- return new AccessTestAction() {
- @Override
- public Object run() throws Exception {
- try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
- assertFalse(stmt.execute("ALTER TABLE " + tableName + " ADD "+columnName+" varchar"));
- }
- return null;
- }
- };
- }
-
- private AccessTestAction addProperties(final String tableName, final String property, final String value)
- throws SQLException {
- return new AccessTestAction() {
- @Override
- public Object run() throws Exception {
- try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
- assertFalse(stmt.execute("ALTER TABLE " + tableName + " SET " + property + "=" + value));
- }
- return null;
- }
- };
- }
-
- private AccessTestAction dropView(final String viewName) throws SQLException {
- return new AccessTestAction() {
- @Override
- public Object run() throws Exception {
- try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
- assertFalse(stmt.execute("DROP VIEW " + viewName));
- }
- return null;
- }
- };
- }
-
- private AccessTestAction createView(final String viewName, final String dataTable) throws SQLException {
- return new AccessTestAction() {
- @Override
- public Object run() throws Exception {
- try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
- assertFalse(stmt.execute("CREATE VIEW " + viewName + " AS SELECT * FROM " + dataTable));
- }
- return null;
- }
- };
- }
-
- private AccessTestAction createIndex(final String indexName, final String dataTable) throws SQLException {
- return new AccessTestAction() {
- @Override
- public Object run() throws Exception {
-
- try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
- assertFalse(stmt.execute("CREATE INDEX " + indexName + " on " + dataTable + "(data)"));
- }
- return null;
- }
- };
- }
-
- private AccessTestAction createLocalIndex(final String indexName, final String dataTable) throws SQLException {
- return new AccessTestAction() {
- @Override
- public Object run() throws Exception {
-
- try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
- assertFalse(stmt.execute("CREATE LOCAL INDEX " + indexName + " on " + dataTable + "(data)"));
- }
- return null;
- }
- };
- }
-
- private AccessTestAction dropIndex(final String indexName, final String dataTable) throws SQLException {
- return new AccessTestAction() {
- @Override
- public Object run() throws Exception {
- try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
- assertFalse(stmt.execute("DROP INDEX " + indexName + " on " + dataTable));
- }
- return null;
- }
- };
- }
-
- private AccessTestAction createSchema(final String schemaName) throws SQLException {
- return new AccessTestAction() {
- @Override
- public Object run() throws Exception {
- if (isNamespaceMapped) {
- try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
- assertFalse(stmt.execute("CREATE SCHEMA " + schemaName));
- }
- }
- return null;
- }
- };
- }
-
- private AccessTestAction dropSchema(final String schemaName) throws SQLException {
- return new AccessTestAction() {
- @Override
- public Object run() throws Exception {
- if (isNamespaceMapped) {
- try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
- assertFalse(stmt.execute("DROP SCHEMA " + schemaName));
- }
- }
- return null;
- }
- };
- }
-
- private AccessTestAction rebuildIndex(final String indexName, final String dataTable) throws SQLException {
- return new AccessTestAction() {
- @Override
- public Object run() throws Exception {
- try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
- assertFalse(stmt.execute("ALTER INDEX " + indexName + " on " + dataTable + " DISABLE"));
- assertFalse(stmt.execute("ALTER INDEX " + indexName + " on " + dataTable + " REBUILD"));
- }
- return null;
- }
- };
- }
-
- static interface AccessTestAction extends PrivilegedExceptionAction<Object> { }
-
- @After
- public void cleanup() throws Exception {
- if (null != testUtil) {
- testUtil.shutdownMiniCluster();
- testUtil = null;
- }
- }
-
- /** This fails only in case of ADE or empty list for any of the users. */
- private void verifyAllowed(AccessTestAction action, UserGroupInformation... users) throws Exception {
- for (UserGroupInformation user : users) {
- verifyAllowed(user, action);
- }
- }
-
- /** This passes only in case of ADE for all users. */
- private void verifyDenied(AccessTestAction action, UserGroupInformation... users) throws Exception {
- for (UserGroupInformation user : users) {
- verifyDenied(user, action);
- }
- }
-
- /** This fails only in case of ADE or empty list for any of the actions. */
- private void verifyAllowed(UserGroupInformation user, AccessTestAction... actions) throws Exception {
- for (AccessTestAction action : actions) {
- try {
- Object obj = user.doAs(action);
- if (obj != null && obj instanceof List<?>) {
- List<?> results = (List<?>) obj;
- if (results != null && results.isEmpty()) {
- fail("Empty non null results from action for user '" + user.getShortUserName() + "'");
- }
- }
- } catch (AccessDeniedException ade) {
- fail("Expected action to pass for user '" + user.getShortUserName() + "' but was denied");
- }
- }
- }
-
- /** This passes only in case of ADE for all actions. */
- private void verifyDenied(UserGroupInformation user, AccessTestAction... actions) throws Exception {
- for (AccessTestAction action : actions) {
- try {
- user.doAs(action);
- fail("Expected exception was not thrown for user '" + user.getShortUserName() + "'");
- } catch (IOException e) {
- fail("Expected exception was not thrown for user '" + user.getShortUserName() + "'");
- } catch (UndeclaredThrowableException ute) {
- Throwable ex = ute.getUndeclaredThrowable();
-
- if (ex instanceof PhoenixIOException) {
- if (ex.getCause() instanceof AccessDeniedException) {
- // expected result
- validateAccessDeniedException((AccessDeniedException) ex.getCause());
- return;
- }
- }
- }catch(RuntimeException ex){
- // This can occur while accessing table descriptors from the client as an unprivileged user
- if (ex.getCause() instanceof AccessDeniedException) {
- // expected result
- validateAccessDeniedException((AccessDeniedException) ex.getCause());
- return;
- }
- }
- fail("Expected exception was not thrown for user '" + user.getShortUserName() + "'");
- }
- }
-
- private void validateAccessDeniedException(AccessDeniedException ade) {
- String msg = ade.getMessage();
- assertTrue("Exception contained unexpected message: '" + msg + "'",
- !msg.contains("is not the scanner owner"));
- }
-}
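
The deleted harness above drives every DDL statement through Hadoop's
UserGroupInformation so that each AccessTestAction executes as a specific
test user. A minimal, self-contained sketch of that doAs pattern follows;
the class and user names are illustrative and not taken from the test:

    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsSketch {
        public static void main(String[] args) throws Exception {
            // Illustrative test user; the deleted IT creates its users similarly.
            UserGroupInformation user =
                    UserGroupInformation.createUserForTesting("unprivileged_user", new String[0]);
            user.doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    // Execute JDBC DDL here as "unprivileged_user"; a server-side
                    // AccessDeniedException reaches the client wrapped in an
                    // UndeclaredThrowableException, which verifyDenied() above unwraps.
                    return null;
                }
            });
        }
    }
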
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5003ac30/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/RpcUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/RpcUtil.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/RpcUtil.java
deleted file mode 100644
index ac281f1..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/RpcUtil.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.ipc;
-
-import org.apache.hadoop.hbase.ipc.RpcServer.Call;
-
-public class RpcUtil {
-
- public static Call getRpcContext() {
- return RpcServer.CurCall.get();
- }
-
- public static void setRpcContext(Call c){
- RpcServer.CurCall.set(c);
- }
-
-}
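
RpcUtil's only purpose was to expose HBase's thread-local RPC call context
so that privileged writes to SYSTEM.CATALOG could temporarily detach the
caller's identity; its sole caller, the mutateRowsWithLocks() helper, is
deleted later in this commit. A condensed sketch of that pattern, using
only types shown in these hunks (the wrapper class name is hypothetical):

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;
    import java.util.List;
    import java.util.Set;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.ipc.RpcServer.Call;
    import org.apache.hadoop.hbase.ipc.RpcUtil;
    import org.apache.hadoop.hbase.regionserver.Region;
    import org.apache.hadoop.hbase.security.User;

    public final class PrivilegedCatalogWriter {
        // Run the catalog mutation as the HBase login user, not the RPC caller.
        static void privilegedMutate(final Region region, final List<Mutation> mutations,
                final Set<byte[]> rowsToLock, final long nonceGroup, final long nonce)
                throws IOException {
            User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    final Call rpcContext = RpcUtil.getRpcContext();
                    try {
                        RpcUtil.setRpcContext(null); // detach the caller's identity
                        region.mutateRowsWithLocks(mutations, rowsToLock, nonceGroup, nonce);
                    } finally {
                        RpcUtil.setRpcContext(rpcContext); // restore for the RPC thread
                    }
                    return null;
                }
            });
        }
    }
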
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5003ac30/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseMetaDataEndpointObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseMetaDataEndpointObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseMetaDataEndpointObserver.java
deleted file mode 100644
index 8decc8c..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseMetaDataEndpointObserver.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.coprocessor;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.phoenix.coprocessor.PhoenixMetaDataCoprocessorHost.PhoenixMetaDataControllerEnvironment;
-import org.apache.phoenix.schema.PIndexState;
-import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.PTableType;
-
-public class BaseMetaDataEndpointObserver implements MetaDataEndpointObserver{
-
- @Override
- public void start(CoprocessorEnvironment env) throws IOException {
-
- }
-
- @Override
- public void stop(CoprocessorEnvironment env) throws IOException {
-
- }
-
- @Override
- public void preGetTable(
- org.apache.hadoop.hbase.coprocessor.ObserverContext<PhoenixMetaDataControllerEnvironment> ctx,
- String tenantId, String tableName, TableName physicalTableName) throws IOException {
-
- }
-
-
- @Override
- public void preCreateTable(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
- String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType,
- Set<byte[]> familySet, Set<TableName> indexes) throws IOException {
-
- }
-
- @Override
- public void preDropTable(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
- String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType,
- List<PTable> indexes) throws IOException {
-
- }
-
- @Override
- public void preAlterTable(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
- String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType type) throws IOException {
-
- }
-
- @Override
- public void preGetSchema(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String schemaName)
- throws IOException {
-
- }
-
- @Override
- public void preCreateSchema(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String schemaName)
- throws IOException {
-
- }
-
- @Override
- public void preDropSchema(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String schemaName) throws IOException {
-
- }
-
- @Override
- public void preCreateFunction(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
- String functionName) throws IOException {
-
- }
-
- @Override
- public void preDropFunction(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId, String functionName)
- throws IOException {}
-
- @Override
- public void preGetFunctions(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId, String functionName)
- throws IOException {
-
- }
-
- @Override
- public void preIndexUpdate(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
- String indexName, TableName physicalTableName, TableName parentPhysicalTableName, PIndexState newState)
- throws IOException {
-
- }
-}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5003ac30/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 026a516..43c885a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -84,7 +84,6 @@ import static org.apache.phoenix.util.SchemaUtil.getVarCharLength;
import static org.apache.phoenix.util.SchemaUtil.getVarChars;
import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
@@ -92,12 +91,10 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
-import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
-import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
@@ -108,7 +105,6 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
-import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTableInterface;
@@ -125,12 +121,9 @@ import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.ipc.RpcServer.Call;
-import org.apache.hadoop.hbase.ipc.RpcUtil;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.Region.RowLock;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.VersionInfo;
@@ -459,7 +452,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
private static final int DEFAULT_VALUE_INDEX = FUNCTION_ARG_KV_COLUMNS.indexOf(DEFAULT_VALUE_KV);
private static final int MIN_VALUE_INDEX = FUNCTION_ARG_KV_COLUMNS.indexOf(MIN_VALUE_KV);
private static final int MAX_VALUE_INDEX = FUNCTION_ARG_KV_COLUMNS.indexOf(MAX_VALUE_KV);
-
+
private static PName newPName(byte[] keyBuffer, int keyOffset, int keyLength) {
if (keyLength <= 0) {
return null;
@@ -470,9 +463,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
private RegionCoprocessorEnvironment env;
- private PhoenixMetaDataCoprocessorHost phoenixAccessCoprocessorHost;
- private boolean accessCheckEnabled;
-
/**
* Stores a reference to the coprocessor environment provided by the
* {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this
@@ -490,10 +480,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
} else {
throw new CoprocessorException("Must be loaded on a table region!");
}
-
- phoenixAccessCoprocessorHost = new PhoenixMetaDataCoprocessorHost(this.env);
- this.accessCheckEnabled = env.getConfiguration().getBoolean(QueryServices.PHOENIX_ACLS_ENABLED,
- QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED);
logger.info("Starting Tracing-Metrics Systems");
// Start the phoenix trace collection
Tracing.addTraceMetricsSource();
@@ -537,9 +523,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
done.run(builder.build());
return;
}
- getCoprocessorHost().preGetTable(Bytes.toString(tenantId), SchemaUtil.getTableName(schemaName, tableName),
- TableName.valueOf(table.getPhysicalName().getBytes()));
-
builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS);
long disableIndexTimestamp = table.getIndexDisableTimestamp();
long minNonZerodisableIndexTimestamp = disableIndexTimestamp > 0 ? disableIndexTimestamp : Long.MAX_VALUE;
@@ -571,10 +554,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
}
- private PhoenixMetaDataCoprocessorHost getCoprocessorHost() {
- return phoenixAccessCoprocessorHost;
- }
-
private PTable buildTable(byte[] key, ImmutableBytesPtr cacheKey, Region region,
long clientTimeStamp, int clientVersion) throws IOException, SQLException {
Scan scan = MetaDataUtil.newTableRowsScan(key, MIN_TABLE_TIMESTAMP, clientTimeStamp);
@@ -1338,14 +1317,12 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
* @return null if the physical table row information is not present.
*
*/
- private static Mutation getPhysicalTableRowForView(List<Mutation> tableMetadata, byte[][] parentTenantSchemaTableNames, byte[][] physicalSchemaTableNames) {
+ private static Mutation getPhysicalTableForView(List<Mutation> tableMetadata, byte[][] parentSchemaTableNames) {
int size = tableMetadata.size();
byte[][] rowKeyMetaData = new byte[3][];
MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData);
Mutation physicalTableRow = null;
- Mutation parentTableRow = null;
boolean physicalTableLinkFound = false;
- boolean parentTableLinkFound = false;
if (size >= 2) {
int i = size - 1;
while (i >= 1) {
@@ -1355,51 +1332,28 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
if (linkType == LinkType.PHYSICAL_TABLE) {
physicalTableRow = m;
physicalTableLinkFound = true;
- }
- if (linkType == LinkType.PARENT_TABLE) {
- parentTableRow=m;
- parentTableLinkFound = true;
+ break;
}
}
- if(physicalTableLinkFound && parentTableLinkFound){
- break;
- }
i--;
}
}
- if (!parentTableLinkFound) {
- parentTenantSchemaTableNames[0] = null;
- parentTenantSchemaTableNames[1] = null;
- parentTenantSchemaTableNames[2] = null;
-
- }
if (!physicalTableLinkFound) {
- physicalSchemaTableNames[0] = null;
- physicalSchemaTableNames[1] = null;
- physicalSchemaTableNames[2] = null;
- }
- if (physicalTableLinkFound) {
- getSchemaTableNames(physicalTableRow,physicalSchemaTableNames);
- }
- if (parentTableLinkFound) {
- getSchemaTableNames(parentTableRow,parentTenantSchemaTableNames);
+ parentSchemaTableNames[0] = null;
+ parentSchemaTableNames[1] = null;
+ return null;
}
- return physicalTableRow;
- }
-
- private static void getSchemaTableNames(Mutation row, byte[][] schemaTableNames) {
- byte[][] rowKeyMetaData = new byte[5][];
- getVarChars(row.getRow(), 5, rowKeyMetaData);
- byte[] tenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
+ rowKeyMetaData = new byte[5][];
+ getVarChars(physicalTableRow.getRow(), 5, rowKeyMetaData);
byte[] colBytes = rowKeyMetaData[PhoenixDatabaseMetaData.COLUMN_NAME_INDEX];
byte[] famBytes = rowKeyMetaData[PhoenixDatabaseMetaData.FAMILY_NAME_INDEX];
if ((colBytes == null || colBytes.length == 0) && (famBytes != null && famBytes.length > 0)) {
byte[] sName = SchemaUtil.getSchemaNameFromFullName(famBytes).getBytes();
byte[] tName = SchemaUtil.getTableNameFromFullName(famBytes).getBytes();
- schemaTableNames[0]= tenantId;
- schemaTableNames[1] = sName;
- schemaTableNames[2] = tName;
+ parentSchemaTableNames[0] = sName;
+ parentSchemaTableNames[1] = tName;
}
+ return physicalTableRow;
}
@Override
@@ -1416,76 +1370,25 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
- boolean isNamespaceMapped = MetaDataUtil.isNameSpaceMapped(tableMetadata, GenericKeyValueBuilder.INSTANCE,
- new ImmutableBytesWritable());
- final IndexType indexType = MetaDataUtil.getIndexType(tableMetadata, GenericKeyValueBuilder.INSTANCE,
- new ImmutableBytesWritable());
+
byte[] parentSchemaName = null;
byte[] parentTableName = null;
PTableType tableType = MetaDataUtil.getTableType(tableMetadata, GenericKeyValueBuilder.INSTANCE, new ImmutableBytesWritable());
byte[] parentTableKey = null;
Mutation viewPhysicalTableRow = null;
- Set<TableName> indexes = new HashSet<TableName>();
- byte[] cPhysicalName = SchemaUtil.getPhysicalHBaseTableName(schemaName, tableName, isNamespaceMapped)
- .getBytes();
- byte[] cParentPhysicalName=null;
if (tableType == PTableType.VIEW) {
- byte[][] parentSchemaTableNames = new byte[3][];
- byte[][] parentPhysicalSchemaTableNames = new byte[3][];
+ byte[][] parentSchemaTableNames = new byte[2][];
/*
* For a view, we lock the base physical table row. For a mapped view, there is
* no link present to the physical table. So the viewPhysicalTableRow is null
* in that case.
*/
-
- viewPhysicalTableRow = getPhysicalTableRowForView(tableMetadata, parentSchemaTableNames,parentPhysicalSchemaTableNames);
- long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
- if (parentPhysicalSchemaTableNames[2] != null) {
-
- parentTableKey = SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY,
- parentPhysicalSchemaTableNames[1], parentPhysicalSchemaTableNames[2]);
- PTable parentTable = loadTable(env, parentTableKey, new ImmutableBytesPtr(parentTableKey),
- clientTimeStamp, clientTimeStamp, clientVersion);
- if (parentTable == null) {
- builder.setReturnCode(MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND);
- builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
- done.run(builder.build());
- return;
- }
- cParentPhysicalName = parentTable.getPhysicalName().getBytes();
- if (parentSchemaTableNames[2] != null
- && Bytes.compareTo(parentSchemaTableNames[2], parentPhysicalSchemaTableNames[2]) != 0) {
- // if view is created on view
- byte[] parentKey = SchemaUtil.getTableKey(
- parentSchemaTableNames[0] == null ? ByteUtil.EMPTY_BYTE_ARRAY : parentSchemaTableNames[0],
- parentSchemaTableNames[1], parentSchemaTableNames[2]);
- parentTable = loadTable(env, parentKey, new ImmutableBytesPtr(parentKey),
- clientTimeStamp, clientTimeStamp, clientVersion);
- if (parentTable == null) {
- // it could be a global view
- parentKey = SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY,
- parentSchemaTableNames[1], parentSchemaTableNames[2]);
- parentTable = loadTable(env, parentKey, new ImmutableBytesPtr(parentKey),
- clientTimeStamp, clientTimeStamp, clientVersion);
- }
- }
- if (parentTable == null) {
- builder.setReturnCode(MetaDataProtos.MutationCode.PARENT_TABLE_NOT_FOUND);
- builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
- done.run(builder.build());
- return;
- }
- for (PTable index : parentTable.getIndexes()) {
- indexes.add(TableName.valueOf(index.getPhysicalName().getBytes()));
- }
-
- } else {
- // Mapped View
- cParentPhysicalName = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
+ viewPhysicalTableRow = getPhysicalTableForView(tableMetadata, parentSchemaTableNames);
+ parentSchemaName = parentSchemaTableNames[0];
+ parentTableName = parentSchemaTableNames[1];
+ if (parentTableName != null) {
+ parentTableKey = SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, parentSchemaName, parentTableName);
}
- parentSchemaName = parentPhysicalSchemaTableNames[1];
- parentTableName = parentPhysicalSchemaTableNames[2];
-
} else if (tableType == PTableType.INDEX) {
parentSchemaName = schemaName;
/*
@@ -1495,27 +1398,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
*/
parentTableName = MetaDataUtil.getParentTableName(tableMetadata);
parentTableKey = SchemaUtil.getTableKey(tenantIdBytes, parentSchemaName, parentTableName);
- long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
- PTable parentTable = loadTable(env, parentTableKey, new ImmutableBytesPtr(parentTableKey),
- clientTimeStamp, clientTimeStamp, clientVersion);
- if (IndexType.LOCAL == indexType) {
- cPhysicalName = parentTable.getPhysicalName().getBytes();
- cParentPhysicalName=parentTable.getPhysicalName().getBytes();
- } else if (parentTable.getType() == PTableType.VIEW) {
- cPhysicalName = MetaDataUtil.getViewIndexPhysicalName(parentTable.getPhysicalName().getBytes());
- cParentPhysicalName = parentTable.getPhysicalName().getBytes();
- }else{
- cParentPhysicalName = SchemaUtil
- .getPhysicalHBaseTableName(parentSchemaName, parentTableName, isNamespaceMapped).getBytes();
- }
}
-
- getCoprocessorHost().preCreateTable(Bytes.toString(tenantIdBytes),
- SchemaUtil.getTableName(schemaName, tableName),
- (tableType == PTableType.VIEW) ? null : TableName.valueOf(cPhysicalName),
- cParentPhysicalName == null ? null : TableName.valueOf(cParentPhysicalName), tableType,
- /* TODO: During initial create we may not need the family map */
- Collections.<byte[]> emptySet(), indexes);
Region region = env.getRegion();
List<RowLock> locks = Lists.newArrayList();
@@ -1730,7 +1613,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
// primary and then index table locks are held, in that order). For now, we just don't support
// indexing on the system table. This is an issue because of the way we manage batch mutation
// in the Indexer.
- mutateRowsWithLocks(region, tableMetadata, Collections.<byte[]> emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
+ region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
// Invalidate the cache - the next getTable call will add it
// TODO: consider loading the table that was just created here, patching up the parent table, and updating the cache
@@ -1749,7 +1632,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
done.run(builder.build());
return;
} finally {
- releaseRowLocks(region,locks);
+ region.releaseRowLocks(locks);
}
} catch (Throwable t) {
logger.error("createTable failed", t);
@@ -1765,6 +1648,15 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
QueryServicesOptions.DEFAULT_MAX_INDEXES_PER_TABLE);
}
+ private static RowLock acquireLock(Region region, byte[] key, List<RowLock> locks)
+ throws IOException {
+ RowLock rowLock = region.getRowLock(key, true);
+ if (rowLock == null) {
+ throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
+ }
+ locks.add(rowLock);
+ return rowLock;
+ }
private static final byte[] CHILD_TABLE_BYTES = new byte[] {PTable.LinkType.CHILD_TABLE.getSerializedValue()};
@@ -1954,23 +1846,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
parentTableName == null ? lockKey : SchemaUtil.getTableKey(tenantIdBytes,
schemaName, tableName);
-
- PTableType ptableType=PTableType.fromSerializedValue(tableType);
- long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
- byte[] cKey = SchemaUtil.getTableKey(tenantIdBytes, schemaName, tableName);
- PTable loadedTable = loadTable(env, cKey, new ImmutableBytesPtr(cKey), clientTimeStamp, clientTimeStamp,
- request.getClientVersion());
- if (loadedTable == null) {
- builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND);
- builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
- done.run(builder.build());
- return;
- }
- getCoprocessorHost().preDropTable(Bytes.toString(tenantIdBytes),
- SchemaUtil.getTableName(schemaName, tableName),
- TableName.valueOf(loadedTable.getPhysicalName().getBytes()),
- getParentPhysicalTableName(loadedTable), ptableType,loadedTable.getIndexes());
-
Region region = env.getRegion();
MetaDataMutationResult result = checkTableKeyInRegion(key, region);
if (result != null) {
@@ -1995,7 +1870,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
Cache<ImmutableBytesPtr,PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
// Commit the list of deletion.
- mutateRowsWithLocks(region, tableMetadata, Collections.<byte[]> emptySet(), HConstants.NO_NONCE,
+ region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet(), HConstants.NO_NONCE,
HConstants.NO_NONCE);
long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata);
for (ImmutableBytesPtr ckey : invalidateList) {
@@ -2008,7 +1883,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
done.run(MetaDataMutationResult.toProto(result));
return;
} finally {
- releaseRowLocks(region,locks);
+ region.releaseRowLocks(locks);
}
} catch (Throwable t) {
logger.error("dropTable failed", t);
@@ -2016,24 +1891,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
}
}
-
- protected void releaseRowLocks(Region region, List<RowLock> locks) {
- if (locks != null) {
- region.releaseRowLocks(locks);
- }
- }
-
- private RowLock acquireLock(Region region, byte[] lockKey, List<RowLock> locks) throws IOException {
- //LockManager.RowLock rowLock = lockManager.lockRow(lockKey, rowLockWaitDuration);
- RowLock rowLock = region.getRowLock(lockKey, false);
- if (rowLock == null) {
- throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(lockKey));
- }
- if (locks != null) {
- locks.add(rowLock);
- }
- return rowLock;
- }
private MetaDataMutationResult doDropTable(byte[] key, byte[] tenantId, byte[] schemaName,
byte[] tableName, byte[] parentTableName, PTableType tableType, List<Mutation> rowsToDelete,
@@ -2236,15 +2093,18 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
EnvironmentEdgeManager.currentTimeMillis(), null);
}
if (table.getTimeStamp() >= clientTimeStamp) {
- logger.info("Found newer table as of " + table.getTimeStamp() + " versus client timestamp of "
- + clientTimeStamp);
+ logger.info("Found newer table as of " + table.getTimeStamp() + " versus client timestamp of " + clientTimeStamp);
return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND,
EnvironmentEdgeManager.currentTimeMillis(), table);
- } else if (isTableDeleted(table)) { return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND,
- EnvironmentEdgeManager.currentTimeMillis(), null); }
- long expectedSeqNum = MetaDataUtil.getSequenceNumber(tableMetadata) - 1; // lookup TABLE_SEQ_NUM in
- // tableMetaData
+ } else if (isTableDeleted(table)) {
+ return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND,
+ EnvironmentEdgeManager.currentTimeMillis(), null);
+ }
+ long expectedSeqNum = MetaDataUtil.getSequenceNumber(tableMetadata) - 1; // lookup
+ // TABLE_SEQ_NUM
+ // in
+ // tableMetaData
if (logger.isDebugEnabled()) {
logger.debug("For table " + Bytes.toStringBinary(key) + " expecting seqNum "
+ expectedSeqNum + " and found seqNum " + table.getSequenceNumber()
@@ -2279,7 +2139,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
if (result != null && result.getMutationCode()!=MutationCode.TABLE_ALREADY_EXISTS) {
return result;
}
- mutateRowsWithLocks(region, tableMetadata, Collections.<byte[]> emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
+ region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
// Invalidate from cache
for (ImmutableBytesPtr invalidateKey : invalidateList) {
metaDataCache.invalidate(invalidateKey);
@@ -2295,7 +2155,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, currentTime, table);
}
} finally {
- releaseRowLocks(region,locks);
+ region.releaseRowLocks(locks);
}
} catch (Throwable t) {
ServerUtil.throwIOException(SchemaUtil.getTableName(schemaName, tableName), t);
@@ -3111,11 +2971,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
PTableType type = table.getType();
byte[] tableHeaderRowKey = SchemaUtil.getTableKey(tenantId,
schemaName, tableName);
- byte[] cPhysicalTableName=table.getPhysicalName().getBytes();
- getCoprocessorHost().preAlterTable(Bytes.toString(tenantId),
- SchemaUtil.getTableName(schemaName, tableName), TableName.valueOf(cPhysicalTableName),
- getParentPhysicalTableName(table),type);
-
// Size for worst case - all new columns are PK column
List<Mutation> mutationsForAddingColumnsToViews = Lists.newArrayListWithExpectedSize(tableMetaData.size() * ( 1 + table.getIndexes().size()));
if (type == PTableType.TABLE || type == PTableType.SYSTEM) {
@@ -3269,7 +3124,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
boolean blockWriteRebuildIndex = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_BLOCK_WRITE,
QueryServicesOptions.DEFAULT_INDEX_FAILURE_BLOCK_WRITE);
if (!wasLocked) {
- rowLock = acquireLock(region, key, null);
+ rowLock = region.getRowLock(key, true);
+ if (rowLock == null) {
+ throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
+ }
}
try {
PTable table = (PTable)metaDataCache.getIfPresent(cacheKey);
@@ -3326,10 +3184,16 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
* Lock directly on key, though it may be an index table. This will just prevent a table
* from getting rebuilt too often.
*/
- List<RowLock> rowLocks = new ArrayList<RowLock>(keys.size());
+ List<RowLock> rowLocks = new ArrayList<Region.RowLock>(keys.size());
try {
+ rowLocks = new ArrayList<Region.RowLock>(keys.size());
for (int i = 0; i < keys.size(); i++) {
- acquireLock(region, keys.get(i), rowLocks);
+ Region.RowLock rowLock = region.getRowLock(keys.get(i), true);
+ if (rowLock == null) {
+ throw new IOException("Failed to acquire lock on "
+ + Bytes.toStringBinary(keys.get(i)));
+ }
+ rowLocks.add(rowLock);
}
List<PFunction> functionsAvailable = new ArrayList<PFunction>(keys.size());
@@ -3359,7 +3223,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
if(functionsAvailable.size() == numFunctions) return functionsAvailable;
return null;
} finally {
- releaseRowLocks(region,rowLocks);
+ for (Region.RowLock lock : rowLocks) {
+ lock.release();
+ }
+ rowLocks.clear();
}
}
@@ -3381,11 +3248,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
byte[] schemaName = rowKeyMetaData[SCHEMA_NAME_INDEX];
byte[] tableName = rowKeyMetaData[TABLE_NAME_INDEX];
boolean deletePKColumn = false;
- getCoprocessorHost().preAlterTable(Bytes.toString(tenantId),
- SchemaUtil.getTableName(schemaName, tableName),
- TableName.valueOf(table.getPhysicalName().getBytes()),
- getParentPhysicalTableName(table),table.getType());
-
List<Mutation> additionalTableMetaData = Lists.newArrayList();
PTableType type = table.getType();
@@ -3618,7 +3480,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
PIndexState newState =
PIndexState.fromSerializedValue(newKV.getValueArray()[newKV.getValueOffset()]);
- RowLock rowLock = acquireLock(region, key, null);
+ RowLock rowLock = region.getRowLock(key, true);
if (rowLock == null) {
throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
}
@@ -3640,22 +3502,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
Cell currentDisableTimeStamp = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES);
boolean rowKeyOrderOptimizable = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, ROW_KEY_ORDER_OPTIMIZABLE_BYTES) != null;
- //check permission on data table
- long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
- PTable loadedTable = loadTable(env, key, new ImmutableBytesPtr(key), clientTimeStamp, clientTimeStamp,
- request.getClientVersion());
- if (loadedTable == null) {
- builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND);
- builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
- done.run(builder.build());
- return;
- }
- getCoprocessorHost().preIndexUpdate(Bytes.toString(tenantId),
- SchemaUtil.getTableName(schemaName, tableName),
- TableName.valueOf(loadedTable.getPhysicalName().getBytes()),
- getParentPhysicalTableName(loadedTable),
- newState);
-
PIndexState currentState =
PIndexState.fromSerializedValue(currentStateKV.getValueArray()[currentStateKV
.getValueOffset()]);
@@ -3765,7 +3611,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
if (setRowKeyOrderOptimizableCell) {
UpgradeUtil.addRowKeyOrderOptimizableCell(tableMetadata, key, timeStamp);
}
- mutateRowsWithLocks(region, tableMetadata, Collections.<byte[]> emptySet(), HConstants.NO_NONCE,
+ region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet(), HConstants.NO_NONCE,
HConstants.NO_NONCE);
// Invalidate from cache
Cache<ImmutableBytesPtr,PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
@@ -3926,7 +3772,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
long clientTimeStamp = request.getClientTimestamp();
List<RowLock> locks = Lists.newArrayList();
try {
- getCoprocessorHost().preGetSchema(schemaName);
acquireLock(region, lockKey, locks);
// Get as of latest timestamp so we can detect if we have a
// newer schema that already
@@ -3957,7 +3802,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
done.run(builder.build());
return;
} finally {
- releaseRowLocks(region,locks);
+ region.releaseRowLocks(locks);
}
}
@@ -4060,7 +3905,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
// Don't store function info for temporary functions.
if(!temporaryFunction) {
- mutateRowsWithLocks(region, functionMetaData, Collections.<byte[]> emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
+ region.mutateRowsWithLocks(functionMetaData, Collections.<byte[]> emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
}
// Invalidate the cache - the next getFunction call will add it
@@ -4074,7 +3919,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
done.run(builder.build());
return;
} finally {
- releaseRowLocks(region,locks);
+ region.releaseRowLocks(locks);
}
} catch (Throwable t) {
logger.error("createFunction failed", t);
@@ -4113,7 +3958,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
done.run(MetaDataMutationResult.toProto(result));
return;
}
- mutateRowsWithLocks(region, functionMetaData, Collections.<byte[]> emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
+ region.mutateRowsWithLocks(functionMetaData, Collections.<byte[]> emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
Cache<ImmutableBytesPtr,PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
long currentTime = MetaDataUtil.getClientTimeStamp(functionMetaData);
@@ -4126,7 +3971,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
done.run(MetaDataMutationResult.toProto(result));
return;
} finally {
- releaseRowLocks(region,locks);
+ region.releaseRowLocks(locks);
}
} catch (Throwable t) {
logger.error("dropFunction failed", t);
@@ -4223,7 +4068,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
return;
}
}
- mutateRowsWithLocks(region, schemaMutations, Collections.<byte[]> emptySet(), HConstants.NO_NONCE,
+ region.mutateRowsWithLocks(schemaMutations, Collections.<byte[]> emptySet(), HConstants.NO_NONCE,
HConstants.NO_NONCE);
// Invalidate the cache - the next getSchema call will add it
@@ -4241,7 +4086,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
done.run(builder.build());
return;
} finally {
- releaseRowLocks(region,locks);
+ region.releaseRowLocks(locks);
}
} catch (Throwable t) {
logger.error("Creating the schema" + schemaName + "failed", t);
@@ -4255,7 +4100,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
try {
List<Mutation> schemaMetaData = ProtobufUtil.getMutations(request);
schemaName = request.getSchemaName();
- getCoprocessorHost().preDropSchema(schemaName);
byte[] lockKey = SchemaUtil.getSchemaKey(schemaName);
Region region = env.getRegion();
MetaDataMutationResult result = checkSchemaKeyInRegion(lockKey, region);
@@ -4273,7 +4117,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
done.run(MetaDataMutationResult.toProto(result));
return;
}
- mutateRowsWithLocks(region, schemaMetaData, Collections.<byte[]> emptySet(), HConstants.NO_NONCE,
+ region.mutateRowsWithLocks(schemaMetaData, Collections.<byte[]> emptySet(), HConstants.NO_NONCE,
HConstants.NO_NONCE);
Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env)
.getMetaDataCache();
@@ -4285,7 +4129,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
done.run(MetaDataMutationResult.toProto(result));
return;
} finally {
- releaseRowLocks(region,locks);
+ region.releaseRowLocks(locks);
}
} catch (Throwable t) {
logger.error("drop schema failed:", t);
@@ -4331,48 +4175,4 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
null);
}
-
- private void mutateRowsWithLocks(final Region region, final List<Mutation> mutations, final Set<byte[]> rowsToLock,
- final long nonceGroup, final long nonce) throws IOException {
- // we need to mutate SYSTEM.CATALOG with HBase/login user if access is enabled.
- if (this.accessCheckEnabled) {
- User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
- @Override
- public Void run() throws Exception {
- final Call rpcContext = RpcUtil.getRpcContext();
- // Setting RPC context to null so that the user can be reset
- try {
- RpcUtil.setRpcContext(null);
- region.mutateRowsWithLocks(mutations, rowsToLock, nonceGroup, nonce);
- } catch (Throwable e) {
- throw new IOException(e);
- } finally {
- // Setting RPC context back to original context of the RPC
- RpcUtil.setRpcContext(rpcContext);
- }
- return null;
- }
- });
- } else {
- region.mutateRowsWithLocks(mutations, rowsToLock, nonceGroup, nonce);
- }
- }
-
- private TableName getParentPhysicalTableName(PTable table) {
- return table
- .getType() == PTableType.VIEW
- ? TableName.valueOf(table.getPhysicalName().getBytes())
- : table.getType() == PTableType.INDEX
- ? TableName
- .valueOf(SchemaUtil
- .getPhysicalHBaseTableName(table.getParentSchemaName(),
- table.getParentTableName(), table.isNamespaceMapped())
- .getBytes())
- : TableName
- .valueOf(
- SchemaUtil
- .getPhysicalHBaseTableName(table.getSchemaName(),
- table.getTableName(), table.isNamespaceMapped())
- .getBytes());
- }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5003ac30/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointObserver.java
deleted file mode 100644
index 86b8bf1..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointObserver.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.coprocessor;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.hbase.Coprocessor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.phoenix.coprocessor.PhoenixMetaDataCoprocessorHost.PhoenixMetaDataControllerEnvironment;
-import org.apache.phoenix.schema.PIndexState;
-import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.PTableType;
-
-public interface MetaDataEndpointObserver extends Coprocessor {
-
- void preGetTable( ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,String tableName,
- TableName physicalTableName) throws IOException;
-
- void preCreateTable(final ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, final String tenantId,
- String tableName, TableName physicalTableName, final TableName parentPhysicalTableName,
- PTableType tableType, final Set<byte[]> familySet, Set<TableName> indexes) throws IOException;
-
- void preDropTable(final ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, final String tenantId,
- final String tableName,TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType, List<PTable> indexes) throws IOException;
-
- void preAlterTable(final ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, final String tenantId,final String tableName,
- final TableName physicalTableName,final TableName parentPhysicalTableName, PTableType type) throws IOException;
-
- void preGetSchema(final ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, final String schemaName)
- throws IOException;
-
- void preCreateSchema(final ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, final String schemaName)
- throws IOException;
-
- void preDropSchema(final ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, final String schemaName)
- throws IOException;
-
- void preCreateFunction(final ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, final String tenantId,
- final String functionName) throws IOException;
-
- void preDropFunction(final ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, final String tenantId,
- final String functionName) throws IOException;
-
- void preGetFunctions(final ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, final String tenantId,
- final String functionName) throws IOException;
-
- void preIndexUpdate(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
- String indexName, TableName physicalTableName, TableName parentPhysicalTableName, PIndexState newState) throws IOException;
-
-}
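
The interface removed above was meant to be implemented by extending the
no-op BaseMetaDataEndpointObserver deleted earlier in this commit. A
hypothetical subclass overriding a single hook, assuming the pre-revert
classes are still on the classpath (the class name is made up; the method
signature is copied verbatim from the interface above):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.phoenix.coprocessor.BaseMetaDataEndpointObserver;
    import org.apache.phoenix.coprocessor.PhoenixMetaDataCoprocessorHost.PhoenixMetaDataControllerEnvironment;
    import org.apache.phoenix.schema.PTable;
    import org.apache.phoenix.schema.PTableType;

    public class AuditingMetaDataObserver extends BaseMetaDataEndpointObserver {
        @Override
        public void preDropTable(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx,
                String tenantId, String tableName, TableName physicalTableName,
                TableName parentPhysicalTableName, PTableType tableType,
                List<PTable> indexes) throws IOException {
            // e.g. audit or veto the drop before SYSTEM.CATALOG is mutated
        }
    }
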
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5003ac30/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index af06235..c816549 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -20,7 +20,6 @@ package org.apache.phoenix.coprocessor;
import static org.apache.phoenix.schema.types.PDataType.TRUE_BYTES;
import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
@@ -51,7 +50,6 @@ import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
@@ -165,18 +163,9 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, props));
statsTable = env.getTable(
SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, props));
- final HTableInterface mTable=metaTable;
- final HTableInterface sTable=statsTable;
- User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
- @Override
- public Void run() throws Exception {
- if (UpgradeUtil.truncateStats(mTable, sTable)) {
- LOG.info("Stats are successfully truncated for upgrade 4.7!!");
- }
- return null;
- }
- });
-
+ if (UpgradeUtil.truncateStats(metaTable, statsTable)) {
+ LOG.info("Stats are successfully truncated for upgrade 4.7!!");
+ }
} catch (Exception exception) {
LOG.warn("Exception while truncate stats..,"
+ " please check and delete stats manually inorder to get proper result with old client!!");
[08/37] phoenix git commit: PHOENIX-4280 Delete doesn't work when
immutable indexes are in building state
Posted by ja...@apache.org.
PHOENIX-4280 Delete doesn't work when immutable indexes are in building state
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a49aed8e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a49aed8e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a49aed8e
Branch: refs/heads/4.x-HBase-1.1
Commit: a49aed8e755ccf35e1938754ebba982ca456ab3c
Parents: b1fa6b5
Author: James Taylor <jt...@salesforce.com>
Authored: Thu Oct 19 17:52:29 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:41:23 2017 -0800
----------------------------------------------------------------------
.../phoenix/end2end/index/DropColumnIT.java | 3 +-
.../phoenix/end2end/index/ImmutableIndexIT.java | 105 ++++++++++++++++-
.../end2end/index/IndexMaintenanceIT.java | 7 +-
.../apache/phoenix/compile/DeleteCompiler.java | 18 ++-
.../hbase/index/builder/BaseIndexCodec.java | 33 +++---
.../hbase/index/covered/IndexMetaData.java | 13 ++-
.../hbase/index/covered/LocalTableState.java | 69 ++++++-----
.../hbase/index/covered/NonTxIndexBuilder.java | 115 +------------------
.../hbase/index/scanner/ScannerBuilder.java | 2 +-
.../hbase/index/util/IndexManagementUtil.java | 2 -
.../apache/phoenix/index/IndexMaintainer.java | 29 ++++-
.../phoenix/index/PhoenixIndexMetaData.java | 14 ++-
.../index/PhoenixTransactionalIndexer.java | 34 ++----
.../index/covered/LocalTableStateTest.java | 31 ++---
.../index/covered/NonTxIndexBuilderTest.java | 2 +-
15 files changed, 255 insertions(+), 222 deletions(-)
----------------------------------------------------------------------
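
The crux of the fix sits in the DeleteCompiler hunk below: the delete
markers fanned out to each index were keyed by the data-table row key, so
while an immutable index was still building, its rows were apparently never
targeted. The added code translates each key through the index's
IndexMaintainer. An annotated restatement of that hunk (the surrounding
setup, rs, indexTableRefs, table, and connection, is elided exactly as in
the source method):

    // Annotated restatement of the DeleteCompiler hunk in this commit.
    ImmutableBytesPtr indexPtr = new ImmutableBytesPtr(); // new instance: used as a Map key
    rs.getCurrentRow().getKey(indexPtr); // starts as the data-table row key
    if (sourceTableRef.getTable().getType() != PTableType.INDEX) {
        IndexMaintainer maintainer =
                indexTableRefs.get(i).getTable().getIndexMaintainer(table, connection);
        // Rewrite the pointer so the DELETE_MARKER targets the index row.
        indexPtr.set(maintainer.buildRowKey(null, indexPtr, null, null,
                HConstants.LATEST_TIMESTAMP));
    }

The companion change renames getNonDisabledImmutableIndexes to
getNonDisabledGlobalImmutableIndexes, excluding local indexes from this
client-side path, since local index rows live alongside the data rows.
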
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a49aed8e/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropColumnIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropColumnIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropColumnIT.java
index 4f6c37e..badb2a6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropColumnIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropColumnIT.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
@@ -294,7 +295,7 @@ public class DropColumnIT extends ParallelStatsDisabledIT {
if (!mutable && columnEncoded) {
KeyValueColumnExpression colExpression = new SingleCellColumnExpression(localIndexCol, "0:V2", localIndexTable.getEncodingScheme());
ImmutableBytesPtr ptr = new ImmutableBytesPtr();
- colExpression.evaluate(new ResultTuple(result), ptr);
+ assertTrue(colExpression.evaluate(new ResultTuple(result), ptr));
colValue = ptr.copyBytesIfNecessary();
}
else {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a49aed8e/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
index 4c43068..9eb5440 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
@@ -29,6 +29,7 @@ import java.sql.SQLException;
import java.sql.Statement;
import java.util.Arrays;
import java.util.Collection;
+import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
@@ -40,6 +41,7 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -47,12 +49,15 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.schema.PIndexState;
import org.apache.phoenix.schema.PTableImpl;
+import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.ReadOnlyProps;
import org.apache.phoenix.util.SchemaUtil;
@@ -146,8 +151,13 @@ public class ImmutableIndexIT extends BaseUniqueNamesOwnClusterIT {
String dml = "DELETE from " + fullTableName + " WHERE long_col2 = 4";
try {
conn.createStatement().execute(dml);
- fail();
+ if (!localIndex) {
+ fail();
+ }
} catch (SQLException e) {
+ if (localIndex) {
+ throw e;
+ }
assertEquals(SQLExceptionCode.INVALID_FILTER_ON_IMMUTABLE_ROWS.getErrorCode(),
e.getErrorCode());
}
@@ -156,6 +166,99 @@ public class ImmutableIndexIT extends BaseUniqueNamesOwnClusterIT {
}
}
+ @Test
+ public void testDeleteFromPartialPK() throws Exception {
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ String tableName = "TBL_" + generateUniqueName();
+ String indexName = "IND_" + generateUniqueName();
+ String fullTableName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, tableName);
+ String fullIndexName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, indexName);
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ conn.setAutoCommit(false);
+ String ddl =
+ "CREATE TABLE " + fullTableName + TestUtil.TEST_TABLE_SCHEMA + tableDDLOptions;
+ Statement stmt = conn.createStatement();
+ stmt.execute(ddl);
+ populateTestTable(fullTableName);
+ ddl =
+ "CREATE " + (localIndex ? "LOCAL" : "") + " INDEX " + indexName + " ON "
+ + fullTableName + " (char_pk, varchar_pk)";
+ stmt.execute(ddl);
+
+ ResultSet rs;
+
+ rs = conn.createStatement().executeQuery("SELECT /*+ NO_INDEX*/ COUNT(*) FROM " + fullTableName);
+ assertTrue(rs.next());
+ assertEquals(3, rs.getInt(1));
+ rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM " + fullIndexName);
+ assertTrue(rs.next());
+ assertEquals(3, rs.getInt(1));
+
+ String dml = "DELETE from " + fullTableName + " WHERE varchar_pk='varchar1'";
+ conn.createStatement().execute(dml);
+ assertIndexMutations(conn);
+ conn.commit();
+
+ rs = conn.createStatement().executeQuery("SELECT /*+ NO_INDEX*/ COUNT(*) FROM " + fullTableName);
+ assertTrue(rs.next());
+ assertEquals(2, rs.getInt(1));
+ rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM " + fullIndexName);
+ assertTrue(rs.next());
+ assertEquals(2, rs.getInt(1));
+ }
+ }
+
+ @Test
+ public void testDeleteFromNonPK() throws Exception {
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ String tableName = "TBL_" + generateUniqueName();
+ String indexName = "IND_" + generateUniqueName();
+ String fullTableName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, tableName);
+ String fullIndexName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, indexName);
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ conn.setAutoCommit(false);
+ String ddl =
+ "CREATE TABLE " + fullTableName + TestUtil.TEST_TABLE_SCHEMA + tableDDLOptions;
+ Statement stmt = conn.createStatement();
+ stmt.execute(ddl);
+ populateTestTable(fullTableName);
+ ddl =
+ "CREATE " + (localIndex ? "LOCAL" : "") + " INDEX " + indexName + " ON "
+ + fullTableName + " (varchar_col1, varchar_pk)";
+ stmt.execute(ddl);
+
+ ResultSet rs;
+
+ rs = conn.createStatement().executeQuery("SELECT /*+ NO_INDEX*/ COUNT(*) FROM " + fullTableName);
+ assertTrue(rs.next());
+ assertEquals(3, rs.getInt(1));
+ rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM " + fullIndexName);
+ assertTrue(rs.next());
+ assertEquals(3, rs.getInt(1));
+
+ String dml = "DELETE from " + fullTableName + " WHERE varchar_col1='varchar_a' AND varchar_pk='varchar1'";
+ conn.createStatement().execute(dml);
+ assertIndexMutations(conn);
+ conn.commit();
+
+ TestUtil.dumpTable(conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(fullTableName)));
+
+ rs = conn.createStatement().executeQuery("SELECT /*+ NO_INDEX*/ COUNT(*) FROM " + fullTableName);
+ assertTrue(rs.next());
+ assertEquals(2, rs.getInt(1));
+ rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM " + fullIndexName);
+ assertTrue(rs.next());
+ assertEquals(2, rs.getInt(1));
+ }
+ }
+
+ private void assertIndexMutations(Connection conn) throws SQLException {
+ Iterator<Pair<byte[], List<KeyValue>>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn);
+ assertTrue(iterator.hasNext());
+ iterator.next();
+ assertEquals(!localIndex, iterator.hasNext());
+ }
+
// This test is known to flap. We need PHOENIX-2582 to be fixed before re-enabling it.
@Ignore
@Test
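
The new assertIndexMutations() helper above relies on
PhoenixRuntime.getUncommittedDataIterator, which exposes the mutations
queued on a connection before commit, one entry per physical HBase table.
Because a local index stores its rows in the data table's physical table,
a local index yields a single entry while a global index yields two, which
is what the assertEquals(!localIndex, ...) check encodes. A small sketch of
inspecting the iterator (the class and method names are illustrative):

    import java.sql.Connection;
    import java.sql.SQLException;
    import java.util.Iterator;
    import java.util.List;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.Pair;
    import org.apache.phoenix.util.PhoenixRuntime;

    public class UncommittedDump {
        // With auto-commit off, list the pending mutations per physical table.
        static void dumpUncommitted(Connection conn) throws SQLException {
            Iterator<Pair<byte[], List<KeyValue>>> it =
                    PhoenixRuntime.getUncommittedDataIterator(conn);
            while (it.hasNext()) {
                Pair<byte[], List<KeyValue>> tableMutations = it.next();
                System.out.println(Bytes.toString(tableMutations.getFirst())
                        + ": " + tableMutations.getSecond().size() + " cells pending");
            }
        }
    }
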
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a49aed8e/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMaintenanceIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMaintenanceIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMaintenanceIT.java
index 7d02e80..d5895ae 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMaintenanceIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMaintenanceIT.java
@@ -344,13 +344,14 @@ public class IndexMaintenanceIT extends ParallelStatsDisabledIT {
String dml = "DELETE from " + fullDataTableName + " WHERE long_col2 = 2";
try {
conn.createStatement().execute(dml);
- if (!mutable) {
+ if (!mutable && !localIndex) {
fail();
}
} catch (SQLException e) {
- if (!mutable) {
- assertEquals(SQLExceptionCode.INVALID_FILTER_ON_IMMUTABLE_ROWS.getErrorCode(), e.getErrorCode());
+ if (mutable || localIndex) {
+ throw e;
}
+ assertEquals(SQLExceptionCode.INVALID_FILTER_ON_IMMUTABLE_ROWS.getErrorCode(), e.getErrorCode());
}
if (!mutable) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a49aed8e/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index be07cf4..eb252d3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -31,6 +31,7 @@ import java.util.Map;
import java.util.Set;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.cache.ServerCacheClient;
@@ -47,6 +48,7 @@ import org.apache.phoenix.execute.MutationState;
import org.apache.phoenix.execute.MutationState.RowMutationState;
import org.apache.phoenix.filter.SkipScanFilter;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.index.IndexMaintainer;
import org.apache.phoenix.index.PhoenixIndexCodec;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -62,7 +64,6 @@ import org.apache.phoenix.parse.NamedTableNode;
import org.apache.phoenix.parse.ParseNode;
import org.apache.phoenix.parse.ParseNodeFactory;
import org.apache.phoenix.parse.SelectStatement;
-import org.apache.phoenix.parse.TableName;
import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.query.QueryConstants;
@@ -75,13 +76,13 @@ import org.apache.phoenix.schema.PIndexState;
import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PRow;
import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.IndexType;
import org.apache.phoenix.schema.PTableKey;
import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.ReadOnlyTableException;
import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.TableRef;
import org.apache.phoenix.schema.tuple.Tuple;
-import org.apache.phoenix.schema.types.PDataType;
import org.apache.phoenix.schema.types.PLong;
import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.IndexUtil;
@@ -167,6 +168,11 @@ public class DeleteCompiler {
for (int i = 0; i < indexTableRefs.size(); i++) {
ImmutableBytesPtr indexPtr = new ImmutableBytesPtr(); // allocate new as this is a key in a Map
rs.getCurrentRow().getKey(indexPtr);
+ // Translate the data table row to the index table row
+ if (sourceTableRef.getTable().getType() != PTableType.INDEX) {
+ IndexMaintainer maintainer = indexTableRefs.get(i).getTable().getIndexMaintainer(table, connection);
+ indexPtr.set(maintainer.buildRowKey(null, indexPtr, null, null, HConstants.LATEST_TIMESTAMP));
+ }
indexMutations.get(i).put(indexPtr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
}
if (mutations.size() > maxSize) {
@@ -242,12 +248,12 @@ public class DeleteCompiler {
}
- private Map<PTableKey, PTable> getNonDisabledImmutableIndexes(TableRef tableRef) {
+ private Map<PTableKey, PTable> getNonDisabledGlobalImmutableIndexes(TableRef tableRef) {
PTable table = tableRef.getTable();
if (table.isImmutableRows() && !table.getIndexes().isEmpty()) {
Map<PTableKey, PTable> nonDisabledIndexes = new HashMap<PTableKey, PTable>(table.getIndexes().size());
for (PTable index : table.getIndexes()) {
- if (index.getIndexState() != PIndexState.DISABLE) {
+ if (index.getIndexState() != PIndexState.DISABLE && index.getIndexType() == IndexType.GLOBAL) {
nonDisabledIndexes.put(index.getKey(), index);
}
}
@@ -401,7 +407,7 @@ public class DeleteCompiler {
.setTableName(tableName).build().buildException();
}
- immutableIndex = getNonDisabledImmutableIndexes(tableRefToBe);
+ immutableIndex = getNonDisabledGlobalImmutableIndexes(tableRefToBe);
boolean mayHaveImmutableIndexes = !immutableIndex.isEmpty();
noQueryReqd = !hasLimit;
// Can't run on same server for transactional data, as we need the row keys for the data
@@ -444,7 +450,7 @@ public class DeleteCompiler {
// of immutable indexes.
table = connection.getTable(new PTableKey(table.getTenantId(), table.getName().getString()));
tableRefToBe.setTable(table);
- immutableIndex = getNonDisabledImmutableIndexes(tableRefToBe);
+ immutableIndex = getNonDisabledGlobalImmutableIndexes(tableRefToBe);
}
} catch (MetaDataEntityNotFoundException e) {
// Catch column/column family not found exception, as our meta data may
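
The core of the DeleteCompiler change: row keys returned by the delete query are data table keys, so each global immutable index's delete marker has to be re-keyed into index row key space first. Condensed from the hunk above (loop bookkeeping elided):

ImmutableBytesPtr indexPtr = new ImmutableBytesPtr();
rs.getCurrentRow().getKey(indexPtr); // starts out as the data table row key
IndexMaintainer maintainer = indexTableRef.getTable().getIndexMaintainer(table, connection);
// rewrite the pointer in place to the corresponding index row key
indexPtr.set(maintainer.buildRowKey(null, indexPtr, null, null, HConstants.LATEST_TIMESTAMP));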
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a49aed8e/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexCodec.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexCodec.java
index 1ce4e2e..cf6e95e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexCodec.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/BaseIndexCodec.java
@@ -23,25 +23,22 @@ import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.phoenix.hbase.index.covered.IndexCodec;
-/**
- *
- */
public abstract class BaseIndexCodec implements IndexCodec {
- @Override
- public void initialize(RegionCoprocessorEnvironment env) throws IOException {
- // noop
- }
+ @Override
+ public void initialize(RegionCoprocessorEnvironment env) throws IOException {
+ // noop
+ }
- /**
- * {@inheritDoc}
- * <p>
- * By default, the codec is always enabled. Subclasses should override this method if they want do
- * decide to index on a per-mutation basis.
- * @throws IOException
- */
- @Override
- public boolean isEnabled(Mutation m) throws IOException {
- return true;
- }
+ /**
+ * {@inheritDoc}
+ * <p>
+ * By default, the codec is always enabled. Subclasses should override this method if they want
+ * to decide whether to index on a per-mutation basis.
+ * @throws IOException
+ */
+ @Override
+ public boolean isEnabled(Mutation m) throws IOException {
+ return true;
+ }
}
\ No newline at end of file
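
As the javadoc notes, isEnabled is the per-mutation opt-out hook. A hypothetical subclass gating index maintenance on a custom mutation attribute (the attribute name is illustrative, not a Phoenix constant) could look like:

public abstract class AttributeGatedIndexCodec extends BaseIndexCodec {
    @Override
    public boolean isEnabled(Mutation m) throws IOException {
        // skip index maintenance for mutations the client tagged to bypass it
        return m.getAttribute("SKIP_INDEX") == null;
    }
}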
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a49aed8e/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/IndexMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/IndexMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/IndexMetaData.java
index 5314631..20ed855 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/IndexMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/IndexMetaData.java
@@ -17,14 +17,15 @@
*/
package org.apache.phoenix.hbase.index.covered;
+import org.apache.hadoop.hbase.client.Mutation;
import org.apache.phoenix.coprocessor.BaseScannerRegionObserver.ReplayWrite;
public interface IndexMetaData {
public static final IndexMetaData NULL_INDEX_META_DATA = new IndexMetaData() {
@Override
- public boolean isImmutableRows() {
- return false;
+ public boolean requiresPriorRowState(Mutation m) {
+ return true;
}
@Override
@@ -32,7 +33,13 @@ public interface IndexMetaData {
return null;
}};
- public boolean isImmutableRows();
+
+ /**
+ * Determines whether or not we need to look up the old row to retrieve old row values for maintaining the index.
+ * @param m mutation being performed on the data table
+ * @return true if prior row state is required and false otherwise
+ */
+ public boolean requiresPriorRowState(Mutation m);
public ReplayWrite getReplayWrite();
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a49aed8e/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java
index 0f5a9f9..f7784e5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java
@@ -164,8 +164,27 @@ public class LocalTableState implements TableState {
* @throws IOException
*/
public Pair<CoveredDeleteScanner, IndexUpdate> getIndexedColumnsTableState(
- Collection<? extends ColumnReference> indexedColumns, boolean ignoreNewerMutations, boolean returnNullScannerIfRowNotFound, IndexMetaData indexMetaData) throws IOException {
- ensureLocalStateInitialized(indexedColumns, ignoreNewerMutations, indexMetaData);
+ Collection<? extends ColumnReference> indexedColumns, boolean ignoreNewerMutations, boolean isStateForDeletes, IndexMetaData indexMetaData) throws IOException {
+ // check to see if we haven't initialized any columns yet
+ Collection<? extends ColumnReference> toCover = this.columnSet.findNonCoveredColumns(indexedColumns);
+
+ // add the covered columns to the set
+ for (ColumnReference ref : toCover) {
+ this.columnSet.addColumn(ref);
+ }
+ boolean requiresPriorRowState = indexMetaData.requiresPriorRowState(update);
+ if (!toCover.isEmpty()) {
+ // no need to perform scan to find prior row values when the indexed columns are immutable, as
+ // by definition, there won't be any. If we have indexed non row key columns, then we need to
+ // look up the row so that we can formulate the delete of the index row correctly. We'll always
+ // have our "empty" key value column, so we check if we have more than that as a basis for
+ // needing to look up the prior row values.
+ if (requiresPriorRowState) {
+ // add the current state of the row. Uses listCells() to avoid a new array creation.
+ this.addUpdateCells(this.table.getCurrentRowState(update, toCover, ignoreNewerMutations).listCells(), false);
+ }
+ }
+
// filter out things with a newer timestamp and track the column references to which it applies
ColumnTracker tracker = new ColumnTracker(indexedColumns);
synchronized (this.trackedColumns) {
@@ -175,35 +194,27 @@ public class LocalTableState implements TableState {
}
}
- CoveredDeleteScanner scanner = this.scannerBuilder.buildIndexedColumnScanner(indexedColumns, tracker, ts, returnNullScannerIfRowNotFound);
-
+ CoveredDeleteScanner scanner = this.scannerBuilder.buildIndexedColumnScanner(indexedColumns, tracker, ts,
+ // If we're determining the index state for deletes and either
+ // a) we've looked up the prior row state or
+ // b) we're inserting immutable data
+ // then allow a null scanner to be returned.
+ // FIXME: this is crappy code - we need to simplify the global mutable secondary index implementation
+ // TODO: use mutable transactional secondary index implementation instead (PhoenixTransactionalIndexer)
+ isStateForDeletes && (requiresPriorRowState || insertingData(update)));
return new Pair<CoveredDeleteScanner, IndexUpdate>(scanner, new IndexUpdate(tracker));
}
- /**
- * Initialize the managed local state. Generally, this will only be called by
- * {@link #getNonIndexedColumnsTableState(List)}, which is unlikely to be called concurrently from the outside. Even
- * then, there is still fairly low contention as each new Put/Delete will have its own table state.
- * @param indexMetaData TODO
- */
- private synchronized void ensureLocalStateInitialized(Collection<? extends ColumnReference> columns, boolean ignoreNewerMutations, IndexMetaData indexMetaData)
- throws IOException {
- // check to see if we haven't initialized any columns yet
- Collection<? extends ColumnReference> toCover = this.columnSet.findNonCoveredColumns(columns);
- // we have all the columns loaded, so we are good to go.
- if (toCover.isEmpty()) { return; }
-
- // no need to perform scan to find prior row values when the indexed columns are immutable, as
- // by definition, there won't be any.
- if (!indexMetaData.isImmutableRows()) {
- // add the current state of the row. Uses listCells() to avoid a new array creation.
- this.addUpdateCells(this.table.getCurrentRowState(update, toCover, ignoreNewerMutations).listCells(), false);
- }
-
- // add the covered columns to the set
- for (ColumnReference ref : toCover) {
- this.columnSet.addColumn(ref);
+
+ private static boolean insertingData(Mutation m) {
+ for (Collection<Cell> cells : m.getFamilyCellMap().values()) {
+ for (Cell cell : cells) {
+ if (KeyValue.Type.codeToType(cell.getTypeByte()) != KeyValue.Type.Put) {
+ return false;
+ }
+ }
}
+ return true;
}
@Override
@@ -264,9 +275,9 @@ public class LocalTableState implements TableState {
}
@Override
- public Pair<ValueGetter, IndexUpdate> getIndexUpdateState(Collection<? extends ColumnReference> indexedColumns, boolean ignoreNewerMutations, boolean returnNullScannerIfRowNotFound, IndexMetaData indexMetaData)
+ public Pair<ValueGetter, IndexUpdate> getIndexUpdateState(Collection<? extends ColumnReference> indexedColumns, boolean ignoreNewerMutations, boolean isStateForDeletes, IndexMetaData indexMetaData)
throws IOException {
- Pair<CoveredDeleteScanner, IndexUpdate> pair = getIndexedColumnsTableState(indexedColumns, ignoreNewerMutations, returnNullScannerIfRowNotFound, indexMetaData);
+ Pair<CoveredDeleteScanner, IndexUpdate> pair = getIndexedColumnsTableState(indexedColumns, ignoreNewerMutations, isStateForDeletes, indexMetaData);
ValueGetter valueGetter = IndexManagementUtil.createGetterFromScanner(pair.getFirst(), getCurrentRowKey());
return new Pair<ValueGetter, IndexUpdate>(valueGetter, pair.getSecond());
}
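
A quick illustration of the new insertingData check above, with hypothetical row/family/qualifier names: a mutation counts as inserting data only if every cell is a Put; any delete marker flips the answer.

Put put = new Put(Bytes.toBytes("row1"));
put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
// all cells are of KeyValue.Type.Put, so insertingData(put) returns true

Delete delete = new Delete(Bytes.toBytes("row1"));
delete.addFamily(Bytes.toBytes("cf"));
// the DeleteFamily marker makes insertingData(delete) return false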
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a49aed8e/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
index 50e2c3f..8dd57c0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
@@ -10,10 +10,8 @@
package org.apache.phoenix.hbase.index.covered;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
-import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -29,9 +27,6 @@ import org.apache.phoenix.hbase.index.covered.data.LocalHBaseState;
import org.apache.phoenix.hbase.index.covered.data.LocalTable;
import org.apache.phoenix.hbase.index.covered.update.ColumnTracker;
import org.apache.phoenix.hbase.index.covered.update.IndexUpdateManager;
-import org.apache.phoenix.hbase.index.covered.update.IndexedColumnGroup;
-
-import com.google.common.collect.Lists;
/**
* Build covered indexes for phoenix updates.
@@ -99,17 +94,7 @@ public class NonTxIndexBuilder extends BaseIndexBuilder {
}
}
- // go through each batch of keyvalues and build separate index entries for each
- boolean cleanupCurrentState = !indexMetaData.isImmutableRows();
- /*
- * We have to split the work between the cleanup and the update for each group because when we update the
- * current state of the row for the current batch (appending the mutations for the current batch) the next
- * group will see that as the current state, which will can cause the a delete and a put to be created for
- * the next group.
- */
- if (addMutationsForBatch(manager, batch, state, cleanupCurrentState, indexMetaData)) {
- cleanupCurrentState = false;
- }
+ addMutationsForBatch(manager, batch, state, indexMetaData);
}
/**
@@ -138,17 +123,13 @@ public class NonTxIndexBuilder extends BaseIndexBuilder {
* timestamp-based batch of edits
* @param state
* local state to update and pass to the codec
- * @param requireCurrentStateCleanup
- * <tt>true</tt> if we should should attempt to cleanup the current state of the table, in the event of a
- * 'back in time' batch. <tt>false</tt> indicates we should not attempt the cleanup, e.g. an earlier
- * batch already did the cleanup.
* @param indexMetaData TODO
* @return <tt>true</tt> if we cleaned up the current state forward (had a back-in-time put), <tt>false</tt>
* otherwise
* @throws IOException
*/
private boolean addMutationsForBatch(IndexUpdateManager updateMap, Batch batch, LocalTableState state,
- boolean requireCurrentStateCleanup, IndexMetaData indexMetaData) throws IOException {
+ IndexMetaData indexMetaData) throws IOException {
// need a temporary manager for the current batch. It should resolve any conflicts for the
// current batch. Essentially, we can get the case where a batch doesn't change the current
@@ -160,9 +141,7 @@ public class NonTxIndexBuilder extends BaseIndexBuilder {
// determine if we need to make any cleanup given the pending update.
long batchTs = batch.getTimestamp();
state.setPendingUpdates(batch.getKvs());
- if (!indexMetaData.isImmutableRows()) {
- addCleanupForCurrentBatch(updateMap, batchTs, state, indexMetaData);
- }
+ addCleanupForCurrentBatch(updateMap, batchTs, state, indexMetaData);
// A.2 do a single pass first for the updates to the current state
state.applyPendingUpdates();
@@ -170,36 +149,6 @@ public class NonTxIndexBuilder extends BaseIndexBuilder {
// FIXME: PHOENIX-4057 do not attempt to issue index updates
// for out-of-order mutations since it corrupts the index.
return false;
-
-// long minTs = addUpdateForGivenTimestamp(batchTs, state, updateMap, indexMetaData);
-// // if all the updates are the latest thing in the index, we are done - don't go and fix history
-// if (ColumnTracker.isNewestTime(minTs)) { return false; }
-//
-// // A.3 otherwise, we need to roll up through the current state and get the 'correct' view of the
-// // index. after this, we have the correct view of the index, from the batch up to the index
-// while (!ColumnTracker.isNewestTime(minTs)) {
-// minTs = addUpdateForGivenTimestamp(minTs, state, updateMap, indexMetaData);
-// }
-//
-// // B. only cleanup the current state if we need to - its a huge waste of effort otherwise.
-// if (requireCurrentStateCleanup) {
-// // roll back the pending update. This is needed so we can remove all the 'old' index entries.
-// // We don't need to do the puts here, but just the deletes at the given timestamps since we
-// // just want to completely hide the incorrect entries.
-// state.rollback(batch.getKvs());
-// // setup state
-// state.setPendingUpdates(batch.getKvs());
-//
-// // cleanup the pending batch. If anything in the correct history is covered by Deletes used to
-// // 'fix' history (same row key and ts), we just drop the delete (we don't want to drop both
-// // because the update may have a different set of columns or value based on the update).
-// cleanupIndexStateFromBatchOnward(updateMap, batchTs, state, indexMetaData);
-//
-// // have to roll the state forward again, so the current state is correct
-// state.applyPendingUpdates();
-// return true;
-// }
-// return false;
}
private long addUpdateForGivenTimestamp(long ts, LocalTableState state, IndexUpdateManager updateMap, IndexMetaData indexMetaData)
@@ -249,7 +198,6 @@ public class NonTxIndexBuilder extends BaseIndexBuilder {
*/
// timestamp of the next update we need to track
long minTs = ColumnTracker.NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP;
- List<IndexedColumnGroup> columnHints = new ArrayList<IndexedColumnGroup>();
for (IndexUpdate update : upserts) {
// this is the one bit where we check the timestamps
final ColumnTracker tracker = update.getIndexedColumns();
@@ -265,71 +213,17 @@ public class NonTxIndexBuilder extends BaseIndexBuilder {
continue;
}
- // track index hints for the next round. Hint if we need an update for that column for the
- // next timestamp. These columns clearly won't need to update as we go through time as they
- // already match the most recent possible thing.
- boolean needsCleanup = false;
- if (tracker.hasNewerTimestamps()) {
- columnHints.add(tracker);
- // this update also needs to be cleaned up at the next timestamp because it not the latest.
- needsCleanup = true;
- }
-
// only make the put if the index update has been setup
if (update.isValid()) {
byte[] table = update.getTableName();
Mutation mutation = update.getUpdate();
updateMap.addIndexUpdate(table, mutation);
-
- // only make the cleanup if we made a put and need cleanup
- if (needsCleanup) {
- // there is a TS for the interested columns that is greater than the columns in the
- // put. Therefore, we need to issue a delete at the same timestamp
- Delete d = new Delete(mutation.getRow());
- d.setTimestamp(tracker.getTS());
- updateMap.addIndexUpdate(table, d);
- }
}
}
return minTs;
}
/**
- * Cleanup the index based on the current state from the given batch. Iterates over each timestamp (for the indexed
- * rows) for the current state of the table and cleans up all the existing entries generated by the codec.
- * <p>
- * Adds all pending updates to the updateMap
- *
- * @param updateMap
- * updated with the pending index updates from the codec
- * @param batchTs
- * timestamp from which we should cleanup
- * @param state
- * current state of the primary table. Should already by setup to the correct state from which we want to
- * cleanup.
- * @param indexMetaData TODO
- * @throws IOException
- */
- private void cleanupIndexStateFromBatchOnward(IndexUpdateManager updateMap, long batchTs, LocalTableState state, IndexMetaData indexMetaData)
- throws IOException {
- // get the cleanup for the current state
- state.setCurrentTimestamp(batchTs);
- addDeleteUpdatesToMap(updateMap, state, batchTs, indexMetaData);
- Set<ColumnTracker> trackers = state.getTrackedColumns();
- long minTs = ColumnTracker.NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP;
- for (ColumnTracker tracker : trackers) {
- if (tracker.getTS() < minTs) {
- minTs = tracker.getTS();
- }
- }
- state.resetTrackedColumns();
- if (!ColumnTracker.isNewestTime(minTs)) {
- state.setHints(Lists.newArrayList(trackers));
- cleanupIndexStateFromBatchOnward(updateMap, minTs, state, indexMetaData);
- }
- }
-
- /**
* Get the index deletes from the codec {@link IndexCodec#getIndexDeletes(TableState, IndexMetaData)} and then add them to the
* update map.
* <p>
@@ -340,9 +234,6 @@ public class NonTxIndexBuilder extends BaseIndexBuilder {
*/
protected void addDeleteUpdatesToMap(IndexUpdateManager updateMap, LocalTableState state, long ts, IndexMetaData indexMetaData)
throws IOException {
- if (indexMetaData.isImmutableRows()) {
- return;
- }
Iterable<IndexUpdate> cleanup = codec.getIndexDeletes(state, indexMetaData);
if (cleanup != null) {
for (IndexUpdate d : cleanup) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a49aed8e/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
index 5547958..ad09c0c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
@@ -71,7 +71,7 @@ public class ScannerBuilder {
// filter out kvs based on deletes
ApplyAndFilterDeletesFilter deleteFilter = new ApplyAndFilterDeletesFilter(getAllFamilies(indexedColumns));
filters.addFilter(deleteFilter);
-
+
// combine the family filters and the rest of the filters as a
return getFilteredScanner(filters, returnNullIfRowNotFound, deleteFilter.getDeleteTracker());
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a49aed8e/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
index 697caef..a4a34a1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
@@ -25,7 +25,6 @@ import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.Map.Entry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -34,7 +33,6 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a49aed8e/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
index 83b1d58..b4566a4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.ByteStringer;
@@ -1048,10 +1049,14 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
}
private enum DeleteType {SINGLE_VERSION, ALL_VERSIONS};
- private DeleteType getDeleteTypeOrNull(Collection<KeyValue> pendingUpdates) {
+ private DeleteType getDeleteTypeOrNull(Collection<? extends Cell> pendingUpdates) {
+ return getDeleteTypeOrNull(pendingUpdates, this.nDataCFs);
+ }
+
+ private DeleteType getDeleteTypeOrNull(Collection<? extends Cell> pendingUpdates, int nCFs) {
int nDeleteCF = 0;
int nDeleteVersionCF = 0;
- for (KeyValue kv : pendingUpdates) {
+ for (Cell kv : pendingUpdates) {
if (kv.getTypeByte() == KeyValue.Type.DeleteFamilyVersion.getCode()) {
nDeleteVersionCF++;
}
@@ -1064,22 +1069,34 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
// This is what a delete looks like on the server side for mutable indexing...
// Should all be one or the other for DeleteFamily versus DeleteFamilyVersion, but just in case not
DeleteType deleteType = null;
- if (nDeleteVersionCF > 0 && nDeleteVersionCF >= this.nDataCFs) {
+ if (nDeleteVersionCF > 0 && nDeleteVersionCF >= nCFs) {
deleteType = DeleteType.SINGLE_VERSION;
} else {
int nDelete = nDeleteCF + nDeleteVersionCF;
- if (nDelete>0 && nDelete >= this.nDataCFs) {
+ if (nDelete>0 && nDelete >= nCFs) {
deleteType = DeleteType.ALL_VERSIONS;
}
}
return deleteType;
}
- public boolean isRowDeleted(Collection<KeyValue> pendingUpdates) {
+ public boolean isRowDeleted(Collection<? extends Cell> pendingUpdates) {
return getDeleteTypeOrNull(pendingUpdates) != null;
}
- private boolean hasIndexedColumnChanged(ValueGetter oldState, Collection<KeyValue> pendingUpdates, long ts) throws IOException {
+ public boolean isRowDeleted(Mutation m) {
+ if (m.getFamilyCellMap().size() < this.nDataCFs) {
+ return false;
+ }
+ for (List<Cell> cells : m.getFamilyCellMap().values()) {
+ if (getDeleteTypeOrNull(cells, 1) == null) { // Checking CFs one by one
+ return false;
+ }
+ }
+ return true;
+ }
+
+ private boolean hasIndexedColumnChanged(ValueGetter oldState, Collection<? extends Cell> pendingUpdates, long ts) throws IOException {
if (pendingUpdates.isEmpty()) {
return false;
}
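
The new Mutation overload of isRowDeleted above reports a row delete only when every column family in the mutation carries a family-level delete marker, checked one CF at a time. Sketch with hypothetical row key and families:

Delete d = new Delete(rowKey);
for (byte[] family : dataFamilies) {
    d.addFamily(family); // one DeleteFamily marker per CF
}
// every CF has a family delete, so maintainer.isRowDeleted(d) returns true;
// dropping any one family from the loop would make it return false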
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a49aed8e/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
index 7908103..05371a6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
@@ -22,6 +22,7 @@ import java.sql.SQLException;
import java.util.List;
import java.util.Map;
+import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.phoenix.cache.GlobalCache;
import org.apache.phoenix.cache.IndexMetaDataCache;
@@ -43,6 +44,7 @@ public class PhoenixIndexMetaData implements IndexMetaData {
private final IndexMetaDataCache indexMetaDataCache;
private final ReplayWrite replayWrite;
private final boolean isImmutable;
+ private final boolean hasNonPkColumns;
private static IndexMetaDataCache getIndexMetaData(RegionCoprocessorEnvironment env, Map<String, byte[]> attributes) throws IOException {
if (attributes == null) { return IndexMetaDataCache.EMPTY_INDEX_META_DATA_CACHE; }
@@ -102,10 +104,13 @@ public class PhoenixIndexMetaData implements IndexMetaData {
public PhoenixIndexMetaData(RegionCoprocessorEnvironment env, Map<String,byte[]> attributes) throws IOException {
this.indexMetaDataCache = getIndexMetaData(env, attributes);
boolean isImmutable = true;
+ boolean hasNonPkColumns = false;
for (IndexMaintainer maintainer : indexMetaDataCache.getIndexMaintainers()) {
isImmutable &= maintainer.isImmutableRows();
+ hasNonPkColumns |= !maintainer.getIndexedColumns().isEmpty();
}
this.isImmutable = isImmutable;
+ this.hasNonPkColumns = hasNonPkColumns;
this.attributes = attributes;
this.replayWrite = getReplayWrite(attributes);
}
@@ -122,12 +127,17 @@ public class PhoenixIndexMetaData implements IndexMetaData {
return attributes;
}
+ @Override
public ReplayWrite getReplayWrite() {
return replayWrite;
}
-
- @Override
+
public boolean isImmutableRows() {
return isImmutable;
}
+
+ @Override
+ public boolean requiresPriorRowState(Mutation m) {
+ return !isImmutable || (indexMetaDataCache.getIndexMaintainers().get(0).isRowDeleted(m) && hasNonPkColumns);
+ }
}
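
Unpacking requiresPriorRowState above: the prior row must be read either because rows are mutable, or because an immutable row is being fully deleted and the index has non-PK indexed columns whose old values are needed to compute the index row key to delete. As boolean steps (same names as the diff):

boolean mutableRows = !isImmutable;
boolean immutableRowDeleteNeedingOldValues =
    indexMetaDataCache.getIndexMaintainers().get(0).isRowDeleted(m) && hasNonPkColumns;
boolean requiresPriorRowState = mutableRows || immutableRowDeleteNeedingOldValues;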
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a49aed8e/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
index bc53b6b..3495267 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
@@ -89,7 +89,6 @@ import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.ScanUtil;
import org.apache.phoenix.util.SchemaUtil;
import org.apache.phoenix.util.ServerUtil;
-import org.apache.phoenix.util.TransactionUtil;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
@@ -284,16 +283,6 @@ public class PhoenixTransactionalIndexer extends BaseRegionObserver {
} else {
findPriorValueMutations = mutations;
}
- while(mutationIterator.hasNext()) {
- Mutation m = mutationIterator.next();
- // add the mutation to the batch set
- ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
- if (mutations != findPriorValueMutations && isDeleteMutation(m)) {
- addMutation(findPriorValueMutations, row, m);
- }
- addMutation(mutations, row, m);
- }
-
// Collect the set of mutable ColumnReferences so that we can first
// run a scan to get the current state. We'll need this to delete
// the existing index rows.
@@ -309,6 +298,17 @@ public class PhoenixTransactionalIndexer extends BaseRegionObserver {
mutableColumns.addAll(allColumns);
}
+ while(mutationIterator.hasNext()) {
+ Mutation m = mutationIterator.next();
+ // add the mutation to the batch set
+ ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
+ // if we have no non PK columns, no need to find the prior values
+ if (mutations != findPriorValueMutations && indexMetaData.requiresPriorRowState(m)) {
+ addMutation(findPriorValueMutations, row, m);
+ }
+ addMutation(mutations, row, m);
+ }
+
Collection<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>(mutations.size() * 2 * indexMaintainers.size());
try {
// Track if we have row keys with Delete mutations (or Puts that are
@@ -363,17 +363,6 @@ public class PhoenixTransactionalIndexer extends BaseRegionObserver {
return indexUpdates;
}
- private static boolean isDeleteMutation(Mutation m) {
- for (Map.Entry<byte[],List<Cell>> cellMap : m.getFamilyCellMap().entrySet()) {
- for (Cell cell : cellMap.getValue()) {
- if (cell.getTypeByte() != KeyValue.Type.Put.getCode() || TransactionUtil.isDelete(cell)) {
- return true;
- }
- }
- }
- return false;
- }
-
private void processMutation(RegionCoprocessorEnvironment env,
PhoenixIndexMetaData indexMetaData, byte[] txRollbackAttribute,
ResultScanner scanner,
@@ -398,6 +387,7 @@ public class PhoenixTransactionalIndexer extends BaseRegionObserver {
for (Mutation m : mutations.values()) {
TxTableState state = new TxTableState(env, upsertColumns, indexMetaData.getAttributes(), txnContext.getWritePointer(), m);
generatePuts(indexMetaData, indexUpdates, state);
+ generateDeletes(indexMetaData, indexUpdates, txRollbackAttribute, state);
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a49aed8e/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java
index 0efb63a..052930d 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/LocalTableStateTest.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -57,13 +58,13 @@ public class LocalTableStateTest {
private static final IndexMetaData indexMetaData = new IndexMetaData() {
@Override
- public boolean isImmutableRows() {
- return false;
+ public ReplayWrite getReplayWrite() {
+ return null;
}
@Override
- public ReplayWrite getReplayWrite() {
- return null;
+ public boolean requiresPriorRowState(Mutation m) {
+ return true;
}
};
@@ -120,14 +121,14 @@ public class LocalTableStateTest {
IndexMetaData indexMetaData = new IndexMetaData() {
@Override
- public boolean isImmutableRows() {
- return false;
- }
-
- @Override
public ReplayWrite getReplayWrite() {
return null;
}
+
+ @Override
+ public boolean requiresPriorRowState(Mutation m) {
+ return true;
+ }
};
Put m = new Put(row);
@@ -157,16 +158,16 @@ public class LocalTableStateTest {
IndexMetaData indexMetaData = new IndexMetaData() {
@Override
- public boolean isImmutableRows() {
- return true;
- }
-
- @Override
public ReplayWrite getReplayWrite() {
return null;
}
+
+ @Override
+ public boolean requiresPriorRowState(Mutation m) {
+ return false;
+ }
- };
+ };
Put m = new Put(row);
m.add(fam, qual, ts, val);
// setup mocks
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a49aed8e/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java
index d06967d..d94cce0 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java
@@ -145,7 +145,7 @@ public class NonTxIndexBuilderTest extends BaseConnectionlessQueryTest {
Mockito.when(mockRegionInfo.getEndKey()).thenReturn(Bytes.toBytes("z"));
mockIndexMetaData = Mockito.mock(PhoenixIndexMetaData.class);
- Mockito.when(mockIndexMetaData.isImmutableRows()).thenReturn(false);
+ Mockito.when(mockIndexMetaData.requiresPriorRowState((Mutation)Mockito.any())).thenReturn(true);
Mockito.when(mockIndexMetaData.getIndexMaintainers())
.thenReturn(Collections.singletonList(getTestIndexMaintainer()));
[17/37] phoenix git commit: PHOENIX-4322 DESC primary key column with variable length does not work in SkipScanFilter
Posted by ja...@apache.org.
PHOENIX-4322 DESC primary key column with variable length does not work in SkipScanFilter
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3df249cf
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3df249cf
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3df249cf
Branch: refs/heads/4.x-HBase-1.1
Commit: 3df249cf55806dd523cd72328df8713344173e36
Parents: e319ff0
Author: maryannxue <ma...@gmail.com>
Authored: Mon Oct 30 11:49:40 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:39 2017 -0800
----------------------------------------------------------------------
.../src/it/java/org/apache/phoenix/end2end/SortOrderIT.java | 9 +++++++++
.../src/main/java/org/apache/phoenix/util/ScanUtil.java | 7 +++++--
2 files changed, 14 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3df249cf/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortOrderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortOrderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortOrderIT.java
index 655dbb1..58bbabb 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortOrderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortOrderIT.java
@@ -167,6 +167,15 @@ public class SortOrderIT extends ParallelStatsDisabledIT {
runQueryTest(ddl, upsert("oid", "code"), insertedRows, new Object[][]{{"o2", 2}}, new WhereCondition("oid", "IN", "('o2')"),
table);
}
+
+ @Test
+ public void inDescCompositePK3() throws Exception {
+ String table = generateUniqueName();
+ String ddl = "CREATE table " + table + " (oid INTEGER NOT NULL, code VARCHAR NOT NULL constraint pk primary key (oid DESC, code DESC))";
+ Object[][] insertedRows = new Object[][]{{1, "o1"}, {2, "o2"}, {3, "o3"}};
+ runQueryTest(ddl, upsert("oid", "code"), insertedRows, new Object[][]{{2, "o2"}, {1, "o1"}},
+ new WhereCondition("(oid, code)", "IN", "((1, 'o1'), (2, 'o2'))"), table);
+ }
@Test
public void likeDescCompositePK1() throws Exception {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3df249cf/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
index a844226..8ab4f20 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
@@ -431,8 +431,11 @@ public class ScanUtil {
anyInclusiveUpperRangeKey |= !range.isSingleKey() && inclusiveUpper;
// A null or empty byte array is always represented as a zero byte
byte sepByte = SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), bytes.length == 0, field);
-
- if ( !isFixedWidth && ( sepByte == QueryConstants.DESC_SEPARATOR_BYTE
+ // The result of an RVC evaluation can come with a trailing separator already, so we
+ // should avoid adding another one.
+ if ( !isFixedWidth
+ && ( bytes.length == 0 || key[offset - 1] != sepByte )
+ && ( sepByte == QueryConstants.DESC_SEPARATOR_BYTE
|| ( !exclusiveUpper
&& (fieldIndex < schema.getMaxFields() || inclusiveUpper || exclusiveLower) ) ) ) {
key[offset++] = sepByte;
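
The failure mode the guard above fixes: for a variable-length DESC trailing column, the key produced by an RVC comparison can already end in the inverted separator byte (0xFF), so unconditionally appending sepByte yielded a key one byte too long and broke SkipScanFilter matching. A reduced sketch of the guard (key, offset, bytes, and sepByte as in the hunk; the remaining DESC/inclusive conditions are elided):

boolean alreadyTerminated = bytes.length > 0 && key[offset - 1] == sepByte;
if (!isFixedWidth && !alreadyTerminated /* && remaining separator conditions */) {
    key[offset++] = sepByte;
}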
[34/37] phoenix git commit: PHOENIX-4287 Make indexes inherit use stats property from their parent table or view
Posted by ja...@apache.org.
PHOENIX-4287 Make indexes inherit use stats property from their parent table or view
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f9746794
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f9746794
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f9746794
Branch: refs/heads/4.x-HBase-1.1
Commit: f9746794b0f2d2d1fbe7a3da822340bfc656daed
Parents: 474bc18
Author: Samarth Jain <sa...@apache.org>
Authored: Thu Nov 2 16:55:55 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:41 2017 -0800
----------------------------------------------------------------------
.../end2end/ExplainPlanWithStatsEnabledIT.java | 146 +++++++++++++++++--
.../phoenix/iterate/BaseResultIterators.java | 41 +++++-
2 files changed, 171 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/f9746794/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
index e76b147..bfc6819 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
@@ -72,8 +72,8 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
private static void createIndex(String indexName, String table, long guidePostWidth)
throws Exception {
try (Connection conn = DriverManager.getConnection(getUrl())) {
- conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + table
- + " (c1.a) INCLUDE (c2.b) ");
+ conn.createStatement().execute(
+ "CREATE INDEX " + indexName + " ON " + table + " (c1.a) INCLUDE (c2.b) ");
conn.createStatement().execute("UPDATE STATISTICS " + indexName);
}
}
@@ -558,9 +558,10 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
Estimate info = getByteRowEstimates(conn, sql, binds);
assertEquals((Long) 10l, info.getEstimatedRows());
assertTrue(info.getEstimateInfoTs() > 0);
-
+
// Now, let's disable USE_STATS_FOR_PARALLELIZATION on the table
- conn.createStatement().execute("ALTER TABLE " + tableName + " SET USE_STATS_FOR_PARALLELIZATION = " + false);
+ conn.createStatement().execute(
+ "ALTER TABLE " + tableName + " SET USE_STATS_FOR_PARALLELIZATION = " + false);
rs = conn.createStatement().executeQuery(sql);
// stats are not being used for parallelization. So number of scans is lower.
assertEquals(4, rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan()
@@ -570,11 +571,11 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
info = getByteRowEstimates(conn, sql, binds);
assertEquals((Long) 10l, info.getEstimatedRows());
assertTrue(info.getEstimateInfoTs() > 0);
-
+
// assert that the aggregate query on view also works correctly
String viewName = "V_" + generateUniqueName();
- conn.createStatement()
- .execute("CREATE VIEW " + viewName + " AS SELECT * FROM " + tableName + " USE_STATS_FOR_PARALLELIZATION = false");
+ conn.createStatement().execute("CREATE VIEW " + viewName + " AS SELECT * FROM "
+ + tableName + " USE_STATS_FOR_PARALLELIZATION = false");
sql = "SELECT COUNT(*) FROM " + viewName;
rs = conn.createStatement().executeQuery(sql);
// stats are not being used for parallelization. So number of scans is lower.
@@ -595,21 +596,21 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
rs = conn.createStatement().executeQuery(sql);
// stats are being used for parallelization. So number of scans is higher.
assertEquals(14, rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan()
- .getScans().get(0).size());
+ .getScans().get(0).size());
assertTrue(rs.next());
assertEquals(10, rs.getInt(1));
info = getByteRowEstimates(conn, sql, binds);
assertEquals((Long) 10l, info.getEstimatedRows());
assertTrue(info.getEstimateInfoTs() > 0);
- conn.createStatement().execute(
- "ALTER TABLE " + viewName + " SET USE_STATS_FOR_PARALLELIZATION=true");
+ conn.createStatement()
+ .execute("ALTER TABLE " + viewName + " SET USE_STATS_FOR_PARALLELIZATION=true");
sql = "SELECT COUNT(*) FROM " + viewName;
// query the view
rs = conn.createStatement().executeQuery(sql);
// stats are not being used for parallelization. So number of scans is higher.
assertEquals(14, rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan()
- .getScans().get(0).size());
+ .getScans().get(0).size());
assertTrue(rs.next());
assertEquals(10, rs.getInt(1));
info = getByteRowEstimates(conn, sql, binds);
@@ -944,4 +945,127 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
assertEquals((Long) 6l, info.estimatedRows);
}
}
+
+ @Test
+ public void testIndexesUseStatsIfOnForParentTable() throws Exception {
+ testIndexesInheritUseStatsPropFromParentTable(true);
+ }
+
+ @Test
+ public void testIndexesDontUseStatsIfOffForParentTable() throws Exception {
+ testIndexesInheritUseStatsPropFromParentTable(false);
+ }
+
+ private void testIndexesInheritUseStatsPropFromParentTable(boolean useStats) throws Exception {
+ String baseTable = generateUniqueName();
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ String ddl =
+ "CREATE TABLE " + baseTable
+ + " (k INTEGER PRIMARY KEY, a bigint, b bigint, c bigint) GUIDE_POSTS_WIDTH=20, USE_STATS_FOR_PARALLELIZATION="
+ + useStats;
+ conn.createStatement().execute(ddl);
+ conn.createStatement().execute("upsert into " + baseTable + " values (100,1,1,1)");
+ conn.createStatement().execute("upsert into " + baseTable + " values (101,2,2,2)");
+ conn.createStatement().execute("upsert into " + baseTable + " values (102,3,3,3)");
+ conn.createStatement().execute("upsert into " + baseTable + " values (103,4,4,4)");
+ conn.createStatement().execute("upsert into " + baseTable + " values (104,5,5,5)");
+ conn.createStatement().execute("upsert into " + baseTable + " values (105,6,6,6)");
+ conn.createStatement().execute("upsert into " + baseTable + " values (106,7,7,7)");
+ conn.createStatement().execute("upsert into " + baseTable + " values (107,8,8,8)");
+ conn.createStatement().execute("upsert into " + baseTable + " values (108,9,9,9)");
+ conn.createStatement().execute("upsert into " + baseTable + " values (109,10,10,10)");
+ conn.commit();
+
+ // Create global index on base table
+ String globalIndex = "GI_" + generateUniqueName();
+ ddl = "CREATE INDEX " + globalIndex + " ON " + baseTable + " (a) INCLUDE (b) ";
+ conn.createStatement().execute(ddl);
+
+ // Create local index on base table
+ String localIndex = "LI_" + generateUniqueName();
+ ddl = "CREATE LOCAL INDEX " + localIndex + " ON " + baseTable + " (b) INCLUDE (c) ";
+ conn.createStatement().execute(ddl);
+
+ // Create a view and an index on it
+ String view = "V_" + generateUniqueName();
+ ddl =
+ "CREATE VIEW " + view + " AS SELECT * FROM " + baseTable
+ + " USE_STATS_FOR_PARALLELIZATION=" + useStats;
+ conn.createStatement().execute(ddl);
+ String viewIndex = "VI_" + generateUniqueName();
+ ddl = "CREATE INDEX " + viewIndex + " ON " + view + " (b)";
+ conn.createStatement().execute(ddl);
+
+ // collect stats for all
+ conn.createStatement().execute("UPDATE STATISTICS " + baseTable);
+
+ // query against the base table
+ String query = "SELECT /*+ NO_INDEX */ COUNT(*) FROM " + baseTable;
+ PhoenixResultSet rs =
+ conn.createStatement().executeQuery(query).unwrap(PhoenixResultSet.class);
+ // assert query is against base table
+ assertEquals(baseTable,
+ rs.getStatement().getQueryPlan().getTableRef().getTable().getName().getString());
+ assertEquals(useStats ? 11 : 1, rs.unwrap(PhoenixResultSet.class).getStatement()
+ .getQueryPlan().getScans().get(0).size());
+
+ // query against the global index
+ query = "SELECT B FROM " + baseTable + " WHERE A > 0";
+ rs = conn.createStatement().executeQuery(query).unwrap(PhoenixResultSet.class);
+ // assert query is against global index
+ assertEquals(globalIndex, rs.unwrap(PhoenixResultSet.class).getStatement()
+ .getQueryPlan().getTableRef().getTable().getName().getString());
+ assertEquals(useStats ? 11 : 1, rs.unwrap(PhoenixResultSet.class).getStatement()
+ .getQueryPlan().getScans().get(0).size());
+
+ // query against the local index
+ query = "SELECT C FROM " + baseTable + " WHERE B > 0";
+ rs = conn.createStatement().executeQuery(query).unwrap(PhoenixResultSet.class);
+ // assert query is against local index
+ assertEquals(localIndex, rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan()
+ .getTableRef().getTable().getName().getString());
+ assertEquals(useStats ? 11 : 1, rs.unwrap(PhoenixResultSet.class).getStatement()
+ .getQueryPlan().getScans().get(0).size());
+
+ // query against the view
+ query = "SELECT * FROM " + view;
+ rs = conn.createStatement().executeQuery(query).unwrap(PhoenixResultSet.class);
+ // assert query is against view
+ assertEquals(view, rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan()
+ .getTableRef().getTable().getName().getString());
+ assertEquals(useStats ? 11 : 1, rs.unwrap(PhoenixResultSet.class).getStatement()
+ .getQueryPlan().getScans().get(0).size());
+
+ // query against the view index
+ query = "SELECT 1 FROM " + view + " WHERE B > 0";
+ rs = conn.createStatement().executeQuery(query).unwrap(PhoenixResultSet.class);
+ // assert query is against viewIndex
+ assertEquals(viewIndex, rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan()
+ .getTableRef().getTable().getName().getString());
+ assertEquals(useStats ? 11 : 1, rs.unwrap(PhoenixResultSet.class).getStatement()
+ .getQueryPlan().getScans().get(0).size());
+
+ // flip the use stats property on the view and see if view index picks it up
+ conn.createStatement().execute(
+ "ALTER VIEW " + view + " SET USE_STATS_FOR_PARALLELIZATION=" + !useStats);
+
+ // query against the view
+ query = "SELECT * FROM " + view;
+ rs = conn.createStatement().executeQuery(query).unwrap(PhoenixResultSet.class);
+ // assert query is against view
+ assertEquals(view, rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan()
+ .getTableRef().getTable().getName().getString());
+ assertEquals(!useStats ? 11 : 1, rs.unwrap(PhoenixResultSet.class).getStatement()
+ .getQueryPlan().getScans().get(0).size());
+
+ // query against the view index
+ query = "SELECT 1 FROM " + view + " WHERE B > 0";
+ rs = conn.createStatement().executeQuery(query).unwrap(PhoenixResultSet.class);
+ // assert query is against viewIndex
+ assertEquals(viewIndex, rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan()
+ .getTableRef().getTable().getName().getString());
+ assertEquals(!useStats ? 11 : 1, rs.unwrap(PhoenixResultSet.class).getStatement()
+ .getQueryPlan().getScans().get(0).size());
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/f9746794/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index b4c9698..18f28e2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -82,6 +82,7 @@ import org.apache.phoenix.filter.DistinctPrefixFilter;
import org.apache.phoenix.filter.EncodedQualifiersColumnProjectionFilter;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.hbase.index.util.VersionUtil;
+import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.join.HashCacheClient;
import org.apache.phoenix.parse.FilterableStatement;
import org.apache.phoenix.parse.HintNode;
@@ -98,7 +99,10 @@ import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
import org.apache.phoenix.schema.PTable.IndexType;
import org.apache.phoenix.schema.PTable.QualifierEncodingScheme;
import org.apache.phoenix.schema.PTable.ViewType;
+import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;
+import org.apache.phoenix.schema.TableNotFoundException;
import org.apache.phoenix.schema.TableRef;
import org.apache.phoenix.schema.stats.GuidePostsInfo;
import org.apache.phoenix.schema.stats.GuidePostsKey;
@@ -491,11 +495,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
scanId = new UUID(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong()).toString();
initializeScan(plan, perScanLimit, offset, scan);
- this.useStatsForParallelization =
- table.useStatsForParallelization() == null
- ? context.getConnection().getQueryServices().getConfiguration().getBoolean(
- USE_STATS_FOR_PARALLELIZATION, DEFAULT_USE_STATS_FOR_PARALLELIZATION)
- : table.useStatsForParallelization();
+ this.useStatsForParallelization = getStatsForParallelizationProp(context, table);
this.scans = getParallelScans();
List<KeyRange> splitRanges = Lists.newArrayListWithExpectedSize(scans.size() * ESTIMATED_GUIDEPOSTS_PER_REGION);
for (List<Scan> scanList : scans) {
@@ -1238,4 +1238,35 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
public Long getEstimateInfoTimestamp() {
return this.estimateInfoTimestamp;
}
+
+ private boolean getStatsForParallelizationProp(StatementContext context, PTable table) {
+ Boolean useStats = table.useStatsForParallelization();
+ if (useStats != null) {
+ return useStats;
+ }
+ /*
+ * For a view index, we use the property set on view. For indexes on base table, whether
+ * global or local, we use the property set on the base table.
+ */
+ if (table.getType() == PTableType.INDEX) {
+ PhoenixConnection conn = context.getConnection();
+ String parentTableName = table.getParentName().getString();
+ try {
+ PTable parentTable =
+ conn.getTable(new PTableKey(conn.getTenantId(), parentTableName));
+ useStats = parentTable.useStatsForParallelization();
+ if (useStats != null) {
+ return useStats;
+ }
+ } catch (TableNotFoundException e) {
+ logger.warn("Unable to find parent table \"" + parentTableName + "\" of table \""
+ + table.getName().getString()
+ + "\" to determine USE_STATS_FOR_PARALLELIZATION",
+ e);
+ }
+ }
+ return context.getConnection().getQueryServices().getConfiguration()
+ .getBoolean(USE_STATS_FOR_PARALLELIZATION, DEFAULT_USE_STATS_FOR_PARALLELIZATION);
+ }
+
}
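
Net effect of getStatsForParallelizationProp above, sketched end to end with hypothetical names: an index that has no USE_STATS_FOR_PARALLELIZATION of its own now resolves the property from its parent table or view before falling back to the cluster config default.

try (Connection conn = DriverManager.getConnection(getUrl())) {
    conn.createStatement().execute(
        "CREATE TABLE t (k INTEGER PRIMARY KEY, a BIGINT) USE_STATS_FOR_PARALLELIZATION=false");
    conn.createStatement().execute("CREATE INDEX t_idx ON t (a)");
    // queries routed to t_idx now parallelize as if USE_STATS_FOR_PARALLELIZATION=false,
    // inherited from t rather than taken from the config default
}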
[31/37] phoenix git commit: PHOENIX-4349 Update version to 4.13.0
Posted by ja...@apache.org.
PHOENIX-4349 Update version to 4.13.0
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/07aacc2f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/07aacc2f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/07aacc2f
Branch: refs/heads/4.x-HBase-1.1
Commit: 07aacc2febb6c23be6fa7ad95c9e49690accb9d2
Parents: 81019c6
Author: James Taylor <jt...@salesforce.com>
Authored: Fri Nov 3 09:26:58 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:41 2017 -0800
----------------------------------------------------------------------
.../java/org/apache/phoenix/coprocessor/MetaDataProtocol.java | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/07aacc2f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index c4ecc3f..fe11ec7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -63,7 +63,7 @@ import com.google.protobuf.ByteString;
*/
public abstract class MetaDataProtocol extends MetaDataService {
public static final int PHOENIX_MAJOR_VERSION = 4;
- public static final int PHOENIX_MINOR_VERSION = 12;
+ public static final int PHOENIX_MINOR_VERSION = 13;
public static final int PHOENIX_PATCH_NUMBER = 0;
public static final int PHOENIX_VERSION =
VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER);
@@ -92,8 +92,9 @@ public abstract class MetaDataProtocol extends MetaDataService {
public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0 = MIN_TABLE_TIMESTAMP + 27;
// Since there's no upgrade code, keep the version the same as the previous version
public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_12_0 = MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0;
+ public static final long MIN_SYSTEM_TABLE_TIMESTAMP_4_13_0 = MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0;
// MIN_SYSTEM_TABLE_TIMESTAMP needs to be set to the max of all the MIN_SYSTEM_TABLE_TIMESTAMP_* constants
- public static final long MIN_SYSTEM_TABLE_TIMESTAMP = MIN_SYSTEM_TABLE_TIMESTAMP_4_12_0;
+ public static final long MIN_SYSTEM_TABLE_TIMESTAMP = MIN_SYSTEM_TABLE_TIMESTAMP_4_13_0;
// ALWAYS update this map whenever rolling out a new release (major, minor or patch release).
// Key is the SYSTEM.CATALOG timestamp for the version and value is the version string.
@@ -112,6 +113,7 @@ public abstract class MetaDataProtocol extends MetaDataService {
TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_10_0, "4.10.x");
TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0, "4.11.x");
TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_12_0, "4.12.x");
+ TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_13_0, "4.13.x");
}
public static final String CURRENT_CLIENT_VERSION = PHOENIX_MAJOR_VERSION + "." + PHOENIX_MINOR_VERSION + "." + PHOENIX_PATCH_NUMBER;
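
The PHOENIX_VERSION constant above packs major/minor/patch into a single int via VersionUtil.encodeVersion so that client and server versions can be compared numerically. A hedged sketch of one plausible packing, assuming eight bits per component (the exact shift widths are an assumption, not confirmed from the Phoenix source):

    // Assumption: 8 bits per component; enough for versions like 4.13.0.
    final class VersionEncodingSketch {
        static int encodeVersion(int major, int minor, int patch) {
            return (major << 16) | (minor << 8) | patch;
        }
        public static void main(String[] args) {
            // 4.13.0 encodes greater than 4.12.0, so plain int comparison orders releases.
            System.out.println(encodeVersion(4, 13, 0) > encodeVersion(4, 12, 0)); // true
        }
    }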
[03/37] phoenix git commit: PHOENIX-4295 Fix argument order for
StatsCollectorIT derived classes
Posted by ja...@apache.org.
PHOENIX-4295 Fix argument order for StatsCollectorIT derived classes
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1c3116fc
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1c3116fc
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1c3116fc
Branch: refs/heads/4.x-HBase-1.1
Commit: 1c3116fce8f05cf1240ac2746594c2735c7b2af5
Parents: e2351ef
Author: James Taylor <jt...@salesforce.com>
Authored: Wed Oct 18 09:29:44 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:39:55 2017 -0800
----------------------------------------------------------------------
.../end2end/ColumnEncodedImmutableNonTxStatsCollectorIT.java | 4 ++--
.../end2end/ColumnEncodedImmutableTxStatsCollectorIT.java | 5 ++---
.../end2end/ColumnEncodedMutableNonTxStatsCollectorIT.java | 5 ++---
.../phoenix/end2end/ColumnEncodedMutableTxStatsCollectorIT.java | 5 ++---
.../end2end/NonColumnEncodedImmutableNonTxStatsCollectorIT.java | 5 ++---
.../end2end/NonColumnEncodedImmutableTxStatsCollectorIT.java | 5 ++---
.../end2end/SysTableNamespaceMappedStatsCollectorIT.java | 3 +--
7 files changed, 13 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c3116fc/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedImmutableNonTxStatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedImmutableNonTxStatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedImmutableNonTxStatsCollectorIT.java
index 7ef825e..d5d8442 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedImmutableNonTxStatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedImmutableNonTxStatsCollectorIT.java
@@ -29,10 +29,10 @@ public class ColumnEncodedImmutableNonTxStatsCollectorIT extends StatsCollectorI
super(mutable, transactional, userTableNamespaceMapped, columnEncoded);
}
- @Parameters(name="columnEncoded = {0}, mutable = {1}, transactional = {2}, isUserTableNamespaceMapped = {3}")
+ @Parameters(name = "mutable = {0}, transactional = {1}, isUserTableNamespaceMapped = {2}, columnEncoded = {3}")
public static Collection<Boolean[]> data() {
return Arrays.asList(new Boolean[][] {
- { true, false, false, false }, { true, false, false, true }
+ { false, false, false, true }, { false, false, true, true }
});
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c3116fc/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedImmutableTxStatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedImmutableTxStatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedImmutableTxStatsCollectorIT.java
index 0c6934b..23b1654 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedImmutableTxStatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedImmutableTxStatsCollectorIT.java
@@ -29,10 +29,9 @@ public class ColumnEncodedImmutableTxStatsCollectorIT extends StatsCollectorIT {
super(mutable, transactional, userTableNamespaceMapped, columnEncoded);
}
- @Parameters(
- name = "columnEncoded = {0}, mutable = {1}, transactional = {2}, isUserTableNamespaceMapped = {3}")
+ @Parameters(name = "mutable = {0}, transactional = {1}, isUserTableNamespaceMapped = {2}, columnEncoded = {3}")
public static Collection<Boolean[]> data() {
return Arrays.asList(
- new Boolean[][] { { true, false, true, false }, { true, false, true, true }, });
+ new Boolean[][] { { false, true, false, true }, { false, true, true, true }, });
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c3116fc/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedMutableNonTxStatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedMutableNonTxStatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedMutableNonTxStatsCollectorIT.java
index 3cd81c7..24869a2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedMutableNonTxStatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedMutableNonTxStatsCollectorIT.java
@@ -29,10 +29,9 @@ public class ColumnEncodedMutableNonTxStatsCollectorIT extends StatsCollectorIT
super(mutable, transactional, userTableNamespaceMapped, columnEncoded);
}
- @Parameters(
- name = "columnEncoded = {0}, mutable = {1}, transactional = {2}, isUserTableNamespaceMapped = {3}")
+ @Parameters(name = "mutable = {0}, transactional = {1}, isUserTableNamespaceMapped = {2}, columnEncoded = {3}")
public static Collection<Boolean[]> data() {
return Arrays.asList(
- new Boolean[][] { { true, true, false, false }, { true, true, false, true } });
+ new Boolean[][] { { true, false, false, true }, { true, false, true, true } });
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c3116fc/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedMutableTxStatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedMutableTxStatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedMutableTxStatsCollectorIT.java
index e4a2734..eea591d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedMutableTxStatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedMutableTxStatsCollectorIT.java
@@ -29,10 +29,9 @@ public class ColumnEncodedMutableTxStatsCollectorIT extends StatsCollectorIT {
super(mutable, transactional, userTableNamespaceMapped, columnEncoded);
}
- @Parameters(
- name = "columnEncoded = {0}, mutable = {1}, transactional = {2}, isUserTableNamespaceMapped = {3}")
+ @Parameters(name = "mutable = {0}, transactional = {1}, isUserTableNamespaceMapped = {2}, columnEncoded = {3}")
public static Collection<Boolean[]> data() {
return Arrays.asList(
- new Boolean[][] { { true, true, true, false }, { true, true, true, true } });
+ new Boolean[][] { { true, true, false, true }, { true, true, true, true } });
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c3116fc/phoenix-core/src/it/java/org/apache/phoenix/end2end/NonColumnEncodedImmutableNonTxStatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NonColumnEncodedImmutableNonTxStatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NonColumnEncodedImmutableNonTxStatsCollectorIT.java
index 792752f..fe70030 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NonColumnEncodedImmutableNonTxStatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NonColumnEncodedImmutableNonTxStatsCollectorIT.java
@@ -29,10 +29,9 @@ public class NonColumnEncodedImmutableNonTxStatsCollectorIT extends StatsCollect
super(mutable, transactional, userTableNamespaceMapped, columnEncoded);
}
- @Parameters(
- name = "columnEncoded = {0}, mutable = {1}, transactional = {2}, isUserTableNamespaceMapped = {3}")
+ @Parameters(name = "mutable = {0}, transactional = {1}, isUserTableNamespaceMapped = {2}, columnEncoded = {3}")
public static Collection<Boolean[]> data() {
return Arrays.asList(
- new Boolean[][] { { false, false, false, false }, { false, false, false, true } });
+ new Boolean[][] { { false, false, false, false }, { false, false, true, false } });
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c3116fc/phoenix-core/src/it/java/org/apache/phoenix/end2end/NonColumnEncodedImmutableTxStatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NonColumnEncodedImmutableTxStatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NonColumnEncodedImmutableTxStatsCollectorIT.java
index 8df50cb..10a846a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NonColumnEncodedImmutableTxStatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NonColumnEncodedImmutableTxStatsCollectorIT.java
@@ -29,10 +29,9 @@ public class NonColumnEncodedImmutableTxStatsCollectorIT extends StatsCollectorI
super(mutable, transactional, userTableNamespaceMapped, columnEncoded);
}
- @Parameters(
- name = "columnEncoded = {0}, mutable = {1}, transactional = {2}, isUserTableNamespaceMapped = {3}")
+ @Parameters(name = "mutable = {0}, transactional = {1}, isUserTableNamespaceMapped = {2}, columnEncoded = {3}")
public static Collection<Boolean[]> data() {
return Arrays.asList(
- new Boolean[][] { { false, false, true, false }, { false, false, true, true } });
+ new Boolean[][] { { false, true, false, false }, { false, true, true, false } });
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c3116fc/phoenix-core/src/it/java/org/apache/phoenix/end2end/SysTableNamespaceMappedStatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SysTableNamespaceMappedStatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SysTableNamespaceMappedStatsCollectorIT.java
index 36779c2..ea5f32f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SysTableNamespaceMappedStatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SysTableNamespaceMappedStatsCollectorIT.java
@@ -35,8 +35,7 @@ public class SysTableNamespaceMappedStatsCollectorIT extends StatsCollectorIT {
super(mutable, transactional, userTableNamespaceMapped, columnEncoded);
}
- @Parameters(
- name = "columnEncoded = {0}, mutable = {1}, transactional = {2}, isUserTableNamespaceMapped = {3}")
+ @Parameters(name = "mutable = {0}, transactional = {1}, isUserTableNamespaceMapped = {2}, columnEncoded = {3}")
public static Collection<Boolean[]> data() {
return Arrays.asList(
new Boolean[][] { { true, true, false, false }, { true, true, false, true }, });
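
The reordering above matters because JUnit's @Parameters name template is purely positional: {0}, {1}, ... bind to the values of each parameter row in the order the constructor receives them, so a mismatched template mislabels every run without failing any test. A minimal sketch using the standard JUnit 4 API (the ExampleIT class is hypothetical):

    import java.util.Arrays;
    import java.util.Collection;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;
    import org.junit.runners.Parameterized.Parameters;

    @RunWith(Parameterized.class)
    public class ExampleIT {
        private final boolean mutable;
        private final boolean transactional;

        public ExampleIT(boolean mutable, boolean transactional) {
            this.mutable = mutable;
            this.transactional = transactional;
        }

        // {0} names the first constructor argument, {1} the second.
        @Parameters(name = "mutable = {0}, transactional = {1}")
        public static Collection<Boolean[]> data() {
            return Arrays.asList(new Boolean[][] { { true, false }, { false, true } });
        }

        @Test
        public void reportsUnderTheRightLabel() {
            // The display name now reflects the actual mutable/transactional values.
        }
    }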
[07/37] phoenix git commit: PHOENIX-3757 System mutex table not being
created in SYSTEM namespace when namespace mapping is enabled
Posted by ja...@apache.org.
PHOENIX-3757 System mutex table not being created in SYSTEM namespace when namespace mapping is enabled
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6c527c1b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6c527c1b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6c527c1b
Branch: refs/heads/4.x-HBase-1.1
Commit: 6c527c1b1da6f50f75dcb63c2396daf1318c1f22
Parents: 87f8b1e
Author: Karan Mehta <ka...@gmail.com>
Authored: Thu Oct 26 11:32:14 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:41:23 2017 -0800
----------------------------------------------------------------------
.../MigrateSystemTablesToSystemNamespaceIT.java | 402 +++++++++++++++++++
.../end2end/SystemTablePermissionsIT.java | 3 +-
.../phoenix/coprocessor/MetaDataProtocol.java | 3 +
.../exception/UpgradeInProgressException.java | 8 +-
.../query/ConnectionQueryServicesImpl.java | 184 ++++++---
.../org/apache/phoenix/util/UpgradeUtil.java | 44 +-
.../query/ConnectionQueryServicesImplTest.java | 9 +-
7 files changed, 572 insertions(+), 81 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c527c1b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MigrateSystemTablesToSystemNamespaceIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MigrateSystemTablesToSystemNamespaceIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MigrateSystemTablesToSystemNamespaceIT.java
new file mode 100644
index 0000000..91e34be
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MigrateSystemTablesToSystemNamespaceIT.java
@@ -0,0 +1,402 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
+import org.apache.phoenix.exception.UpgradeInProgressException;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.query.ConnectionQueryServicesImpl;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+
+import static org.junit.Assert.*;
+
+@Category(NeedsOwnMiniClusterTest.class)
+public class MigrateSystemTablesToSystemNamespaceIT extends BaseTest {
+
+ private static final Set<String> PHOENIX_SYSTEM_TABLES = new HashSet<>(Arrays.asList(
+ "SYSTEM.CATALOG", "SYSTEM.SEQUENCE", "SYSTEM.STATS", "SYSTEM.FUNCTION",
+ "SYSTEM.MUTEX"));
+ private static final Set<String> PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES = new HashSet<>(
+ Arrays.asList("SYSTEM:CATALOG", "SYSTEM:SEQUENCE", "SYSTEM:STATS", "SYSTEM:FUNCTION",
+ "SYSTEM:MUTEX"));
+ private static final String SCHEMA_NAME = "MIGRATETEST";
+ private static final String TABLE_NAME =
+ SCHEMA_NAME + "." + MigrateSystemTablesToSystemNamespaceIT.class.getSimpleName().toUpperCase();
+ private static final int NUM_RECORDS = 5;
+
+ private HBaseTestingUtility testUtil = null;
+ private Set<String> hbaseTables;
+
+ // Create multiple users, since Phoenix caches the connection per user;
+ // the migration or upgrade code runs again for each new user.
+ final UserGroupInformation user1 =
+ UserGroupInformation.createUserForTesting("user1", new String[0]);
+ final UserGroupInformation user2 =
+ UserGroupInformation.createUserForTesting("user2", new String[0]);
+ final UserGroupInformation user3 =
+ UserGroupInformation.createUserForTesting("user3", new String[0]);
+ final UserGroupInformation user4 =
+ UserGroupInformation.createUserForTesting("user4", new String[0]);
+
+
+ @Before
+ public final void doSetup() throws Exception {
+ testUtil = new HBaseTestingUtility();
+ Configuration conf = testUtil.getConfiguration();
+ enableNamespacesOnServer(conf);
+ testUtil.startMiniCluster(1);
+ }
+
+ @After
+ public void tearDownMiniCluster() {
+ try {
+ if (testUtil != null) {
+ testUtil.shutdownMiniCluster();
+ testUtil = null;
+ }
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+
+ // Tests that a client can create and read tables on a fresh HBase cluster with
+ // system namespace mapping enabled from the start
+ @Test
+ public void freshClientsCreateNamespaceMappedSystemTables() throws IOException, InterruptedException {
+
+ user1.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ createConnection(getClientPropertiesWithSystemMappingEnabled());
+ createTable(getClientPropertiesWithSystemMappingEnabled());
+ return null;
+ }
+ });
+
+ hbaseTables = getHBaseTables();
+ assertTrue(hbaseTables.containsAll(PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES));
+
+ user1.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ createConnection(getClientPropertiesWithSystemMappingEnabled());
+ readTable(getClientPropertiesWithSystemMappingEnabled());
+ return null;
+ }
+ });
+
+ }
+
+ // Tests that NEWER clients can read tables on an HBase cluster after the system tables are migrated
+ @Test
+ public void migrateSystemTablesInExistingCluster() throws IOException, InterruptedException {
+
+ user1.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ createConnection(getClientPropertiesWithSystemMappingDisabled());
+ createTable(getClientPropertiesWithSystemMappingDisabled());
+ return null;
+ }
+ });
+
+ hbaseTables = getHBaseTables();
+ assertTrue(hbaseTables.containsAll(PHOENIX_SYSTEM_TABLES));
+
+ user2.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ createConnection(getClientPropertiesWithSystemMappingEnabled());
+ readTable(getClientPropertiesWithSystemMappingEnabled());
+ return null;
+ }
+ });
+
+ hbaseTables = getHBaseTables();
+ assertTrue(hbaseTables.containsAll(PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES));
+ }
+
+ // Tests that OLDER clients fail after the system tables are migrated.
+ // Clients should be restarted with new properties that are consistent between client and server.
+ @Test
+ public void oldClientsAfterSystemTableMigrationShouldFail() throws IOException, InterruptedException {
+
+ user1.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ createConnection(getClientPropertiesWithSystemMappingEnabled());
+ return null;
+ }
+ });
+
+ hbaseTables = getHBaseTables();
+ assertTrue(hbaseTables.size() == PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES.size());
+ assertTrue(hbaseTables.containsAll(PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES));
+
+ try {
+ user2.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ createConnection(getClientPropertiesWithSystemMappingDisabled());
+ return null;
+ }
+ });
+ fail("Client should not be able to connect to cluster with inconsistent SYSTEM table namespace properties");
+ } catch (Exception e) {
+ //ignore
+ }
+
+ hbaseTables = getHBaseTables();
+ assertTrue(hbaseTables.size() == PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES.size());
+ assertTrue(hbaseTables.containsAll(PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES));
+ }
+
+ // Tests that only one client can migrate the system table to system namespace
+ // Migrate process acquires lock in SYSMUTEX table
+ @Test
+ public void onlyOneClientCanMigrate() throws IOException, InterruptedException, SQLException {
+
+ user1.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ createConnection(getClientPropertiesWithSystemMappingDisabled());
+ return null;
+ }
+ });
+
+ hbaseTables = getHBaseTables();
+ assertTrue(hbaseTables.size() == PHOENIX_SYSTEM_TABLES.size());
+ assertTrue(hbaseTables.containsAll(PHOENIX_SYSTEM_TABLES));
+
+ user2.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ // Acquire Mutex Lock
+ changeMutexLock(getClientPropertiesWithSystemMappingDisabled(), true);
+ return null;
+ }
+ });
+
+ hbaseTables = getHBaseTables();
+ assertTrue(hbaseTables.size() == PHOENIX_SYSTEM_TABLES.size());
+ assertTrue(hbaseTables.containsAll(PHOENIX_SYSTEM_TABLES));
+
+ try {
+ user3.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ createConnection(getClientPropertiesWithSystemMappingEnabled());
+ return null;
+ }
+ });
+ fail("Multiple clients should not be able to migrate simultaneously.");
+ } catch (Exception e) {
+ if(!(e.getCause() instanceof UpgradeInProgressException)) {
+ fail("UpgradeInProgressException expected since the user is trying to migrate when SYSMUTEX is locked.");
+ }
+ }
+
+ hbaseTables = getHBaseTables();
+ assertTrue(hbaseTables.size() == PHOENIX_SYSTEM_TABLES.size());
+ assertTrue(hbaseTables.containsAll(PHOENIX_SYSTEM_TABLES));
+
+ user2.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ // Release Mutex Lock
+ changeMutexLock(getClientPropertiesWithSystemMappingDisabled(), false);
+ return null;
+ }
+ });
+
+ hbaseTables = getHBaseTables();
+ assertTrue(hbaseTables.size() == PHOENIX_SYSTEM_TABLES.size());
+ assertTrue(hbaseTables.containsAll(PHOENIX_SYSTEM_TABLES));
+
+ user3.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ createConnection(getClientPropertiesWithSystemMappingEnabled());
+ return null;
+ }
+ });
+
+ hbaseTables = getHBaseTables();
+ assertTrue(hbaseTables.size() == PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES.size());
+ assertTrue(hbaseTables.containsAll(PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES));
+ }
+
+ private void changeMutexLock(Properties clientProps, boolean acquire) throws SQLException, IOException {
+ ConnectionQueryServices services = null;
+ byte[] mutexRowKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA,
+ PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE);
+
+ try (Connection conn = DriverManager.getConnection(getJdbcUrl(), clientProps)) {
+ services = conn.unwrap(PhoenixConnection.class).getQueryServices();
+ if(acquire) {
+ assertTrue(((ConnectionQueryServicesImpl) services)
+ .acquireUpgradeMutex(MetaDataProtocol.MIN_SYSTEM_TABLE_MIGRATION_TIMESTAMP, mutexRowKey));
+ } else {
+ ((ConnectionQueryServicesImpl) services).releaseUpgradeMutex(mutexRowKey);
+ }
+ }
+ }
+
+ private void enableNamespacesOnServer(Configuration conf) {
+ conf.set(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.TRUE.toString());
+ }
+
+ private Properties getClientPropertiesWithSystemMappingEnabled() {
+ Properties clientProps = new Properties();
+ clientProps.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.TRUE.toString());
+ clientProps.setProperty(QueryServices.IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE, Boolean.TRUE.toString());
+ return clientProps;
+ }
+
+ private Properties getClientPropertiesWithSystemMappingDisabled() {
+ Properties clientProps = new Properties();
+ clientProps.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.TRUE.toString());
+ clientProps.setProperty(QueryServices.IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE, Boolean.FALSE.toString());
+ return clientProps;
+ }
+
+ private Set<String> getHBaseTables() throws IOException {
+ Set<String> tables = new HashSet<>();
+ for (TableName tn : testUtil.getHBaseAdmin().listTableNames()) {
+ tables.add(tn.getNameAsString());
+ }
+ return tables;
+ }
+
+ private void createConnection(Properties clientProps) throws SQLException, IOException {
+ try (Connection conn = DriverManager.getConnection(getJdbcUrl(), clientProps);
+ Statement stmt = conn.createStatement();) {
+ verifySyscatData(clientProps, conn.toString(), stmt);
+ }
+ }
+
+ private void createTable(Properties clientProps) throws SQLException {
+ try (Connection conn = DriverManager.getConnection(getJdbcUrl(), clientProps);
+ Statement stmt = conn.createStatement();) {
+ assertFalse(stmt.execute("DROP TABLE IF EXISTS " + TABLE_NAME));
+ stmt.execute("CREATE SCHEMA " + SCHEMA_NAME);
+ assertFalse(stmt.execute("CREATE TABLE " + TABLE_NAME
+ + "(pk INTEGER not null primary key, data VARCHAR)"));
+ try (PreparedStatement pstmt = conn.prepareStatement("UPSERT INTO "
+ + TABLE_NAME + " values(?, ?)")) {
+ for (int i = 0; i < NUM_RECORDS; i++) {
+ pstmt.setInt(1, i);
+ pstmt.setString(2, Integer.toString(i));
+ assertEquals(1, pstmt.executeUpdate());
+ }
+ }
+ conn.commit();
+ }
+ }
+
+ private void readTable(Properties clientProps) throws SQLException {
+ try (Connection conn = DriverManager.getConnection(getJdbcUrl(), clientProps);
+ Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("SELECT pk, data FROM " + TABLE_NAME);
+ assertNotNull(rs);
+ int i = 0;
+ while (rs.next()) {
+ assertEquals(i, rs.getInt(1));
+ assertEquals(Integer.toString(i), rs.getString(2));
+ i++;
+ }
+ assertEquals(NUM_RECORDS, i);
+ }
+ }
+
+ private void verifySyscatData(Properties clientProps, String connName, Statement stmt) throws SQLException {
+ ResultSet rs = stmt.executeQuery("SELECT * FROM SYSTEM.CATALOG");
+
+ ReadOnlyProps props = new ReadOnlyProps((Map)clientProps);
+ boolean systemTablesMapped = SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, props);
+ boolean systemSchemaExists = false;
+ Set<String> namespaceMappedSystemTablesSet = new HashSet<>(PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES);
+ Set<String> systemTablesSet = new HashSet<>(PHOENIX_SYSTEM_TABLES);
+
+ while(rs.next()) {
+
+ if(rs.getString("IS_NAMESPACE_MAPPED") == null) {
+ systemSchemaExists = rs.getString("TABLE_SCHEM").equals(PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME) ? true : systemSchemaExists;
+ } else if (rs.getString("COLUMN_NAME") == null) {
+ String schemaName = rs.getString("TABLE_SCHEM");
+ String tableName = rs.getString("TABLE_NAME");
+
+ if(schemaName.equals(PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME)) {
+ if (systemTablesMapped) {
+ namespaceMappedSystemTablesSet.remove(String.valueOf
+ (TableName.valueOf(schemaName + QueryConstants.NAMESPACE_SEPARATOR + tableName)));
+ assertTrue(rs.getString("IS_NAMESPACE_MAPPED").equals(Boolean.TRUE.toString()));
+ } else {
+ systemTablesSet.remove(String.valueOf
+ (TableName.valueOf(schemaName + QueryConstants.NAME_SEPARATOR + tableName)));
+ assertTrue(rs.getString("IS_NAMESPACE_MAPPED").equals(Boolean.FALSE.toString()));
+ }
+ }
+ }
+ }
+
+ if(!systemSchemaExists) {
+ fail(PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME + " entry doesn't exist in SYSTEM.CATALOG table.");
+ }
+
+ // The set will still contain the SYSMUTEX table, since that table is not exposed in SYSCAT
+ if (systemTablesMapped) {
+ assertTrue(namespaceMappedSystemTablesSet.size() == 1);
+ } else {
+ assertTrue(systemTablesSet.size() == 1);
+ }
+ }
+
+ private String getJdbcUrl() {
+ return "jdbc:phoenix:localhost:" + testUtil.getZkCluster().getClientPort() + ":/hbase";
+ }
+
+}
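
The test above deliberately runs each connection attempt under a distinct UserGroupInformation identity, because Phoenix caches one connection per user; a second attempt by the same user would bypass the migration path entirely. The doAs pattern in isolation (standard Hadoop API; the work inside run() is a placeholder):

    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsSketch {
        public static void main(String[] args) throws Exception {
            // Each createUserForTesting(...) identity gets its own Phoenix connection,
            // so the upgrade/migration code is exercised once per user.
            UserGroupInformation user =
                    UserGroupInformation.createUserForTesting("userA", new String[0]);
            user.doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    // placeholder: open a Phoenix connection as "userA" here
                    return null;
                }
            });
        }
    }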
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c527c1b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
index 166b135..49202a4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
@@ -57,10 +57,9 @@ public class SystemTablePermissionsIT {
private static final Set<String> PHOENIX_SYSTEM_TABLES = new HashSet<>(Arrays.asList(
"SYSTEM.CATALOG", "SYSTEM.SEQUENCE", "SYSTEM.STATS", "SYSTEM.FUNCTION",
"SYSTEM.MUTEX"));
- // PHOENIX-XXXX SYSTEM.MUTEX isn't being created in the SYSTEM namespace as it should be.
private static final Set<String> PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES = new HashSet<>(
Arrays.asList("SYSTEM:CATALOG", "SYSTEM:SEQUENCE", "SYSTEM:STATS", "SYSTEM:FUNCTION",
- "SYSTEM.MUTEX"));
+ "SYSTEM:MUTEX"));
private static final String TABLE_NAME =
SystemTablePermissionsIT.class.getSimpleName().toUpperCase();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c527c1b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index 09abde4..655068d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -68,6 +68,8 @@ public abstract class MetaDataProtocol extends MetaDataService {
VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER);
public static final long MIN_TABLE_TIMESTAMP = 0;
+ public static final long MIN_SYSTEM_TABLE_MIGRATION_TIMESTAMP = 0;
+ public static final String MIGRATION_IN_PROGRESS = "MigrationInProgress";
public static final int DEFAULT_MAX_META_DATA_VERSIONS = 1000;
public static final boolean DEFAULT_META_DATA_KEEP_DELETED_CELLS = true;
@@ -95,6 +97,7 @@ public abstract class MetaDataProtocol extends MetaDataService {
// Key is the SYSTEM.CATALOG timestamp for the version and value is the version string.
private static final NavigableMap<Long, String> TIMESTAMP_VERSION_MAP = new TreeMap<>();
static {
+ TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_MIGRATION_TIMESTAMP, MIGRATION_IN_PROGRESS);
TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0, "4.1.x");
TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_2_0, "4.2.0");
TIMESTAMP_VERSION_MAP.put(MIN_SYSTEM_TABLE_TIMESTAMP_4_2_1, "4.2.1");
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c527c1b/phoenix-core/src/main/java/org/apache/phoenix/exception/UpgradeInProgressException.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/UpgradeInProgressException.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/UpgradeInProgressException.java
index 08ae304..9c9f2a8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/UpgradeInProgressException.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/UpgradeInProgressException.java
@@ -18,10 +18,14 @@
package org.apache.phoenix.exception;
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
+
public class UpgradeInProgressException extends RetriableUpgradeException {
public UpgradeInProgressException(String upgradeFrom, String upgradeTo) {
- super("Cluster is being concurrently upgraded from " + upgradeFrom + " to " + upgradeTo
+ super((upgradeFrom.equals(MetaDataProtocol.MIGRATION_IN_PROGRESS) ?
+ "System Tables are concurrently being migrated to system namespace" :
+ "Cluster is being concurrently upgraded from " + upgradeFrom + " to " + upgradeTo)
+ ". Please retry establishing connection.", SQLExceptionCode.CONCURRENT_UPGRADE_IN_PROGRESS
.getSQLState(), SQLExceptionCode.CONCURRENT_UPGRADE_IN_PROGRESS.getErrorCode());
}
-}
\ No newline at end of file
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c527c1b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 6ddcc7e..4868551 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -190,7 +190,6 @@ import org.apache.phoenix.schema.EmptySequenceCacheException;
import org.apache.phoenix.schema.FunctionNotFoundException;
import org.apache.phoenix.schema.MetaDataClient;
import org.apache.phoenix.schema.MetaDataSplitPolicy;
-import org.apache.phoenix.schema.NewerSchemaAlreadyExistsException;
import org.apache.phoenix.schema.NewerTableAlreadyExistsException;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PColumnFamily;
@@ -334,7 +333,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
return hbaseVersion >= PhoenixDatabaseMetaData.MIN_RENEW_LEASE_VERSION;
}
});
-
+
private PMetaData newEmptyMetaData() {
return new PSynchronizedMetaData(new PMetaDataImpl(INITIAL_META_DATA_TABLE_CAPACITY, getProps()));
}
@@ -821,7 +820,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
return false;
}
-
+
private void addCoprocessors(byte[] tableName, HTableDescriptor descriptor, PTableType tableType, Map<String,Object> tableProps) throws SQLException {
// The phoenix jar must be available on HBase classpath
int priority = props.getInt(QueryServices.COPROCESSOR_PRIORITY_ATTRIB, QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY);
@@ -1616,7 +1615,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
private void dropTable(byte[] tableNameToDelete) throws SQLException {
dropTables(Collections.<byte[]>singletonList(tableNameToDelete));
}
-
+
private void dropTables(final List<byte[]> tableNamesToDelete) throws SQLException {
SQLException sqlE = null;
try (HBaseAdmin admin = getAdmin()) {
@@ -2393,26 +2392,31 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
//check if the server is already updated and have namespace config properly set.
checkClientServerCompatibility(SYSTEM_CATALOG_NAME_BYTES);
}
- ensureSystemTablesUpgraded(ConnectionQueryServicesImpl.this.getProps());
- } else if (mappedSystemCatalogExists) { throw new SQLExceptionInfo.Builder(
- SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES)
- .setMessage("Cannot initiate connection as "
- + SchemaUtil.getPhysicalTableName(
- SYSTEM_CATALOG_NAME_BYTES, true)
- + " is found but client does not have "
- + IS_NAMESPACE_MAPPING_ENABLED + " enabled")
- .build().buildException(); }
- createSysMutexTable(admin);
+
+ // If SYSTEM tables exist, they are migrated to the HBase SYSTEM namespace
+ // If they don't exist, this method will create the HBase SYSTEM namespace and return
+ ensureSystemTablesMigratedToSystemNamespace(ConnectionQueryServicesImpl.this.getProps());
+ } else if (mappedSystemCatalogExists) {
+ throw new SQLExceptionInfo.Builder(
+ SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES)
+ .setMessage("Cannot initiate connection as "
+ + SchemaUtil.getPhysicalTableName(
+ SYSTEM_CATALOG_NAME_BYTES, true)
+ + " is found but client does not have "
+ + IS_NAMESPACE_MAPPING_ENABLED + " enabled")
+ .build().buildException();
+ }
}
Properties scnProps = PropertiesUtil.deepCopy(props);
scnProps.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB,
Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP));
scnProps.remove(PhoenixRuntime.TENANT_ID_ATTRIB);
String globalUrl = JDBCUtil.removeProperty(url, PhoenixRuntime.TENANT_ID_ATTRIB);
- try (PhoenixConnection metaConnection = new PhoenixConnection(ConnectionQueryServicesImpl.this, globalUrl,
- scnProps, newEmptyMetaData())) {
+ try (HBaseAdmin hBaseAdmin = getAdmin();
+ PhoenixConnection metaConnection = new PhoenixConnection(ConnectionQueryServicesImpl.this, globalUrl,
+ scnProps, newEmptyMetaData())) {
try {
- metaConnection.setRunningUpgrade(true);
+ metaConnection.setRunningUpgrade(true);
metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_TABLE_METADATA);
} catch (NewerTableAlreadyExistsException ignore) {
// Ignore, as this will happen if the SYSTEM.CATALOG already exists at this fixed
@@ -2434,8 +2438,17 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
return null;
}
+
+ // The HBase namespace SYSTEM is created by the {@link ensureSystemTablesMigratedToSystemNamespace(ReadOnlyProps)} method
+ // This statement creates its entry in the SYSCAT table, so that GRANT/REVOKE commands can work
+ // with the SYSTEM namespace. (See PHOENIX-4227 https://issues.apache.org/jira/browse/PHOENIX-4227)
+ if (SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM,
+ ConnectionQueryServicesImpl.this.getProps())) {
+ metaConnection.createStatement().execute("CREATE SCHEMA IF NOT EXISTS "
+ + PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA);
+ }
if (!ConnectionQueryServicesImpl.this.upgradeRequired.get()) {
- createOtherSystemTables(metaConnection);
+ createOtherSystemTables(metaConnection, hBaseAdmin);
} else if (isAutoUpgradeEnabled && !isDoNotUpgradePropSet) {
upgradeSystemTables(url, props);
}
@@ -2448,7 +2461,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
throw e;
} catch (Exception e) {
if (e instanceof SQLException) {
- initializationException = (SQLException)e;
+ initializationException = (SQLException) e;
} else {
// wrap every other exception into a SQLException
initializationException = new SQLException(e);
@@ -2467,13 +2480,15 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
} finally {
try {
- if (initializationException != null) { throw initializationException; }
+ if (initializationException != null) {
+ throw initializationException;
+ }
} finally {
initialized = true;
}
}
}
- }
+ }
return null;
}
});
@@ -2482,11 +2497,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
Throwables.propagate(e);
}
}
-
- private void createSysMutexTable(HBaseAdmin admin) throws IOException, SQLException {
+
+ void createSysMutexTable(HBaseAdmin admin, ReadOnlyProps props) throws IOException, SQLException {
try {
- final TableName mutexTableName = TableName.valueOf(
- PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES);
+ final TableName mutexTableName = SchemaUtil.getPhysicalTableName(
+ PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME, props);
List<TableName> systemTables = getSystemTableNames(admin);
if (systemTables.contains(mutexTableName)) {
logger.debug("System mutex table already appears to exist, not creating it");
@@ -2498,7 +2513,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
columnDesc.setTimeToLive(TTL_FOR_MUTEX); // Let mutex expire after some time
tableDesc.addFamily(columnDesc);
admin.createTable(tableDesc);
- try (HTableInterface sysMutexTable = getTable(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES)) {
+ try (HTableInterface sysMutexTable = getTable(mutexTableName.getName())) {
byte[] mutexRowKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA,
PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE);
Put put = new Put(mutexRowKey);
@@ -2514,7 +2529,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
return Lists.newArrayList(admin.listTableNames(QueryConstants.SYSTEM_SCHEMA_NAME + "\\..*"));
}
- private void createOtherSystemTables(PhoenixConnection metaConnection) throws SQLException {
+ private void createOtherSystemTables(PhoenixConnection metaConnection, HBaseAdmin hbaseAdmin) throws SQLException, IOException {
try {
metaConnection.createStatement().execute(QueryConstants.CREATE_SEQUENCE_METADATA);
} catch (TableAlreadyExistsException e) {
@@ -2526,8 +2541,16 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
try {
metaConnection.createStatement().execute(QueryConstants.CREATE_FUNCTION_METADATA);
} catch (TableAlreadyExistsException ignore) {}
+
+ // Catch the IOException to log the error message and then bubble it up for the client to retry.
+ try {
+ createSysMutexTable(hbaseAdmin, ConnectionQueryServicesImpl.this.getProps());
+ } catch (IOException exception) {
+ logger.error("Failed to created SYSMUTEX table. Upgrade or migration is not possible without it. Please retry.");
+ throw exception;
+ }
}
-
+
/**
* There is no other locking needed here since only one connection (on the same or different JVM) will be able to
* acquire the upgrade mutex via {@link #acquireUpgradeMutex(long, byte[])}.
@@ -2833,7 +2856,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
metaConnection.createStatement().executeUpdate(
QueryConstants.CREATE_STATS_TABLE_METADATA);
} catch (NewerTableAlreadyExistsException ignore) {
-
+
} catch (TableAlreadyExistsException e) {
long currentServerSideTableTimeStamp = e.getTable().getTimeStamp();
if (currentServerSideTableTimeStamp < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_3_0) {
@@ -2861,14 +2884,6 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
try {
metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_FUNCTION_METADATA);
} catch (NewerTableAlreadyExistsException e) {} catch (TableAlreadyExistsException e) {}
- if (SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM,
- ConnectionQueryServicesImpl.this.getProps())) {
- try {
- metaConnection.createStatement().executeUpdate(
- "CREATE SCHEMA IF NOT EXISTS "
- + PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA);
- } catch (NewerSchemaAlreadyExistsException e) {}
- }
ConnectionQueryServicesImpl.this.upgradeRequired.set(false);
success = true;
} catch (UpgradeInProgressException | UpgradeNotRequiredException e) {
@@ -2905,14 +2920,18 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
} finally {
if (acquiredMutexLock) {
- releaseUpgradeMutex(mutexRowKey);
+ try {
+ releaseUpgradeMutex(mutexRowKey);
+ } catch (IOException e) {
+ logger.warn("Release of upgrade mutex failed ", e);
+ }
}
}
if (toThrow != null) { throw toThrow; }
}
}
}
-
+
// Special method for adding the column qualifier column for 4.10.
private PhoenixConnection addColumnQualifierColumn(PhoenixConnection oldMetaConnection, Long timestamp) throws SQLException {
Properties props = PropertiesUtil.deepCopy(oldMetaConnection.getClientInfo());
@@ -2935,7 +2954,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
metaConnection.rollback();
PColumn column = new PColumnImpl(PNameFactory.newName("COLUMN_QUALIFIER"),
PNameFactory.newName(DEFAULT_COLUMN_FAMILY_NAME), PVarbinary.INSTANCE, null, null, true, numColumns,
- SortOrder.ASC, null, null, false, null, false, false,
+ SortOrder.ASC, null, null, false, null, false, false,
Bytes.toBytes("COLUMN_QUALIFIER"));
String upsertColumnMetadata = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " +
TENANT_ID + "," +
@@ -3086,12 +3105,18 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
}
- void ensureSystemTablesUpgraded(ReadOnlyProps props)
+ void ensureSystemTablesMigratedToSystemNamespace(ReadOnlyProps props)
throws SQLException, IOException, IllegalArgumentException, InterruptedException {
if (!SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, props)) { return; }
+
+ boolean acquiredMutexLock = false;
+ byte[] mutexRowKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA,
+ PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE);
+
HTableInterface metatable = null;
try (HBaseAdmin admin = getAdmin()) {
- // Namespace-mapping is enabled at this point.
+ // The SYSTEM namespace needs to be created via HBase APIs because the "CREATE SCHEMA" statement tries to write its metadata
+ // to the SYSTEM:CATALOG table. Without the SYSTEM namespace, the SYSTEM:CATALOG table cannot be created.
try {
ensureNamespaceCreated(QueryConstants.SYSTEM_SCHEMA_NAME);
} catch (PhoenixIOException e) {
@@ -3101,7 +3126,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
// Regardless of the case 1 or 2, if the NS does not exist, we will error expectedly
// below. If the NS does exist and is mapped, the below check will exit gracefully.
}
-
+
List<TableName> tableNames = getSystemTableNames(admin);
// No tables exist matching "SYSTEM\..*", they are all already in "SYSTEM:.*"
if (tableNames.size() == 0) { return; }
@@ -3109,41 +3134,64 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
if (tableNames.size() > 5) {
logger.warn("Expected 5 system tables but found " + tableNames.size() + ":" + tableNames);
}
+
+ // Try acquiring a lock in the SYSMUTEX table before migrating the tables, since migration involves disabling them.
+ // If we cannot acquire the lock, some old client is either migrating SYSCAT or upgrading the
+ // schema of the SYSCAT table, and it should not be interrupted
+ acquiredMutexLock = acquireUpgradeMutex(MetaDataProtocol.MIN_SYSTEM_TABLE_MIGRATION_TIMESTAMP, mutexRowKey);
+ if(acquiredMutexLock) {
+ logger.debug("Acquired lock in SYSMUTEX table for migrating SYSTEM tables to SYSTEM namespace");
+ }
+ // We will not reach here if we fail to acquire the lock, since it throws UpgradeInProgressException
+
+ // Handle the upgrade of SYSMUTEX table separately since it doesn't have any entries in SYSCAT
+ logger.info("Migrating SYSTEM.MUTEX table to SYSTEM namespace.");
+ String sysMutexSrcTableName = PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME;
+ String sysMutexDestTableName = SchemaUtil.getPhysicalName(sysMutexSrcTableName.getBytes(), props).getNameAsString();
+ UpgradeUtil.mapTableToNamespace(admin, sysMutexSrcTableName, sysMutexDestTableName, PTableType.SYSTEM);
+ tableNames.remove(PhoenixDatabaseMetaData.SYSTEM_MUTEX_HBASE_TABLE_NAME);
+
byte[] mappedSystemTable = SchemaUtil
.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, props).getName();
metatable = getTable(mappedSystemTable);
if (tableNames.contains(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)) {
if (!admin.tableExists(mappedSystemTable)) {
+ logger.info("Migrating SYSTEM.CATALOG table to SYSTEM namespace.");
+ // Actual migration of SYSCAT table
UpgradeUtil.mapTableToNamespace(admin, metatable,
PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, props, null, PTableType.SYSTEM,
null);
+ // Invalidate the client-side metadataCache
ConnectionQueryServicesImpl.this.removeTable(null,
PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, null,
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0);
}
tableNames.remove(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME);
}
- tableNames.remove(PhoenixDatabaseMetaData.SYSTEM_MUTEX_HBASE_TABLE_NAME);
for (TableName table : tableNames) {
+ logger.info(String.format("Migrating %s table to SYSTEM namespace.", table.getNameAsString()));
UpgradeUtil.mapTableToNamespace(admin, metatable, table.getNameAsString(), props, null, PTableType.SYSTEM,
null);
ConnectionQueryServicesImpl.this.removeTable(null, table.getNameAsString(), null,
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0);
}
- if (!tableNames.isEmpty()) {
- clearCache();
- }
+
+ // Clear the server-side metadataCache when all tables are migrated so that the new PTable can be loaded with NS mapping
+ clearCache();
} finally {
if (metatable != null) {
metatable.close();
}
+ if(acquiredMutexLock) {
+ releaseUpgradeMutex(mutexRowKey);
+ }
}
}
-
+
/**
* Acquire distributed mutex of sorts to make sure only one JVM is able to run the upgrade code by
* making use of HBase's checkAndPut api.
- *
+ *
* @return true if client won the race, false otherwise
* @throws IOException
* @throws SQLException
@@ -3152,7 +3200,14 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
public boolean acquireUpgradeMutex(long currentServerSideTableTimestamp, byte[] rowToLock) throws IOException,
SQLException {
Preconditions.checkArgument(currentServerSideTableTimestamp < MIN_SYSTEM_TABLE_TIMESTAMP);
- try (HTableInterface sysMutexTable = getTable(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES)) {
+
+ byte[] sysMutexPhysicalTableNameBytes = getSysMutexPhysicalTableNameBytes();
+ if(sysMutexPhysicalTableNameBytes == null) {
+ throw new UpgradeInProgressException(getVersion(currentServerSideTableTimestamp),
+ getVersion(MIN_SYSTEM_TABLE_TIMESTAMP));
+ }
+
+ try (HTableInterface sysMutexTable = getTable(sysMutexPhysicalTableNameBytes)) {
byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
byte[] qualifier = UPGRADE_MUTEX;
byte[] oldValue = UPGRADE_MUTEX_UNLOCKED;
@@ -3177,11 +3232,18 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
return true;
}
}
-
+
@VisibleForTesting
- public boolean releaseUpgradeMutex(byte[] mutexRowKey) {
+ public boolean releaseUpgradeMutex(byte[] mutexRowKey) throws IOException, SQLException {
boolean released = false;
- try (HTableInterface sysMutexTable = getTable(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES)) {
+
+ byte[] sysMutexPhysicalTableNameBytes = getSysMutexPhysicalTableNameBytes();
+ if(sysMutexPhysicalTableNameBytes == null) {
+ // We should never really be in a situation where neither SYSMUTEX nor SYSTEM:MUTEX exists
+ return true;
+ }
+
+ try (HTableInterface sysMutexTable = getTable(sysMutexPhysicalTableNameBytes)) {
byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
byte[] qualifier = UPGRADE_MUTEX;
byte[] expectedValue = UPGRADE_MUTEX_LOCKED;
@@ -3195,6 +3257,19 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
return released;
}
+ private byte[] getSysMutexPhysicalTableNameBytes() throws IOException, SQLException {
+ byte[] sysMutexPhysicalTableNameBytes = null;
+ try(HBaseAdmin admin = getAdmin()) {
+ if(admin.tableExists(PhoenixDatabaseMetaData.SYSTEM_MUTEX_HBASE_TABLE_NAME)) {
+ sysMutexPhysicalTableNameBytes = PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES;
+ } else if (admin.tableExists(TableName.valueOf(
+ SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME, props).getName()))) {
+ sysMutexPhysicalTableNameBytes = SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME, props).getName();
+ }
+ }
+ return sysMutexPhysicalTableNameBytes;
+ }
+
private String addColumn(String columnsToAddSoFar, String columns) {
if (columnsToAddSoFar == null || columnsToAddSoFar.isEmpty()) {
return columns;
@@ -3662,6 +3737,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
SQLException sqlE = null;
HTableInterface htable = this.getTable(SchemaUtil
.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName());
+
try {
htable.coprocessorService(MetaDataService.class, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
new Batch.Call<MetaDataService, ClearTableFromCacheResponse>() {
@@ -4041,7 +4117,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
private void waitForRandomDuration() throws InterruptedException {
new CountDownLatch(1).await(random.nextInt(MAX_WAIT_TIME), MILLISECONDS);
}
-
+
private static class InternalRenewLeaseTaskException extends Exception {
public InternalRenewLeaseTaskException(String msg) {
super(msg);
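
The acquireUpgradeMutex/releaseUpgradeMutex pair above amounts to a distributed lock built on HBase's atomic checkAndPut: the mutex cell flips from UNLOCKED to LOCKED only for the one client that observes the expected old value. A stripped-down sketch against the HBase 1.x client API (the row, family, and qualifier values here are illustrative, not Phoenix's actual constants):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.HTableInterface;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    final class MutexSketch {
        // Returns true only for the single caller whose checkAndPut wins the race.
        static boolean tryAcquire(HTableInterface mutexTable, byte[] row,
                byte[] family, byte[] qualifier) throws IOException {
            byte[] unlocked = Bytes.toBytes("UNLOCKED");
            byte[] locked = Bytes.toBytes("LOCKED");
            Put put = new Put(row);
            put.addColumn(family, qualifier, locked);
            // Atomic compare-and-set: succeeds only if the cell currently holds UNLOCKED.
            return mutexTable.checkAndPut(row, family, qualifier, unlocked, put);
        }
    }

Release is the mirror image: a checkAndPut writing UNLOCKED while expecting LOCKED.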
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c527c1b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index c06912d..f5825b4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -1735,26 +1735,7 @@ public class UpgradeUtil {
? "For system table " + QueryServices.IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE
+ " also needs to be enabled along with " + QueryServices.IS_NAMESPACE_MAPPING_ENABLED
: QueryServices.IS_NAMESPACE_MAPPING_ENABLED + " is not enabled"); }
- boolean srcTableExists=admin.tableExists(srcTableName);
- // we need to move physical table in actual namespace for TABLE and Index
- if (srcTableExists && (PTableType.TABLE.equals(pTableType)
- || PTableType.INDEX.equals(pTableType) || PTableType.SYSTEM.equals(pTableType))) {
- boolean destTableExists=admin.tableExists(destTableName);
- if (!destTableExists) {
- String snapshotName = QueryConstants.UPGRADE_TABLE_SNAPSHOT_PREFIX + srcTableName;
- logger.info("Disabling table " + srcTableName + " ..");
- admin.disableTable(srcTableName);
- logger.info(String.format("Taking snapshot %s of table %s..", snapshotName, srcTableName));
- admin.snapshot(snapshotName, srcTableName);
- logger.info(
- String.format("Restoring snapshot %s in destination table %s..", snapshotName, destTableName));
- admin.cloneSnapshot(Bytes.toBytes(snapshotName), Bytes.toBytes(destTableName));
- logger.info(String.format("deleting old table %s..", srcTableName));
- admin.deleteTable(srcTableName);
- logger.info(String.format("deleting snapshot %s..", snapshotName));
- admin.deleteSnapshot(snapshotName);
- }
- }
+ mapTableToNamespace(admin, srcTableName, destTableName, pTableType);
byte[] tableKey = SchemaUtil.getTableKey(tenantId != null ? tenantId.getString() : null,
SchemaUtil.getSchemaNameFromFullName(phoenixTableName),
@@ -1778,6 +1759,29 @@ public class UpgradeUtil {
}
}
+ public static void mapTableToNamespace(HBaseAdmin admin, String srcTableName, String destTableName, PTableType pTableType) throws IOException {
+ boolean srcTableExists=admin.tableExists(srcTableName);
+ // we need to move physical table in actual namespace for TABLE and Index
+ if (srcTableExists && (PTableType.TABLE.equals(pTableType)
+ || PTableType.INDEX.equals(pTableType) || PTableType.SYSTEM.equals(pTableType))) {
+ boolean destTableExists=admin.tableExists(destTableName);
+ if (!destTableExists) {
+ String snapshotName = QueryConstants.UPGRADE_TABLE_SNAPSHOT_PREFIX + srcTableName;
+ logger.info("Disabling table " + srcTableName + " ..");
+ admin.disableTable(srcTableName);
+ logger.info(String.format("Taking snapshot %s of table %s..", snapshotName, srcTableName));
+ admin.snapshot(snapshotName, srcTableName);
+ logger.info(
+ String.format("Restoring snapshot %s in destination table %s..", snapshotName, destTableName));
+ admin.cloneSnapshot(Bytes.toBytes(snapshotName), Bytes.toBytes(destTableName));
+ logger.info(String.format("deleting old table %s..", srcTableName));
+ admin.deleteTable(srcTableName);
+ logger.info(String.format("deleting snapshot %s..", snapshotName));
+ admin.deleteSnapshot(snapshotName);
+ }
+ }
+ }
+
/*
* Method to map an existing Phoenix table to a namespace. Should not be used if the table has views or indexes; instead,
* use the map table utility in psql.py
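As a usage note, the helper extracted above (mapTableToNamespace) can now be invoked on its own when a single physical table needs migrating. A minimal caller sketch; the configuration setup and table names are illustrative, only the method signature comes from the patch:

    // Sketch only: names are illustrative; mapTableToNamespace throws IOException.
    Configuration conf = HBaseConfiguration.create();
    try (HBaseAdmin admin = new HBaseAdmin(conf)) {
        // Runs the disable -> snapshot -> clone -> delete sequence shown above,
        // moving MY_SCHEMA.MY_TABLE into the MY_SCHEMA namespace.
        UpgradeUtil.mapTableToNamespace(admin, "MY_SCHEMA.MY_TABLE",
            "MY_SCHEMA:MY_TABLE", PTableType.TABLE);
    }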
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6c527c1b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
index 73ddd2d..4708ffb 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
@@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.doCallRealMethod;
+import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
@@ -46,7 +47,9 @@ public class ConnectionQueryServicesImplTest {
ConnectionQueryServicesImpl cqs = mock(ConnectionQueryServicesImpl.class);
// Invoke the real methods for these two calls
when(cqs.createSchema(any(List.class), anyString())).thenCallRealMethod();
- doCallRealMethod().when(cqs).ensureSystemTablesUpgraded(any(ReadOnlyProps.class));
+ doCallRealMethod().when(cqs).ensureSystemTablesMigratedToSystemNamespace(any(ReadOnlyProps.class));
+ // Do nothing for this method; we only verify later that it was invoked
+ doNothing().when(cqs).createSysMutexTable(any(HBaseAdmin.class), any(ReadOnlyProps.class));
// Spoof out this call so that ensureSystemTablesMigratedToSystemNamespace() will return fast.
when(cqs.getSystemTableNames(any(HBaseAdmin.class))).thenReturn(Collections.<TableName> emptyList());
@@ -54,10 +57,10 @@ public class ConnectionQueryServicesImplTest {
// Throw a special exception to check on later
doThrow(PHOENIX_IO_EXCEPTION).when(cqs).ensureNamespaceCreated(anyString());
- // Make sure that ensureSystemTablesUpgraded will try to migrate the system tables.
+ // Make sure that ensureSystemTablesMigratedToSystemNamespace will try to migrate the system tables.
Map<String,String> props = new HashMap<>();
props.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
- cqs.ensureSystemTablesUpgraded(new ReadOnlyProps(props));
+ cqs.ensureSystemTablesMigratedToSystemNamespace(new ReadOnlyProps(props));
// Should be called after upgradeSystemTables()
// Proves that execution proceeded
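The mock setup above is Mockito's partial-mock pattern: the method under test runs for real while its collaborators are stubbed to no-ops, so only the interaction is verified. A self-contained sketch with illustrative class names (not Phoenix types):

    import static org.mockito.Mockito.*;

    class Service {
        void run() { sideEffect(); }
        void sideEffect() { /* expensive in real life */ }
    }

    @Test
    public void partialMockSketch() {
        Service svc = mock(Service.class);      // every method stubbed by default
        doCallRealMethod().when(svc).run();     // except the method under test
        doNothing().when(svc).sideEffect();     // keep the collaborator inert
        svc.run();
        verify(svc).sideEffect();               // prove the call path was taken
    }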
[24/37] phoenix git commit: PHOENIX-4287 Addendum to correctly set
useStatsForParallelization property
Posted by ja...@apache.org.
PHOENIX-4287 Addendum to correctly set useStatsForParallelization property
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/637b24f3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/637b24f3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/637b24f3
Branch: refs/heads/4.x-HBase-1.1
Commit: 637b24f323dd28c008709d8f9eeb97a8eb3c0b7d
Parents: ef39fee
Author: Samarth Jain <sa...@apache.org>
Authored: Wed Nov 1 21:13:40 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:40 2017 -0800
----------------------------------------------------------------------
.../end2end/ExplainPlanWithStatsEnabledIT.java | 87 +++++++++++++++++---
.../coprocessor/MetaDataEndpointImpl.java | 2 +-
.../phoenix/iterate/BaseResultIterators.java | 9 +-
.../apache/phoenix/schema/DelegateTable.java | 2 +-
.../apache/phoenix/schema/MetaDataClient.java | 26 +++---
.../java/org/apache/phoenix/schema/PTable.java | 2 +-
.../org/apache/phoenix/schema/PTableImpl.java | 22 ++---
7 files changed, 110 insertions(+), 40 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/637b24f3/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
index 25d4194..b5e4588 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
@@ -17,7 +17,6 @@
*/
package org.apache.phoenix.end2end;
-import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_USE_STATS_FOR_PARALLELIZATION;
import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -34,6 +33,7 @@ import java.util.List;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixResultSet;
import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.schema.PTableKey;
import org.apache.phoenix.schema.TableNotFoundException;
@@ -352,7 +352,7 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
}
@Test
- public void testSettingUseStatsForQueryPlanProperty() throws Exception {
+ public void testSettingUseStatsForParallelizationProperty() throws Exception {
try (Connection conn = DriverManager.getConnection(getUrl())) {
String table = generateUniqueName();
String ddl =
@@ -360,20 +360,31 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
+ " (PK1 INTEGER NOT NULL PRIMARY KEY, KV1 VARCHAR) USE_STATS_FOR_PARALLELIZATION = false";
conn.createStatement().execute(ddl);
assertUseStatsForQueryFlag(table, conn.unwrap(PhoenixConnection.class), false);
+
ddl = "ALTER TABLE " + table + " SET USE_STATS_FOR_PARALLELIZATION = true";
conn.createStatement().execute(ddl);
assertUseStatsForQueryFlag(table, conn.unwrap(PhoenixConnection.class), true);
+
+ table = generateUniqueName();
+ ddl =
+ "CREATE TABLE " + table
+ + " (PK1 INTEGER NOT NULL PRIMARY KEY, KV1 VARCHAR) USE_STATS_FOR_PARALLELIZATION = false";
+ conn.createStatement().execute(ddl);
+ assertUseStatsForQueryFlag(table, conn.unwrap(PhoenixConnection.class), false);
+
table = generateUniqueName();
ddl = "CREATE TABLE " + table + " (PK1 INTEGER NOT NULL PRIMARY KEY, KV1 VARCHAR)";
conn.createStatement().execute(ddl);
- assertUseStatsForQueryFlag(table, conn.unwrap(PhoenixConnection.class),
- DEFAULT_USE_STATS_FOR_PARALLELIZATION);
+
+ // because we didn't set the property, PTable.useStatsForParallelization() should return
+ // null
+ assertUseStatsForQueryFlag(table, conn.unwrap(PhoenixConnection.class), null);
}
}
private static void assertUseStatsForQueryFlag(String tableName, PhoenixConnection conn,
- boolean flag) throws TableNotFoundException, SQLException {
- assertEquals(flag,
+ Boolean expected) throws TableNotFoundException, SQLException {
+ assertEquals(expected,
conn.unwrap(PhoenixConnection.class).getMetaDataCache()
.getTableRef(new PTableKey(null, tableName)).getTable()
.useStatsForParallelization());
@@ -383,7 +394,12 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
stmt.setString(1, tableName);
ResultSet rs = stmt.executeQuery();
rs.next();
- assertEquals(flag, rs.getBoolean(1));
+ boolean b = rs.getBoolean(1);
+ if (expected == null) {
+ assertTrue(rs.wasNull());
+ } else {
+ assertEquals(expected, b);
+ }
}
@Test
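The getBoolean()/wasNull() pair above is the standard JDBC idiom for reading a nullable BOOLEAN column into a three-valued Boolean. A minimal helper sketch:

    // ResultSet.getBoolean() returns false for SQL NULL, so wasNull() must be
    // checked immediately after the read to tell NULL apart from FALSE.
    private static Boolean readNullableBoolean(ResultSet rs, int col) throws SQLException {
        boolean b = rs.getBoolean(col);
        return rs.wasNull() ? null : b;
    }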
@@ -510,8 +526,7 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
int guidePostWidth = 20;
String ddl =
"CREATE TABLE " + tableName + " (k INTEGER PRIMARY KEY, a bigint, b bigint)"
- + " GUIDE_POSTS_WIDTH=" + guidePostWidth
- + ", USE_STATS_FOR_PARALLELIZATION=false";
+ + " GUIDE_POSTS_WIDTH=" + guidePostWidth;
byte[][] splits =
new byte[][] { Bytes.toBytes(102), Bytes.toBytes(105), Bytes.toBytes(108) };
BaseTest.createTestTable(getUrl(), ddl, splits, null);
@@ -531,18 +546,70 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
List<Object> binds = Lists.newArrayList();
try (Connection conn = DriverManager.getConnection(getUrl())) {
String sql = "SELECT COUNT(*) " + " FROM " + tableName;
+ // We don't have the USE_STATS_FOR_PARALLELIZATION property
+ // set on the table, so we end up defaulting to the
+ // value set in the config, which is true.
ResultSet rs = conn.createStatement().executeQuery(sql);
+ // stats are being used for parallelization. So number of scans is higher.
+ assertEquals(14, rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan()
+ .getScans().get(0).size());
assertTrue(rs.next());
assertEquals(10, rs.getInt(1));
Estimate info = getByteRowEstimates(conn, sql, binds);
assertEquals((Long) 10l, info.getEstimatedRows());
assertTrue(info.getEstimateInfoTs() > 0);
+
+ // Now, let's disable USE_STATS_FOR_PARALLELIZATION on the table
+ conn.createStatement().execute("ALTER TABLE " + tableName + " SET USE_STATS_FOR_PARALLELIZATION = " + false);
+ rs = conn.createStatement().executeQuery(sql);
+ // stats are not being used for parallelization. So number of scans is lower.
+ assertEquals(4, rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan()
+ .getScans().get(0).size());
+ assertTrue(rs.next());
+ assertEquals(10, rs.getInt(1));
+ info = getByteRowEstimates(conn, sql, binds);
+ assertEquals((Long) 10l, info.getEstimatedRows());
+ assertTrue(info.getEstimateInfoTs() > 0);
+
+ // assert that the aggregate query on view also works correctly
+ String viewName = "V_" + generateUniqueName();
+ conn.createStatement()
+ .execute("CREATE VIEW " + viewName + " AS SELECT * FROM " + tableName + " USE_STATS_FOR_PARALLELIZATION = false");
+ sql = "SELECT COUNT(*) FROM " + viewName;
+ rs = conn.createStatement().executeQuery(sql);
+ // stats are not being used for parallelization. So number of scans is lower.
+ assertEquals(4, rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan()
+ .getScans().get(0).size());
+ assertTrue(rs.next());
+ assertEquals(10, rs.getInt(1));
+ info = getByteRowEstimates(conn, sql, binds);
+ assertEquals((Long) 10l, info.getEstimatedRows());
+ assertTrue(info.getEstimateInfoTs() > 0);
// Now let's make sure that when using stats for parallelization, our estimates
- // and query results stay the same
+ // and query results stay the same for view and base table
conn.createStatement().execute(
"ALTER TABLE " + tableName + " SET USE_STATS_FOR_PARALLELIZATION=true");
+ sql = "SELECT COUNT(*) FROM " + tableName;
+ // query the table
+ rs = conn.createStatement().executeQuery(sql);
+ // stats are being used for parallelization. So number of scans is higher.
+ assertEquals(14, rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan()
+ .getScans().get(0).size());
+ assertTrue(rs.next());
+ assertEquals(10, rs.getInt(1));
+ info = getByteRowEstimates(conn, sql, binds);
+ assertEquals((Long) 10l, info.getEstimatedRows());
+ assertTrue(info.getEstimateInfoTs() > 0);
+
+ conn.createStatement().execute(
+ "ALTER TABLE " + viewName + " SET USE_STATS_FOR_PARALLELIZATION=true");
+ sql = "SELECT COUNT(*) FROM " + viewName;
+ // query the view
rs = conn.createStatement().executeQuery(sql);
+ // stats are being used for parallelization. So number of scans is higher.
+ assertEquals(14, rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan()
+ .getScans().get(0).size());
assertTrue(rs.next());
assertEquals(10, rs.getInt(1));
info = getByteRowEstimates(conn, sql, binds);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/637b24f3/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 43c885a..5c16e5b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -960,7 +960,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
.fromSerializedValue((byte)PTinyint.INSTANCE.toObject(encodingSchemeKv.getValueArray(),
encodingSchemeKv.getValueOffset(), encodingSchemeKv.getValueLength()));
Cell useStatsForParallelizationKv = tableKeyValues[USE_STATS_FOR_PARALLELIZATION_INDEX];
- boolean useStatsForParallelization = useStatsForParallelizationKv == null ? true : Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(useStatsForParallelizationKv.getValueArray(), useStatsForParallelizationKv.getValueOffset(), useStatsForParallelizationKv.getValueLength()));
+ Boolean useStatsForParallelization = useStatsForParallelizationKv == null ? null : Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(useStatsForParallelizationKv.getValueArray(), useStatsForParallelizationKv.getValueOffset(), useStatsForParallelizationKv.getValueLength()));
List<PColumn> columns = Lists.newArrayListWithExpectedSize(columnCount);
List<PTable> indexes = Lists.newArrayList();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/637b24f3/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index e9deec3..b4c9698 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -35,7 +35,6 @@ import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.EOFException;
-import java.io.IOException;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.BitSet;
@@ -152,7 +151,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
private Long estimateInfoTimestamp;
private boolean hasGuidePosts;
private Scan scan;
- private boolean useStatsForParallelization;
+ private final boolean useStatsForParallelization;
protected Map<ImmutableBytesPtr,ServerCache> caches;
static final Function<HRegionLocation, KeyRange> TO_KEY_RANGE = new Function<HRegionLocation, KeyRange>() {
@@ -492,7 +491,11 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
scanId = new UUID(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong()).toString();
initializeScan(plan, perScanLimit, offset, scan);
- this.useStatsForParallelization = table.useStatsForParallelization();
+ this.useStatsForParallelization =
+ table.useStatsForParallelization() == null
+ ? context.getConnection().getQueryServices().getConfiguration().getBoolean(
+ USE_STATS_FOR_PARALLELIZATION, DEFAULT_USE_STATS_FOR_PARALLELIZATION)
+ : table.useStatsForParallelization();
this.scans = getParallelScans();
List<KeyRange> splitRanges = Lists.newArrayListWithExpectedSize(scans.size() * ESTIMATED_GUIDEPOSTS_PER_REGION);
for (List<Scan> scanList : scans) {
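The resolution rule this hunk introduces is easy to state in isolation: a non-null table-level value wins, and null means inherit the cluster-wide config default. Distilled into an illustrative helper (not a Phoenix API):

    // Three-valued property resolution: TRUE/FALSE set on the table wins;
    // null falls through to the default read from configuration.
    static boolean resolveUseStatsForParallelization(Boolean tableProp, boolean clusterDefault) {
        return tableProp == null ? clusterDefault : tableProp;
    }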
http://git-wip-us.apache.org/repos/asf/phoenix/blob/637b24f3/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java
index 3da27a7..8f15c5e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java
@@ -312,7 +312,7 @@ public class DelegateTable implements PTable {
}
@Override
- public boolean useStatsForParallelization() {
+ public Boolean useStatsForParallelization() {
return delegate.useStatsForParallelization();
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/637b24f3/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 701633b..7ce2167 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -2002,15 +2002,8 @@ public class MetaDataClient {
}
}
- boolean useStatsForParallelization =
- connection.getQueryServices().getProps().getBoolean(
- QueryServices.USE_STATS_FOR_PARALLELIZATION,
- QueryServicesOptions.DEFAULT_USE_STATS_FOR_PARALLELIZATION);
Boolean useStatsForParallelizationProp =
(Boolean) TableProperty.USE_STATS_FOR_PARALLELIZATION.getValue(tableProps);
- if (useStatsForParallelizationProp != null) {
- useStatsForParallelization = useStatsForParallelizationProp;
- }
boolean sharedTable = statement.getTableType() == PTableType.VIEW || allocateIndexId;
if (transactional) {
@@ -2606,7 +2599,11 @@ public class MetaDataClient {
}
tableUpsert.setByte(26, immutableStorageScheme.getSerializedMetadataValue());
tableUpsert.setByte(27, encodingScheme.getSerializedMetadataValue());
- tableUpsert.setBoolean(28, useStatsForParallelization);
+ if (useStatsForParallelizationProp == null) {
+ tableUpsert.setNull(28, Types.BOOLEAN);
+ } else {
+ tableUpsert.setBoolean(28, useStatsForParallelizationProp);
+ }
tableUpsert.execute();
if (asyncCreatedDate != null) {
@@ -2711,7 +2708,7 @@ public class MetaDataClient {
PTable.INITIAL_SEQ_NUM, pkName == null ? null : PNameFactory.newName(pkName), saltBucketNum, columns.values(),
parent == null ? null : parent.getSchemaName(), parent == null ? null : parent.getTableName(), Collections.<PTable>emptyList(), isImmutableRows,
physicalNames, defaultFamilyName == null ? null : PNameFactory.newName(defaultFamilyName), viewStatement, Boolean.TRUE.equals(disableWAL), multiTenant, storeNulls, viewType,
- result.getViewIndexId(), indexType, rowKeyOrderOptimizable, transactional, updateCacheFrequency, 0L, isNamespaceMapped, autoPartitionSeq, isAppendOnlySchema, immutableStorageScheme, encodingScheme, cqCounterToBe, useStatsForParallelization);
+ result.getViewIndexId(), indexType, rowKeyOrderOptimizable, transactional, updateCacheFrequency, 0L, isNamespaceMapped, autoPartitionSeq, isAppendOnlySchema, immutableStorageScheme, encodingScheme, cqCounterToBe, useStatsForParallelizationProp);
result = new MetaDataMutationResult(code, result.getMutationTime(), table, true);
addTableToCache(result);
return table;
@@ -3304,11 +3301,12 @@ public class MetaDataClient {
}
}
Boolean useStatsForParallelization = null;
- if (useStatsForParallelizationProp != null) {
- if (useStatsForParallelizationProp.booleanValue() != table.useStatsForParallelization()) {
- useStatsForParallelization = useStatsForParallelizationProp;
- changingPhoenixTableProperty = true;
- }
+ if (useStatsForParallelizationProp != null
+ && (table.useStatsForParallelization() == null
+ || (useStatsForParallelizationProp.booleanValue() != table
+ .useStatsForParallelization()))) {
+ useStatsForParallelization = useStatsForParallelizationProp;
+ changingPhoenixTableProperty = true;
}
Boolean isTransactional = null;
if (isTransactionalProp != null) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/637b24f3/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
index d59e785..ec931b7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
@@ -705,7 +705,7 @@ public interface PTable extends PMetaDataEntity {
ImmutableStorageScheme getImmutableStorageScheme();
QualifierEncodingScheme getEncodingScheme();
EncodedCQCounter getEncodedCQCounter();
- boolean useStatsForParallelization();
+ Boolean useStatsForParallelization();
/**
* Class to help track encoded column qualifier counters per column family.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/637b24f3/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index 1cf61a2..9525127 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -151,7 +151,7 @@ public class PTableImpl implements PTable {
private ImmutableStorageScheme immutableStorageScheme;
private QualifierEncodingScheme qualifierEncodingScheme;
private EncodedCQCounter encodedCQCounter;
- private boolean useStatsForParallelization;
+ private Boolean useStatsForParallelization;
public PTableImpl() {
this.indexes = Collections.emptyList();
@@ -184,7 +184,7 @@ public class PTableImpl implements PTable {
this.isNamespaceMapped = isNamespaceMapped;
}
- public PTableImpl(PName tenantId, String schemaName, String tableName, long timestamp, List<PColumnFamily> families, boolean isNamespaceMapped, ImmutableStorageScheme storageScheme, QualifierEncodingScheme encodingScheme, boolean useStatsForParallelization) { // For base table of mapped VIEW
+ public PTableImpl(PName tenantId, String schemaName, String tableName, long timestamp, List<PColumnFamily> families, boolean isNamespaceMapped, ImmutableStorageScheme storageScheme, QualifierEncodingScheme encodingScheme, Boolean useStatsForParallelization) { // For base table of mapped VIEW
Preconditions.checkArgument(tenantId==null || tenantId.getBytes().length > 0); // tenantId should be null or not empty
this.tenantId = tenantId;
this.name = PNameFactory.newName(SchemaUtil.getTableName(schemaName, tableName));
@@ -214,7 +214,7 @@ public class PTableImpl implements PTable {
// For indexes stored in shared physical tables
public PTableImpl(PName tenantId, PName schemaName, PName tableName, long timestamp, List<PColumnFamily> families,
List<PColumn> columns, List<PName> physicalNames, Short viewIndexId, boolean multiTenant, boolean isNamespaceMpped, ImmutableStorageScheme storageScheme, QualifierEncodingScheme qualifierEncodingScheme,
- EncodedCQCounter encodedCQCounter, boolean useStatsForParallelization) throws SQLException {
+ EncodedCQCounter encodedCQCounter, Boolean useStatsForParallelization) throws SQLException {
this.pkColumns = this.allColumns = Collections.emptyList();
this.rowKeySchema = RowKeySchema.EMPTY_SCHEMA;
this.indexes = Collections.emptyList();
@@ -385,7 +385,7 @@ public class PTableImpl implements PTable {
boolean isImmutableRows, List<PName> physicalNames, PName defaultFamilyName, String viewExpression,
boolean disableWAL, boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId,
IndexType indexType, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency,
- long indexDisableTimestamp, boolean isNamespaceMapped, String autoPartitionSeqName, boolean isAppendOnlySchema, ImmutableStorageScheme storageScheme, QualifierEncodingScheme qualifierEncodingScheme, EncodedCQCounter encodedCQCounter, boolean useStatsForParallelization) throws SQLException {
+ long indexDisableTimestamp, boolean isNamespaceMapped, String autoPartitionSeqName, boolean isAppendOnlySchema, ImmutableStorageScheme storageScheme, QualifierEncodingScheme qualifierEncodingScheme, EncodedCQCounter encodedCQCounter, Boolean useStatsForParallelization) throws SQLException {
return new PTableImpl(tenantId, schemaName, tableName, type, state, timeStamp, sequenceNumber, pkName, bucketNum, columns, dataSchemaName,
dataTableName, indexes, isImmutableRows, physicalNames, defaultFamilyName,
viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexId,
@@ -401,7 +401,7 @@ public class PTableImpl implements PTable {
IndexType indexType, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency,
int baseColumnCount, long indexDisableTimestamp, boolean isNamespaceMapped,
String autoPartitionSeqName, boolean isAppendOnlySchema, ImmutableStorageScheme storageScheme,
- QualifierEncodingScheme qualifierEncodingScheme, EncodedCQCounter encodedCQCounter, boolean useStatsForParallelization)
+ QualifierEncodingScheme qualifierEncodingScheme, EncodedCQCounter encodedCQCounter, Boolean useStatsForParallelization)
throws SQLException {
return new PTableImpl(tenantId, schemaName, tableName, type, state, timeStamp, sequenceNumber, pkName,
bucketNum, columns, dataSchemaName, dataTableName, indexes, isImmutableRows, physicalNames,
@@ -417,7 +417,7 @@ public class PTableImpl implements PTable {
boolean storeNulls, ViewType viewType, Short viewIndexId, IndexType indexType,
int baseColumnCount, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency,
long indexDisableTimestamp, boolean isNamespaceMapped, String autoPartitionSeqName, boolean isAppendOnlySchema, ImmutableStorageScheme storageScheme,
- QualifierEncodingScheme qualifierEncodingScheme, EncodedCQCounter encodedCQCounter, boolean useStatsForParallelization) throws SQLException {
+ QualifierEncodingScheme qualifierEncodingScheme, EncodedCQCounter encodedCQCounter, Boolean useStatsForParallelization) throws SQLException {
init(tenantId, schemaName, tableName, type, state, timeStamp, sequenceNumber, pkName, bucketNum, columns,
parentSchemaName, parentTableName, indexes, isImmutableRows, physicalNames, defaultFamilyName,
viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexId, indexType, baseColumnCount, rowKeyOrderOptimizable,
@@ -457,7 +457,7 @@ public class PTableImpl implements PTable {
boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId,
IndexType indexType , int baseColumnCount, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency, long indexDisableTimestamp,
boolean isNamespaceMapped, String autoPartitionSeqName, boolean isAppendOnlySchema, ImmutableStorageScheme storageScheme, QualifierEncodingScheme qualifierEncodingScheme,
- EncodedCQCounter encodedCQCounter, boolean useStatsForParallelization) throws SQLException {
+ EncodedCQCounter encodedCQCounter, Boolean useStatsForParallelization) throws SQLException {
Preconditions.checkNotNull(schemaName);
Preconditions.checkArgument(tenantId==null || tenantId.getBytes().length > 0); // tenantId should be null or not empty
int estimatedSize = SizedUtil.OBJECT_SIZE * 2 + 23 * SizedUtil.POINTER_SIZE + 4 * SizedUtil.INT_SIZE + 2 * SizedUtil.LONG_SIZE + 2 * SizedUtil.INT_OBJECT_SIZE +
@@ -1354,7 +1354,7 @@ public class PTableImpl implements PTable {
}
}
}
- boolean useStatsForParallelization = true;
+ Boolean useStatsForParallelization = null;
if (table.hasUseStatsForParallelization()) {
useStatsForParallelization = table.getUseStatsForParallelization();
}
@@ -1466,7 +1466,9 @@ public class PTableImpl implements PTable {
if (table.getEncodingScheme() != null) {
builder.setEncodingScheme(ByteStringer.wrap(new byte[]{table.getEncodingScheme().getSerializedMetadataValue()}));
}
- builder.setUseStatsForParallelization(table.useStatsForParallelization());
+ if (table.useStatsForParallelization() != null) {
+ builder.setUseStatsForParallelization(table.useStatsForParallelization());
+ }
return builder.build();
}
@@ -1553,7 +1555,7 @@ public class PTableImpl implements PTable {
}
@Override
- public boolean useStatsForParallelization() {
+ public Boolean useStatsForParallelization() {
return useStatsForParallelization;
}
[21/37] phoenix git commit: PHOENIX-4333 Test to demonstrate partial
stats information for tenant views
Posted by ja...@apache.org.
PHOENIX-4333 Test to demonstrate partial stats information for tenant views
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e811218f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e811218f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e811218f
Branch: refs/heads/4.x-HBase-1.1
Commit: e811218f9b8d9fba71e88a8a4d9cd3e2ea47ff8d
Parents: cba2b57
Author: Samarth Jain <sa...@apache.org>
Authored: Tue Oct 31 14:14:56 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:40 2017 -0800
----------------------------------------------------------------------
.../end2end/ExplainPlanWithStatsEnabledIT.java | 95 ++++++++++++++++++++
1 file changed, 95 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e811218f/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
index 931c398..25d4194 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
@@ -31,6 +31,7 @@ import java.sql.SQLException;
import java.util.Collections;
import java.util.List;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.BaseTest;
@@ -782,4 +783,98 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
this.time += t;
}
}
+
+ @Test
+ public void testPartialStatsForTenantViews() throws Exception {
+ String tenant1View = generateUniqueName();
+ String tenant2View = generateUniqueName();
+ String multiTenantTable = generateUniqueName();
+ String tenantId1 = "00Dabcdetenant1";
+ String tenantId2 = "00Dabcdetenant2";
+
+ String ddl =
+ "CREATE TABLE " + multiTenantTable
+ + " (orgId CHAR(15) NOT NULL, pk2 CHAR(3) NOT NULL, a bigint, b bigint CONSTRAINT PK PRIMARY KEY "
+ + "(ORGID, PK2)) MULTI_TENANT=true, GUIDE_POSTS_WIDTH=20";
+ createTestTable(getUrl(), ddl, null, null);
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ // split such that some data for view2 resides on region of view1
+ try (HBaseAdmin admin =
+ conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ byte[] splitKey = Bytes.toBytes("00Dabcdetenant200B");
+ admin.split(Bytes.toBytes(multiTenantTable), splitKey);
+ }
+
+ /**
+ * Insert 2 rows for tenant1 and 6 for tenant2
+ */
+ conn.createStatement().execute(
+ "upsert into " + multiTenantTable + " values ('" + tenantId1 + "','00A',1,1)");
+ conn.createStatement().execute(
+ "upsert into " + multiTenantTable + " values ('" + tenantId1 + "','00B',2,2)");
+ conn.createStatement().execute(
+ "upsert into " + multiTenantTable + " values ('" + tenantId2 + "','00A',3,3)");
+ // We split at tenant2 + 00B. So the following rows will reside in a different region
+ conn.createStatement().execute(
+ "upsert into " + multiTenantTable + " values ('" + tenantId2 + "','00B',4,4)");
+ conn.createStatement().execute(
+ "upsert into " + multiTenantTable + " values ('" + tenantId2 + "','00C',5,5)");
+ conn.createStatement().execute(
+ "upsert into " + multiTenantTable + " values ('" + tenantId2 + "','00D',6,6)");
+ conn.createStatement().execute(
+ "upsert into " + multiTenantTable + " values ('" + tenantId2 + "','00E',7,7)");
+ conn.createStatement().execute(
+ "upsert into " + multiTenantTable + " values ('" + tenantId2 + "','00F',8,8)");
+ conn.commit();
+ }
+ try (Connection conn = getTenantConnection(tenantId1)) {
+ conn.createStatement().execute(
+ "CREATE VIEW " + tenant1View + " AS SELECT * FROM " + multiTenantTable);
+ }
+ try (Connection conn = getTenantConnection(tenantId2)) {
+ conn.createStatement().execute(
+ "CREATE VIEW " + tenant2View + " AS SELECT * FROM " + multiTenantTable);
+ }
+ String sql = "";
+ List<Object> binds = Lists.newArrayList();
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ /*
+ * I have seen compaction running and generating stats for the second region of
+ * tenant2View. So let's disable compaction on the table, delete any stats we have
+ * collected in SYSTEM.STATS table, clear cache and run update stats to make sure our
+ * test gets a deterministic setup.
+ */
+ String disableCompaction =
+ "ALTER TABLE " + multiTenantTable + " SET COMPACTION_ENABLED = false";
+ conn.createStatement().executeUpdate(disableCompaction);
+ String delete =
+ "DELETE FROM SYSTEM.STATS WHERE PHYSICAL_NAME = '" + multiTenantTable + "'";
+ conn.createStatement().executeUpdate(delete);
+ conn.commit();
+ conn.unwrap(PhoenixConnection.class).getQueryServices().clearCache();
+ }
+ // Now let's run update stats on tenant1View
+ try (Connection conn = getTenantConnection(tenantId1)) {
+ conn.createStatement().execute("UPDATE STATISTICS " + tenant1View);
+ }
+ // query tenant2 view
+ try (Connection conn = getTenantConnection(tenantId2)) {
+ sql = "SELECT * FROM " + tenant2View;
+
+ Estimate info = getByteRowEstimates(conn, sql, binds);
+ /*
+ * Because we ran update stats only for tenant1View, there is only partial guidepost
+ * info available for tenant2View.
+ */
+ assertEquals((Long) 1l, info.estimatedRows);
+ // ok now run update stats for tenant2 view
+ conn.createStatement().execute("UPDATE STATISTICS " + tenant2View);
+ /*
+ * And now, let's recheck our estimate info. We should have all the rows of view2
+ * available now.
+ */
+ info = getByteRowEstimates(conn, sql, binds);
+ assertEquals((Long) 6l, info.estimatedRows);
+ }
+ }
}
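For orientation, the tenant-scoped statistics flow that the test drives reduces to a few JDBC calls. A minimal sketch; the URL and object names are illustrative:

    Properties props = new Properties();
    props.setProperty("TenantId", "00Dabcdetenant1"); // PhoenixRuntime.TENANT_ID_ATTRIB
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
        conn.createStatement().execute(
            "CREATE VIEW MY_VIEW AS SELECT * FROM BASE_TABLE");
        // Per the test above, guideposts gathered through one tenant's view may
        // only partially cover another tenant's key range until it runs this too.
        conn.createStatement().execute("UPDATE STATISTICS MY_VIEW");
    }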
[16/37] phoenix git commit: PHOENIX-4329 Test IndexScrutinyTool while
table is taking writes (Vincent Poon)
Posted by ja...@apache.org.
PHOENIX-4329 Test IndexScrutinyTool while table is taking writes (Vincent Poon)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e319ff02
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e319ff02
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e319ff02
Branch: refs/heads/4.x-HBase-1.1
Commit: e319ff02e2d135c526b7334a65bfe1628c0dd220
Parents: 7c21a83
Author: James Taylor <jt...@salesforce.com>
Authored: Sun Oct 29 15:20:23 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:39 2017 -0800
----------------------------------------------------------------------
.../phoenix/end2end/IndexScrutinyToolIT.java | 101 ++++++++++++++++++-
1 file changed, 96 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e319ff02/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyToolIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyToolIT.java
index 10595a7..cbce7b2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyToolIT.java
@@ -36,6 +36,9 @@ import java.util.Properties;
import java.util.Random;
import java.util.TreeSet;
import java.util.UUID;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
import com.google.common.collect.Sets;
import org.apache.commons.io.IOUtils;
@@ -43,6 +46,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
@@ -103,6 +107,7 @@ public class IndexScrutinyToolIT extends BaseTest {
private PreparedStatement indexTableUpsertStmt;
private long testTime;
+ private Properties props;
@Parameterized.Parameters
public static Collection<Object[]> data() {
@@ -120,8 +125,11 @@ public class IndexScrutinyToolIT extends BaseTest {
@BeforeClass
public static void doSetup() throws Exception {
- Map<String, String> props = Maps.newHashMap();
- setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ Map<String, String> serverProps = Maps.newHashMap();
+ //disable major compactions
+ serverProps.put(HConstants.MAJOR_COMPACTION_PERIOD, "0");
+ Map<String, String> clientProps = Maps.newHashMap();
+ setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet().iterator()));
}
/**
@@ -133,7 +141,7 @@ public class IndexScrutinyToolIT extends BaseTest {
createTestTable(getUrl(), String.format(dataTableDdl, dataTableFullName));
createTestTable(getUrl(),
String.format(indexTableDdl, indexTableName, dataTableFullName));
- Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
conn = DriverManager.getConnection(getUrl(), props);
String dataTableUpsert = String.format(UPSERT_SQL, dataTableFullName);
dataTableUpsertStmt = conn.prepareStatement(dataTableUpsert);
@@ -141,6 +149,7 @@ public class IndexScrutinyToolIT extends BaseTest {
indexTableUpsertStmt = conn.prepareStatement(indexTableUpsert);
conn.setAutoCommit(false);
testTime = EnvironmentEdgeManager.currentTimeMillis() - 1000;
+
}
@After
@@ -177,6 +186,77 @@ public class IndexScrutinyToolIT extends BaseTest {
}
/**
+ * Tests running a scrutiny while updates and deletes are happening.
+ * Since CURRENT_SCN is set, the scrutiny shouldn't report any issue.
+ */
+ @Test
+ public void testScrutinyWhileTakingWrites() throws Exception {
+ int id = 0;
+ while (id < 1000) {
+ int index = 1;
+ dataTableUpsertStmt.setInt(index++, id);
+ dataTableUpsertStmt.setString(index++, "name-" + id);
+ dataTableUpsertStmt.setInt(index++, id);
+ dataTableUpsertStmt.setTimestamp(index++, new Timestamp(testTime));
+ dataTableUpsertStmt.executeUpdate();
+ id++;
+ }
+ conn.commit();
+
+ //CURRENT_SCN for scrutiny
+ long scrutinyTS = EnvironmentEdgeManager.currentTimeMillis();
+
+ // launch background upserts and deletes
+ final Random random = new Random(0);
+ Runnable backgroundUpserts = new Runnable() {
+ @Override
+ public void run() {
+ int idToUpsert = random.nextInt(1000);
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ PreparedStatement dataPS =
+ conn.prepareStatement(String.format(UPSERT_SQL, dataTableFullName));
+ upsertRow(dataPS, idToUpsert, "modified-" + idToUpsert, idToUpsert + 1000);
+ conn.commit();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+ };
+ Runnable backgroundDeletes = new Runnable() {
+ @Override
+ public void run() {
+ int idToDelete = random.nextInt(1000);
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ String deleteSql =
+ String.format(DELETE_SQL, indexTableFullName) + "WHERE \":ID\"="
+ + idToDelete;
+ conn.createStatement().executeUpdate(deleteSql);
+ conn.commit();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+ };
+ ScheduledExecutorService scheduledThreadPool = Executors.newScheduledThreadPool(2);
+ scheduledThreadPool.scheduleWithFixedDelay(backgroundUpserts, 200, 200,
+ TimeUnit.MILLISECONDS);
+ scheduledThreadPool.scheduleWithFixedDelay(backgroundDeletes, 200, 200,
+ TimeUnit.MILLISECONDS);
+
+ // scrutiny should report everything as ok
+ List<Job> completedJobs =
+ runScrutinyCurrentSCN(schemaName, dataTableName, indexTableName,
+ scrutinyTS);
+ Job job = completedJobs.get(0);
+ assertTrue(job.isSuccessful());
+ Counters counters = job.getCounters();
+ assertEquals(1000, getCounterValue(counters, VALID_ROW_COUNT));
+ assertEquals(0, getCounterValue(counters, INVALID_ROW_COUNT));
+ scheduledThreadPool.shutdown();
+ scheduledThreadPool.awaitTermination(10000, TimeUnit.MILLISECONDS);
+ }
+
+ /**
* Tests an index with the same # of rows as the data table, but one of the index rows is
* incorrect. Scrutiny should report the invalid rows.
*/
@@ -570,6 +650,13 @@ public class IndexScrutinyToolIT extends BaseTest {
private String[] getArgValues(String schemaName, String dataTable, String indxTable, Long batchSize,
SourceTable sourceTable, boolean outputInvalidRows, OutputFormat outputFormat,
Long maxOutputRows) {
+ return getArgValues(schemaName, dataTable, indxTable, batchSize, sourceTable,
+ outputInvalidRows, outputFormat, maxOutputRows, Long.MAX_VALUE);
+ }
+
+ private String[] getArgValues(String schemaName, String dataTable, String indxTable, Long batchSize,
+ SourceTable sourceTable, boolean outputInvalidRows, OutputFormat outputFormat,
+ Long maxOutputRows, Long scrutinyTs) {
final List<String> args = Lists.newArrayList();
if (schemaName != null) {
args.add("-s");
@@ -591,7 +678,7 @@ public class IndexScrutinyToolIT extends BaseTest {
args.add(outputDir);
}
args.add("-t");
- args.add(String.valueOf(Long.MAX_VALUE));
+ args.add(String.valueOf(scrutinyTs));
args.add("-run-foreground");
if (batchSize != null) {
args.add("-b");
@@ -619,6 +706,10 @@ public class IndexScrutinyToolIT extends BaseTest {
return args.toArray(new String[0]);
}
+ private List<Job> runScrutinyCurrentSCN(String schemaName, String dataTableName, String indexTableName, Long scrutinyTS) throws Exception {
+ return runScrutiny(getArgValues(schemaName, dataTableName, indexTableName, null, SourceTable.BOTH, false, null, null, scrutinyTS));
+ }
+
private List<Job> runScrutiny(String schemaName, String dataTableName, String indexTableName) throws Exception {
return runScrutiny(schemaName, dataTableName, indexTableName, null, null);
}
@@ -632,7 +723,7 @@ public class IndexScrutinyToolIT extends BaseTest {
Long batchSize, SourceTable sourceTable) throws Exception {
final String[] cmdArgs =
getArgValues(schemaName, dataTableName, indexTableName, batchSize, sourceTable, false,
- null, null);
+ null, null, Long.MAX_VALUE);
return runScrutiny(cmdArgs);
}
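The background-writer scaffolding in testScrutinyWhileTakingWrites follows a stock java.util.concurrent pattern: schedule the mutating tasks with a fixed delay, run the verification, then shut the pool down and wait for in-flight runs. A stripped-down sketch:

    ScheduledExecutorService pool = Executors.newScheduledThreadPool(2);
    Runnable writer = new Runnable() {
        @Override
        public void run() { /* upsert or delete a random row */ }
    };
    // Fixed *delay*: each run starts 200ms after the previous one finishes,
    // so a slow write never stacks runs on top of each other.
    pool.scheduleWithFixedDelay(writer, 200, 200, TimeUnit.MILLISECONDS);
    // ... run the scrutiny and its assertions here ...
    pool.shutdown();                              // stop scheduling new runs
    pool.awaitTermination(10, TimeUnit.SECONDS);  // wait for in-flight runs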
[36/37] phoenix git commit: PHOENIX-4291 Addendum - Merge release
script for mac and linux
Posted by ja...@apache.org.
PHOENIX-4291 Addendum - Merge release script for mac and linux
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b115f9b4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b115f9b4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b115f9b4
Branch: refs/heads/4.x-HBase-1.1
Commit: b115f9b46531c08edbefb8c60bde9811d19fff6e
Parents: 47e7c60
Author: Mujtaba <mu...@apache.org>
Authored: Fri Nov 3 13:41:45 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:42 2017 -0800
----------------------------------------------------------------------
dev/make_rc.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b115f9b4/dev/make_rc.sh
----------------------------------------------------------------------
diff --git a/dev/make_rc.sh b/dev/make_rc.sh
index 31cb9f9..638968c 100755
--- a/dev/make_rc.sh
+++ b/dev/make_rc.sh
@@ -81,7 +81,7 @@ cp bin/* $DIR_BIN;
cp -R $DIR_PHERF_CONF $DIR_BIN;
# Copy release docs
-cp README $DIR_REL_BIN_PATH;
+cp README* $DIR_REL_BIN_PATH;
cp $DIR_DOCS/* $DIR_REL_BIN_PATH;
# Copy examples
[11/37] phoenix git commit: PHOENIX-4292 Filters on Tables and Views
with composite PK of VARCHAR fields with sort direction DESC do not work
(addendum)
Posted by ja...@apache.org.
PHOENIX-4292 Filters on Tables and Views with composite PK of VARCHAR fields with sort direction DESC do not work (addendum)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/87f8b1ed
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/87f8b1ed
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/87f8b1ed
Branch: refs/heads/4.x-HBase-1.1
Commit: 87f8b1ed0f64f2504fdf6b084f81ad7f98641c77
Parents: 3f453e1
Author: Thomas D'Silva <td...@apache.org>
Authored: Mon Oct 23 20:19:15 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:41:23 2017 -0800
----------------------------------------------------------------------
.../java/org/apache/phoenix/end2end/ViewIT.java | 202 ++++++++++++-------
1 file changed, 129 insertions(+), 73 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/87f8b1ed/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index 66e2430..5c0d100 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -791,77 +791,133 @@ public class ViewIT extends BaseViewIT {
assertArrayEquals(expectedPKs, actualPKs);
}
- @Test
- public void testCompositeDescPK() throws SQLException {
- Properties props = new Properties();
- try (Connection globalConn = DriverManager.getConnection(getUrl(), props)) {
- String tableName = generateUniqueName();
- String viewName = generateUniqueName();
-
- // create global base table
- globalConn.createStatement().execute("CREATE TABLE " + tableName
- + " (TENANT_ID CHAR(15) NOT NULL, KEY_PREFIX CHAR(3) NOT NULL, CREATED_DATE DATE, CREATED_BY CHAR(15), SYSTEM_MODSTAMP DATE CONSTRAINT PK PRIMARY KEY (TENANT_ID, KEY_PREFIX)) VERSIONS=1, MULTI_TENANT=true, IMMUTABLE_ROWS=TRUE, REPLICATION_SCOPE=1");
-
- // create various tenant specific views
- globalConn.createStatement()
- .execute("CREATE VIEW " + viewName
- + " (pk1 VARCHAR(10) NOT NULL, pk2 VARCHAR(10) NOT NULL, col1 DATE, col3 DECIMAL CONSTRAINT PK PRIMARY KEY (pk1 DESC, pk2 DESC)) AS SELECT * FROM "
- + tableName + " WHERE KEY_PREFIX = 'abc' ");
-
- String tenantId = "tenantId";
- Properties tenantProps = new Properties();
- tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
- // create a tenant specific view
- try (Connection tenantConn = DriverManager.getConnection(getUrl(), tenantProps)) {
- // upsert rows
- tenantConn.createStatement().execute("UPSERT INTO " + viewName
- + " (pk1, pk2, col1, col3) VALUES ('testa', 'testb', TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss'), 10)");
- tenantConn.createStatement().execute("UPSERT INTO " + viewName
- + " (pk1, pk2, col1, col3) VALUES ('testa', 'testc', TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss'), 10)");
- tenantConn.createStatement().execute("UPSERT INTO " + viewName
- + " (pk1, pk2, col1, col3) VALUES ('testa', 'testd', TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss'), 10)");
- tenantConn.createStatement().execute("UPSERT INTO " + viewName
- + " (pk1, pk2, col1, col3) VALUES ('testa', 'teste', TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss'), 10)");
- tenantConn.createStatement().execute("UPSERT INTO " + viewName
- + " (pk1, pk2, col1, col3) VALUES ('testb', 'testa', TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss'), 10)");
- tenantConn.commit();
-
- // run queries
- ResultSet rs = tenantConn.createStatement()
- .executeQuery("SELECT count(*) FROM " + viewName + " WHERE pk1 = 'testa'");
- assertTrue(rs.next());
- assertEquals(4, rs.getLong(1));
- assertFalse(rs.next());
-
- rs = tenantConn.createStatement().executeQuery("SELECT count(*) FROM " + viewName);
- assertTrue(rs.next());
- assertEquals(5, rs.getLong(1));
- assertFalse(rs.next());
-
- rs = tenantConn.createStatement()
- .executeQuery("SELECT count(*) FROM " + viewName + " WHERE pk1 >= 'testa'");
- assertTrue(rs.next());
- assertEquals(5, rs.getLong(1));
- assertFalse(rs.next());
-
- rs = tenantConn.createStatement()
- .executeQuery("SELECT count(*) FROM " + viewName + " WHERE pk1 <= 'testa'");
- assertTrue(rs.next());
- assertEquals(4, rs.getLong(1));
- assertFalse(rs.next());
-
- rs = tenantConn.createStatement()
- .executeQuery("SELECT count(*) FROM " + viewName + " WHERE pk1 > 'testa'");
- assertTrue(rs.next());
- assertEquals(1, rs.getLong(1));
- assertFalse(rs.next());
-
- rs = tenantConn.createStatement()
- .executeQuery("SELECT count(*) FROM " + viewName + " WHERE pk1 < 'testa'");
- assertTrue(rs.next());
- assertEquals(0, rs.getLong(1));
- assertFalse(rs.next());
- }
- }
- }
+ @Test
+ public void testCompositeDescPK() throws SQLException {
+ Properties props = new Properties();
+ try (Connection globalConn = DriverManager.getConnection(getUrl(), props)) {
+ String tableName = generateUniqueName();
+ String viewName1 = generateUniqueName();
+ String viewName2 = generateUniqueName();
+ String viewName3 = generateUniqueName();
+ String viewName4 = generateUniqueName();
+
+ // create global base table
+ globalConn.createStatement().execute("CREATE TABLE " + tableName
+ + " (TENANT_ID CHAR(15) NOT NULL, KEY_PREFIX CHAR(3) NOT NULL, CREATED_DATE DATE, CREATED_BY CHAR(15), SYSTEM_MODSTAMP DATE CONSTRAINT PK PRIMARY KEY (TENANT_ID, KEY_PREFIX)) VERSIONS=1, MULTI_TENANT=true, IMMUTABLE_ROWS=TRUE, REPLICATION_SCOPE=1");
+
+ String tenantId = "tenantId";
+ Properties tenantProps = new Properties();
+ tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
+ // create a tenant specific view
+ try (Connection tenantConn = DriverManager.getConnection(getUrl(), tenantProps)) {
+ // create various tenant specific views
+ // view with a composite PK of multiple VARCHAR columns, all sorted DESC
+ tenantConn.createStatement()
+ .execute("CREATE VIEW " + viewName1
+ + " (pk1 VARCHAR(10) NOT NULL, pk2 VARCHAR(10) NOT NULL, col1 DATE, col3 DECIMAL CONSTRAINT PK PRIMARY KEY (pk1 DESC, pk2 DESC)) AS SELECT * FROM "
+ + tableName + " WHERE KEY_PREFIX = 'abc' ");
+ // second view with the same composite DESC PK of VARCHAR columns
+ tenantConn.createStatement()
+ .execute("CREATE VIEW " + viewName2
+ + " (pk1 VARCHAR(10) NOT NULL, pk2 VARCHAR(10) NOT NULL, col1 DATE, col3 DECIMAL CONSTRAINT PK PRIMARY KEY (pk1 DESC, pk2 DESC)) AS SELECT * FROM "
+ + tableName + " WHERE KEY_PREFIX = 'abc' ");
+
+ // upsert rows
+ upsertRows(viewName1, tenantConn);
+ upsertRows(viewName2, tenantConn);
+
+ // run queries
+ String[] whereClauses =
+ new String[] { "pk1 = 'testa'", "", "pk1 >= 'testa'", "pk1 <= 'testa'",
+ "pk1 > 'testa'", "pk1 < 'testa'" };
+ long[] expectedArray = new long[] { 4, 5, 5, 4, 1, 0 };
+ validate(viewName1, tenantConn, whereClauses, expectedArray);
+ validate(viewName2, tenantConn, whereClauses, expectedArray);
+
+ // view with composite PK with multiple Date PK values DESC
+ tenantConn.createStatement()
+ .execute("CREATE VIEW " + viewName3
+ + " (pk1 DATE(10) NOT NULL, pk2 DATE(10) NOT NULL, col1 VARCHAR(10), col3 DECIMAL CONSTRAINT PK PRIMARY KEY (pk1 DESC, pk2 DESC)) AS SELECT * FROM "
+ + tableName + " WHERE KEY_PREFIX = 'ab3' ");
+
+ tenantConn.createStatement().execute("UPSERT INTO " + viewName3
+ + " (pk1, pk2, col1, col3) VALUES (TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss'), TO_DATE('2017-10-16 21:00:00', 'yyyy-MM-dd HH:mm:ss'), 'txt1', 10)");
+ tenantConn.createStatement().execute("UPSERT INTO " + viewName3
+ + " (pk1, pk2, col1, col3) VALUES (TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss'), TO_DATE('2017-10-16 21:01:00', 'yyyy-MM-dd HH:mm:ss'), 'txt1', 10)");
+ tenantConn.createStatement().execute("UPSERT INTO " + viewName3
+ + " (pk1, pk2, col1, col3) VALUES (TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss'), TO_DATE('2017-10-16 21:02:00', 'yyyy-MM-dd HH:mm:ss'), 'txt1', 10)");
+ tenantConn.createStatement().execute("UPSERT INTO " + viewName3
+ + " (pk1, pk2, col1, col3) VALUES (TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss'), TO_DATE('2017-10-16 21:03:00', 'yyyy-MM-dd HH:mm:ss'), 'txt1', 10)");
+ tenantConn.createStatement().execute("UPSERT INTO " + viewName3
+ + " (pk1, pk2, col1, col3) VALUES (TO_DATE('2017-10-16 23:00:00', 'yyyy-MM-dd HH:mm:ss'), TO_DATE('2017-10-16 21:04:00', 'yyyy-MM-dd HH:mm:ss'), 'txt1', 10)");
+ tenantConn.commit();
+
+ String[] view3WhereClauses =
+ new String[] {
+ "pk1 = TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss')", "",
+ "pk1 >= TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss')",
+ "pk1 <= TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss')",
+ "pk1 > TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss')",
+ "pk1 < TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss')" };
+ validate(viewName3, tenantConn, view3WhereClauses, expectedArray);
+
+ tenantConn.createStatement()
+ .execute("CREATE VIEW " + viewName4
+ + " (pk1 DATE(10) NOT NULL, pk2 DECIMAL NOT NULL, pk3 VARCHAR(10) NOT NULL, col3 DECIMAL CONSTRAINT PK PRIMARY KEY (pk1 DESC, pk2 DESC, pk3 DESC)) AS SELECT * FROM "
+ + tableName + " WHERE KEY_PREFIX = 'ab4' ");
+
+ tenantConn.createStatement().execute("UPSERT INTO " + viewName4
+ + " (pk1, pk2, pk3, col3) VALUES (TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss'), 1, 'txt1', 10)");
+ tenantConn.createStatement().execute("UPSERT INTO " + viewName4
+ + " (pk1, pk2, pk3, col3) VALUES (TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss'), 2, 'txt2', 10)");
+ tenantConn.createStatement().execute("UPSERT INTO " + viewName4
+ + " (pk1, pk2, pk3, col3) VALUES (TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss'), 3, 'txt3', 10)");
+ tenantConn.createStatement().execute("UPSERT INTO " + viewName4
+ + " (pk1, pk2, pk3, col3) VALUES (TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss'), 4, 'txt4', 10)");
+ tenantConn.createStatement().execute("UPSERT INTO " + viewName4
+ + " (pk1, pk2, pk3, col3) VALUES (TO_DATE('2017-10-16 23:00:00', 'yyyy-MM-dd HH:mm:ss'), 1, 'txt1', 10)");
+ tenantConn.commit();
+
+ String[] view4WhereClauses =
+ new String[] {
+ "pk1 = TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss')",
+ "pk1 = TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss') AND pk2 = 2",
+ "pk1 = TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss') AND pk2 > 2",
+ "", "pk1 >= TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss')",
+ "pk1 <= TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss')",
+ "pk1 > TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss')",
+ "pk1 < TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss')" };
+ long[] view4ExpectedArray = new long[] { 4, 1, 2, 5, 5, 4, 1, 0 };
+ validate(viewName4, tenantConn, view4WhereClauses, view4ExpectedArray);
+
+ }
+ }
+ }
+
+ private void validate(String viewName, Connection tenantConn, String[] whereClauseArray,
+ long[] expectedArray) throws SQLException {
+ for (int i = 0; i < whereClauseArray.length; ++i) {
+ String where = !whereClauseArray[i].isEmpty() ? (" WHERE " + whereClauseArray[i]) : "";
+ ResultSet rs =
+ tenantConn.createStatement()
+ .executeQuery("SELECT count(*) FROM " + viewName + where);
+ assertTrue(rs.next());
+ assertEquals(expectedArray[i], rs.getLong(1));
+ assertFalse(rs.next());
+ }
+ }
+
+ private void upsertRows(String viewName1, Connection tenantConn) throws SQLException {
+ tenantConn.createStatement().execute("UPSERT INTO " + viewName1
+ + " (pk1, pk2, col1, col3) VALUES ('testa', 'testb', TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss'), 10)");
+ tenantConn.createStatement().execute("UPSERT INTO " + viewName1
+ + " (pk1, pk2, col1, col3) VALUES ('testa', 'testc', TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss'), 10)");
+ tenantConn.createStatement().execute("UPSERT INTO " + viewName1
+ + " (pk1, pk2, col1, col3) VALUES ('testa', 'testd', TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss'), 10)");
+ tenantConn.createStatement().execute("UPSERT INTO " + viewName1
+ + " (pk1, pk2, col1, col3) VALUES ('testa', 'teste', TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss'), 10)");
+ tenantConn.createStatement().execute("UPSERT INTO " + viewName1
+ + " (pk1, pk2, col1, col3) VALUES ('testb', 'testa', TO_DATE('2017-10-16 22:00:00', 'yyyy-MM-dd HH:mm:ss'), 10)");
+ tenantConn.commit();
+ }
}
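The shape under test is worth isolating: when composite PK columns are declared DESC, their key bytes are stored inverted, and the addendum verifies that range predicates still compare by value. An illustrative fragment (names are not from the patch):

    String ddl = "CREATE VIEW V (PK1 VARCHAR(10) NOT NULL, PK2 VARCHAR(10) NOT NULL,"
        + " COL1 DATE CONSTRAINT PK PRIMARY KEY (PK1 DESC, PK2 DESC))"
        + " AS SELECT * FROM T WHERE KEY_PREFIX = 'abc'";
    // Must count by value order ('testb' >= 'testa'), not by inverted byte order.
    String query = "SELECT count(*) FROM V WHERE PK1 >= 'testa'";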
[29/37] phoenix git commit: PHOENIX-4348 Point deletes do not work
when there are immutable indexes with only row key columns
Posted by ja...@apache.org.
PHOENIX-4348 Point deletes do not work when there are immutable indexes with only row key columns
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5820ff43
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5820ff43
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5820ff43
Branch: refs/heads/4.x-HBase-1.1
Commit: 5820ff438e8e90c8b7b3a7d41bd9d44ac270e405
Parents: a50aab0
Author: James Taylor <jt...@salesforce.com>
Authored: Thu Nov 2 18:47:01 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:41 2017 -0800
----------------------------------------------------------------------
.../org/apache/phoenix/end2end/DeleteIT.java | 96 +++++++++++++++++++-
.../apache/phoenix/compile/DeleteCompiler.java | 5 +-
2 files changed, 94 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5820ff43/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
index aa4d36e..9eac0af 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
@@ -18,6 +18,7 @@
package org.apache.phoenix.end2end;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
import java.sql.Connection;
@@ -32,10 +33,7 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.List;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.TestUtil;
import org.junit.Test;
@@ -339,8 +337,6 @@ public class DeleteIT extends ParallelStatsDisabledIT {
con.commit();
}
- TestUtil.dumpTable(con.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(tableName)));
-
ResultSet rs = con.createStatement().executeQuery("SELECT /*+ NO_INDEX */ count(*) FROM " + tableName);
assertTrue(rs.next());
assertEquals(0, rs.getLong(1));
@@ -370,6 +366,96 @@ public class DeleteIT extends ParallelStatsDisabledIT {
testDeleteRowFromTableWithImmutableIndex(true, false);
}
+ @Test
+ public void testPointDeleteRowFromTableWithImmutableIndex() throws Exception {
+ testPointDeleteRowFromTableWithImmutableIndex(false, false);
+ }
+
+ @Test
+ public void testPointDeleteRowFromTableWithLocalImmutableIndex() throws Exception {
+ testPointDeleteRowFromTableWithImmutableIndex(true, false);
+ }
+
+ @Test
+ public void testPointDeleteRowFromTableWithImmutableIndex2() throws Exception {
+ testPointDeleteRowFromTableWithImmutableIndex(false, true);
+ }
+
+ public void testPointDeleteRowFromTableWithImmutableIndex(boolean localIndex, boolean addNonPKIndex) throws Exception {
+ Connection con = null;
+ try {
+ boolean autoCommit = false;
+ con = DriverManager.getConnection(getUrl());
+ con.setAutoCommit(autoCommit);
+
+ Statement stm = con.createStatement();
+
+ String tableName = generateUniqueName();
+ String indexName1 = generateUniqueName();
+ String indexName2 = generateUniqueName();
+ String indexName3 = addNonPKIndex? generateUniqueName() : null;
+
+ stm.execute("CREATE TABLE IF NOT EXISTS " + tableName + " (" +
+ "HOST CHAR(2) NOT NULL," +
+ "DOMAIN VARCHAR NOT NULL, " +
+ "FEATURE VARCHAR NOT NULL, " +
+ "\"DATE\" DATE NOT NULL, \n" +
+ "USAGE.CORE BIGINT," +
+ "USAGE.DB BIGINT," +
+ "STATS.ACTIVE_VISITOR INTEGER " +
+ "CONSTRAINT PK PRIMARY KEY (HOST, DOMAIN, FEATURE, \"DATE\")) IMMUTABLE_ROWS=true");
+ stm.execute("CREATE " + (localIndex ? "LOCAL" : "") + " INDEX " + indexName1 + " ON " + tableName + " (\"DATE\", FEATURE)");
+ stm.execute("CREATE " + (localIndex ? "LOCAL" : "") + " INDEX " + indexName2 + " ON " + tableName + " (FEATURE, DOMAIN)");
+ if (addNonPKIndex) {
+ stm.execute("CREATE " + (localIndex ? "LOCAL" : "") + " INDEX " + indexName3 + " ON " + tableName + " (\"DATE\", FEATURE, USAGE.DB)");
+ }
+
+ Date date = new Date(0);
+ PreparedStatement psInsert = con
+ .prepareStatement("UPSERT INTO " + tableName + "(HOST, DOMAIN, FEATURE, \"DATE\", CORE, DB, ACTIVE_VISITOR) VALUES(?,?, ? , ?, ?, ?, ?)");
+ psInsert.setString(1, "AA");
+ psInsert.setString(2, "BB");
+ psInsert.setString(3, "CC");
+ psInsert.setDate(4, date);
+ psInsert.setLong(5, 1L);
+ psInsert.setLong(6, 2L);
+ psInsert.setLong(7, 3);
+ psInsert.execute();
+ if (!autoCommit) {
+ con.commit();
+ }
+
+ String dml = "DELETE FROM " + tableName + " WHERE (HOST, DOMAIN, FEATURE, \"DATE\") = (?,?,?,?)";
+ PreparedStatement psDelete = con.prepareStatement(dml);
+ psDelete.setString(1, "AA");
+ psDelete.setString(2, "BB");
+ psDelete.setString(3, "CC");
+ psDelete.setDate(4, date);
+ psDelete.execute();
+ if (!autoCommit) {
+ con.commit();
+ }
+ psDelete = con.prepareStatement("EXPLAIN " + dml);
+ psDelete.setString(1, "AA");
+ psDelete.setString(2, "BB");
+ psDelete.setString(3, "CC");
+ psDelete.setDate(4, date);
+ String explainPlan = QueryUtil.getExplainPlan(psDelete.executeQuery());
+ if (addNonPKIndex) {
+ assertNotEquals("DELETE SINGLE ROW", explainPlan);
+ } else {
+ assertEquals("DELETE SINGLE ROW", explainPlan);
+ }
+
+ assertDeleted(con, tableName, indexName1, indexName2, indexName3);
+ } finally {
+ try {
+ con.close();
+ } catch (Exception ex) {
+ }
+ }
+ }
+
public void testDeleteRowFromTableWithImmutableIndex(boolean localIndex, boolean useCoveredIndex) throws Exception {
Connection con = null;
try {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5820ff43/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 73689d5..f038cda 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -331,8 +331,9 @@ public class DeleteCompiler {
@Override
public MutationState execute() throws SQLException {
MutationState state = firstPlan.execute();
+ statement.getConnection().getMutationState().join(state);
for (MutationPlan plan : plans.subList(1, plans.size())) {
- plan.execute();
+ statement.getConnection().getMutationState().join(plan.execute());
}
return state;
}
@@ -564,7 +565,7 @@ public class DeleteCompiler {
while (iterator.hasNext()) {
mutation.put(new ImmutableBytesPtr(iterator.next().getLowerRange()), new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
}
- return new MutationState(context.getCurrentTable(), mutation, 0, maxSize, maxSizeBytes, connection);
+ return new MutationState(plan.getTableRef(), mutation, 0, maxSize, maxSizeBytes, connection);
}
@Override
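
As a quick way to see whether a DELETE compiles down to the optimized point-delete path this patch fixes, you can EXPLAIN the statement and look for the "DELETE SINGLE ROW" plan, exactly as the test above does. A minimal sketch, assuming an existing table T with a single CHAR(2) primary key column HOST on a cluster at localhost:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

public class PointDeletePlanCheck {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // A fully-qualified primary key makes a point delete possible.
            String dml = "DELETE FROM T WHERE HOST = ?";
            try (PreparedStatement ps = conn.prepareStatement("EXPLAIN " + dml)) {
                ps.setString(1, "AA");
                try (ResultSet rs = ps.executeQuery()) {
                    // Read the plan line by line; QueryUtil.getExplainPlan in the
                    // test above does the same thing.
                    while (rs.next()) {
                        System.out.println(rs.getString(1)); // expect "DELETE SINGLE ROW"
                    }
                }
            }
        }
    }
}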
[20/37] phoenix git commit: Revert "PHOENIX-4322 DESC primary key
column with variable length does not work in SkipScanFilter (fix test
failures)"
Posted by ja...@apache.org.
Revert "PHOENIX-4322 DESC primary key column with variable length does not work in SkipScanFilter (fix test failures)"
This reverts commit 45a9c275dbbf9206264236c690f40c309d97da3c.
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0ac05493
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0ac05493
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0ac05493
Branch: refs/heads/4.x-HBase-1.1
Commit: 0ac05493534686cd4946b40589fc7e92944ceae5
Parents: 1d85ffa
Author: James Taylor <jt...@salesforce.com>
Authored: Mon Oct 30 19:24:36 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:39 2017 -0800
----------------------------------------------------------------------
phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0ac05493/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
index 3fe8ad3..8ab4f20 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
@@ -434,7 +434,7 @@ public class ScanUtil {
// The result of an RVC evaluation can come with a trailing separator already, so we
// should avoid adding another one.
if ( !isFixedWidth
- && ( bytes.length == 0 || slotSpan[i] == 0 || key[offset - 1] != sepByte )
+ && ( bytes.length == 0 || key[offset - 1] != sepByte )
&& ( sepByte == QueryConstants.DESC_SEPARATOR_BYTE
|| ( !exclusiveUpper
&& (fieldIndex < schema.getMaxFields() || inclusiveUpper || exclusiveLower) ) ) ) {
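
The guard being reverted controls whether a trailing separator byte is appended when building a scan boundary for a variable-width key part: ASC columns are terminated by 0x00 (QueryConstants.SEPARATOR_BYTE), DESC columns by 0xFF (QueryConstants.DESC_SEPARATOR_BYTE), and appending a second separator to a value that an RVC evaluation already terminated would shift the range boundary. A standalone schematic of that idea follows; it is not the real ScanUtil.setKey logic.

public class SeparatorSketch {
    static final byte ASC_SEP = (byte) 0x00;  // QueryConstants.SEPARATOR_BYTE
    static final byte DESC_SEP = (byte) 0xFF; // QueryConstants.DESC_SEPARATOR_BYTE

    // Append a separator to a variable-width key part unless the previous byte
    // is already that separator (e.g. left there by an RVC evaluation).
    static int terminate(byte[] key, int offset, byte sep) {
        if (offset > 0 && key[offset - 1] == sep) {
            return offset; // already terminated; appending again would move the bound
        }
        key[offset] = sep;
        return offset + 1;
    }

    public static void main(String[] args) {
        byte[] key = new byte[8];
        int offset = 0;
        for (byte b : "ab".getBytes()) {
            key[offset++] = b;
        }
        offset = terminate(key, offset, ASC_SEP); // appends one 0x00
        offset = terminate(key, offset, ASC_SEP); // no-op: separator already present
        System.out.println("key length = " + offset); // prints 3
    }
}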
[10/37] phoenix git commit: PHOENIX-4269 IndexScrutinyToolIT is
flapping
Posted by ja...@apache.org.
PHOENIX-4269 IndexScrutinyToolIT is flapping
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b1fa6b53
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b1fa6b53
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b1fa6b53
Branch: refs/heads/4.x-HBase-1.1
Commit: b1fa6b53c69096a2eb743e10eb11b0ae899bcba0
Parents: e98c447
Author: Vincent Poon <vi...@apache.org>
Authored: Thu Oct 19 15:54:11 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:41:23 2017 -0800
----------------------------------------------------------------------
.../phoenix/end2end/IndexScrutinyToolIT.java | 61 ++++++++++----------
1 file changed, 30 insertions(+), 31 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b1fa6b53/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyToolIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyToolIT.java
index f2384ec..10595a7 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyToolIT.java
@@ -56,6 +56,7 @@ import org.apache.phoenix.mapreduce.index.SourceTargetColumnNames;
import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.ReadOnlyProps;
@@ -107,7 +108,9 @@ public class IndexScrutinyToolIT extends BaseTest {
public static Collection<Object[]> data() {
return Arrays.asList(new Object[][] {
{ "CREATE TABLE %s (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR, ZIP INTEGER, EMPLOY_DATE TIMESTAMP, EMPLOYER VARCHAR)", "CREATE LOCAL INDEX %s ON %s (NAME, EMPLOY_DATE) INCLUDE (ZIP)" },
- { "CREATE TABLE %s (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR, ZIP INTEGER, EMPLOY_DATE TIMESTAMP, EMPLOYER VARCHAR) SALT_BUCKETS=2", "CREATE INDEX %s ON %s (NAME, EMPLOY_DATE) INCLUDE (ZIP)" } });
+ { "CREATE TABLE %s (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR, ZIP INTEGER, EMPLOY_DATE TIMESTAMP, EMPLOYER VARCHAR) SALT_BUCKETS=2", "CREATE INDEX %s ON %s (NAME, EMPLOY_DATE) INCLUDE (ZIP)" },
+ { "CREATE TABLE %s (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR, ZIP INTEGER, EMPLOY_DATE TIMESTAMP, EMPLOYER VARCHAR) SALT_BUCKETS=2", "CREATE LOCAL INDEX %s ON %s (NAME, EMPLOY_DATE) INCLUDE (ZIP)" }
+ });
}
public IndexScrutinyToolIT(String dataTableDdl, String indexTableDdl) {
@@ -137,7 +140,7 @@ public class IndexScrutinyToolIT extends BaseTest {
String indexTableUpsert = String.format(INDEX_UPSERT_SQL, indexTableFullName);
indexTableUpsertStmt = conn.prepareStatement(indexTableUpsert);
conn.setAutoCommit(false);
- testTime = System.currentTimeMillis();
+ testTime = EnvironmentEdgeManager.currentTimeMillis() - 1000;
}
@After
@@ -257,8 +260,7 @@ public class IndexScrutinyToolIT extends BaseTest {
// run scrutiny with batch size of 10
List<Job> completedJobs =
- runScrutiny(schemaName, dataTableName, indexTableName, System.currentTimeMillis(),
- 10L);
+ runScrutiny(schemaName, dataTableName, indexTableName, 10L);
Job job = completedJobs.get(0);
assertTrue(job.isSuccessful());
Counters counters = job.getCounters();
@@ -305,8 +307,8 @@ public class IndexScrutinyToolIT extends BaseTest {
conn.commit();
List<Job> completedJobs =
- runScrutiny(schemaName, dataTableName, indexTableName, System.currentTimeMillis(),
- 10L, SourceTable.INDEX_TABLE_SOURCE);
+ runScrutiny(schemaName, dataTableName, indexTableName, 10L,
+ SourceTable.INDEX_TABLE_SOURCE);
Job job = completedJobs.get(0);
assertTrue(job.isSuccessful());
Counters counters = job.getCounters();
@@ -334,8 +336,8 @@ public class IndexScrutinyToolIT extends BaseTest {
conn.commit();
List<Job> completedJobs =
- runScrutiny(schemaName, dataTableName, indexTableName, System.currentTimeMillis(),
- 10L, SourceTable.BOTH);
+ runScrutiny(schemaName, dataTableName, indexTableName, 10L,
+ SourceTable.BOTH);
assertEquals(2, completedJobs.size());
for (Job job : completedJobs) {
assertTrue(job.isSuccessful());
@@ -353,8 +355,8 @@ public class IndexScrutinyToolIT extends BaseTest {
insertOneValid_OneBadVal_OneMissingTarget();
String[] argValues =
- getArgValues(schemaName, dataTableName, indexTableName, System.currentTimeMillis(),
- 10L, SourceTable.DATA_TABLE_SOURCE, true, OutputFormat.FILE, null);
+ getArgValues(schemaName, dataTableName, indexTableName, 10L,
+ SourceTable.DATA_TABLE_SOURCE, true, OutputFormat.FILE, null);
runScrutiny(argValues);
// check the output files
@@ -404,8 +406,8 @@ public class IndexScrutinyToolIT extends BaseTest {
public void testOutputInvalidRowsToTable() throws Exception {
insertOneValid_OneBadVal_OneMissingTarget();
String[] argValues =
- getArgValues(schemaName, dataTableName, indexTableName, System.currentTimeMillis(),
- 10L, SourceTable.DATA_TABLE_SOURCE, true, OutputFormat.TABLE, null);
+ getArgValues(schemaName, dataTableName, indexTableName, 10L,
+ SourceTable.DATA_TABLE_SOURCE, true, OutputFormat.TABLE, null);
List<Job> completedJobs = runScrutiny(argValues);
// check that the output table contains the invalid rows
@@ -448,8 +450,8 @@ public class IndexScrutinyToolIT extends BaseTest {
insertOneValid_OneBadVal_OneMissingTarget();
// set max to 1. There are two bad rows, but only 1 should get written to output table
String[] argValues =
- getArgValues(schemaName, dataTableName, indexTableName, System.currentTimeMillis(),
- 10L, SourceTable.DATA_TABLE_SOURCE, true, OutputFormat.TABLE, new Long(1));
+ getArgValues(schemaName, dataTableName, indexTableName, 10L,
+ SourceTable.DATA_TABLE_SOURCE, true, OutputFormat.TABLE, new Long(1));
List<Job> completedJobs = runScrutiny(argValues);
long scrutinyTimeMillis =
PhoenixConfigurationUtil
@@ -565,9 +567,9 @@ public class IndexScrutinyToolIT extends BaseTest {
return counters.findCounter(counter).getValue();
}
- private String[] getArgValues(String schemaName, String dataTable, String indxTable, long ts,
- Long batchSize, SourceTable sourceTable, boolean outputInvalidRows,
- OutputFormat outputFormat, Long maxOutputRows) {
+ private String[] getArgValues(String schemaName, String dataTable, String indxTable, Long batchSize,
+ SourceTable sourceTable, boolean outputInvalidRows, OutputFormat outputFormat,
+ Long maxOutputRows) {
final List<String> args = Lists.newArrayList();
if (schemaName != null) {
args.add("-s");
@@ -589,7 +591,7 @@ public class IndexScrutinyToolIT extends BaseTest {
args.add(outputDir);
}
args.add("-t");
- args.add(String.valueOf(ts));
+ args.add(String.valueOf(Long.MAX_VALUE));
args.add("-run-foreground");
if (batchSize != null) {
args.add("-b");
@@ -617,26 +619,20 @@ public class IndexScrutinyToolIT extends BaseTest {
return args.toArray(new String[0]);
}
- private List<Job> runScrutiny(String schemaName, String dataTableName, String indexTableName)
- throws Exception {
- return runScrutiny(schemaName, dataTableName, indexTableName, System.currentTimeMillis());
+ private List<Job> runScrutiny(String schemaName, String dataTableName, String indexTableName) throws Exception {
+ return runScrutiny(schemaName, dataTableName, indexTableName, null, null);
}
private List<Job> runScrutiny(String schemaName, String dataTableName, String indexTableName,
- long ts) throws Exception {
- return runScrutiny(schemaName, dataTableName, indexTableName, ts, null, null);
+ Long batchSize) throws Exception {
+ return runScrutiny(schemaName, dataTableName, indexTableName, batchSize, null);
}
private List<Job> runScrutiny(String schemaName, String dataTableName, String indexTableName,
- long ts, Long batchSize) throws Exception {
- return runScrutiny(schemaName, dataTableName, indexTableName, ts, batchSize, null);
- }
-
- private List<Job> runScrutiny(String schemaName, String dataTableName, String indexTableName,
- long ts, Long batchSize, SourceTable sourceTable) throws Exception {
+ Long batchSize, SourceTable sourceTable) throws Exception {
final String[] cmdArgs =
- getArgValues(schemaName, dataTableName, indexTableName, ts, batchSize, sourceTable,
- false, null, null);
+ getArgValues(schemaName, dataTableName, indexTableName, batchSize, sourceTable, false,
+ null, null);
return runScrutiny(cmdArgs);
}
@@ -646,6 +642,9 @@ public class IndexScrutinyToolIT extends BaseTest {
scrutiny.setConf(conf);
int status = scrutiny.run(cmdArgs);
assertEquals(0, status);
+ for (Job job : scrutiny.getJobs()) {
+ assertTrue(job.waitForCompletion(true));
+ }
return scrutiny.getJobs();
}
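
The flakiness came from capturing System.currentTimeMillis() as the scrutiny timestamp before all writes had been committed, so late rows fell outside the comparison window. The patch back-dates the test's own timestamp and hands the tool an effectively unbounded -t value. A minimal sketch of those two moves, assuming the phoenix-core classpath for EnvironmentEdgeManager:

import org.apache.phoenix.util.EnvironmentEdgeManager;

public class ScrutinyTimestampSketch {
    public static void main(String[] args) {
        // Back-date the timestamp used for the test's own upserts so every row
        // the test writes is visible at or before it.
        long testTime = EnvironmentEdgeManager.currentTimeMillis() - 1000;
        // Give the scrutiny tool an upper bound that can never exclude a row
        // committed while the test is still running.
        String tsArg = String.valueOf(Long.MAX_VALUE);
        System.out.println("testTime=" + testTime + ", -t " + tsArg);
    }
}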
[09/37] phoenix git commit: PHOENIX-4242 Fix Indexer post-compact
hook logging of NPE and TableNotFound
Posted by ja...@apache.org.
PHOENIX-4242 Fix Indexer post-compact hook logging of NPE and TableNotFound
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e98c4477
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e98c4477
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e98c4477
Branch: refs/heads/4.x-HBase-1.1
Commit: e98c44773fb5711cca47d9dc545453462acd4ec9
Parents: 5003ac3
Author: Vincent Poon <vi...@apache.org>
Authored: Thu Oct 19 14:28:27 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:41:23 2017 -0800
----------------------------------------------------------------------
.../UngroupedAggregateRegionObserverIT.java | 171 +++++++++++++++++++
.../UngroupedAggregateRegionObserver.java | 103 ++++++-----
.../org/apache/phoenix/hbase/index/Indexer.java | 52 ------
.../apache/phoenix/schema/MetaDataClient.java | 3 +
4 files changed, 239 insertions(+), 90 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e98c4477/phoenix-core/src/it/java/org/apache/phoenix/end2end/UngroupedAggregateRegionObserverIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UngroupedAggregateRegionObserverIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UngroupedAggregateRegionObserverIT.java
new file mode 100644
index 0000000..3efd40e
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UngroupedAggregateRegionObserverIT.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.never;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.log4j.Appender;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.spi.LoggingEvent;
+import org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Captor;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.runners.MockitoJUnitRunner;
+
+@RunWith(MockitoJUnitRunner.class)
+public class UngroupedAggregateRegionObserverIT extends ParallelStatsDisabledIT {
+
+ private String dataTableName;
+ private String indexTableName;
+ private String schemaName;
+ private String dataTableFullName;
+ private static String indexTableFullName;
+
+ @Mock
+ private Appender mockAppender;
+
+ @Captor
+ private ArgumentCaptor<LoggingEvent> captorLoggingEvent;
+ private UngroupedAggregateRegionObserver ungroupedObserver;
+
+ @Before
+ public void setup() {
+ ungroupedObserver = new UngroupedAggregateRegionObserver();
+ ungroupedObserver.setCompactionConfig(PropertiesUtil.cloneConfig(config));
+ }
+
+ /**
+ * Tests the that post compact hook doesn't log any NPE for a System table
+ */
+ @Test
+ public void testPostCompactSystemSequence() throws Exception {
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ startCapturingIndexLog();
+ // run the post-compact hook
+ ungroupedObserver.clearTsOnDisabledIndexes("SYSTEM.SEQUENCE");
+ stopCapturingIndexLog();
+ // uneventful - nothing should be logged
+ Mockito.verify(mockAppender, never())
+ .doAppend((LoggingEvent) captorLoggingEvent.capture());
+ }
+ }
+
+ /**
+ * Tests that calling the post compact hook on the data table permanently disables an index that
+ * is being rebuilt (i.e. already disabled or inactive)
+ */
+ @Test
+ public void testPostCompactDataTableDuringRebuild() throws Exception {
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ generateUniqueTableNames();
+ testRebuildPostCompact(conn, dataTableFullName);
+ }
+ }
+
+ /**
+ * Tests that calling the post compact hook on the index table permanently disables an index
+ * that is being rebuilt (i.e. already disabled or inactive)
+ */
+ @Test
+ public void testPostCompactIndexTableDuringRebuild() throws Exception {
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ generateUniqueTableNames();
+ testRebuildPostCompact(conn, indexTableFullName);
+ }
+ }
+
+ private void testRebuildPostCompact(Connection conn, String tableToCompact)
+ throws SQLException {
+ conn.createStatement().execute(
+ String.format(PartialScannerResultsDisabledIT.TEST_TABLE_DDL, dataTableFullName));
+ conn.createStatement().execute(String.format(PartialScannerResultsDisabledIT.INDEX_1_DDL,
+ indexTableName, dataTableFullName));
+ // disable the index, simulating an index write failure
+ PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
+ IndexUtil.updateIndexState(pConn, indexTableFullName, PIndexState.DISABLE,
+ EnvironmentEdgeManager.currentTimeMillis());
+
+ // run the post-compact hook on the data table
+ startCapturingIndexLog();
+ ungroupedObserver.clearTsOnDisabledIndexes(tableToCompact);
+ stopCapturingIndexLog();
+ // an event should've been logged
+ Mockito.verify(mockAppender).doAppend((LoggingEvent) captorLoggingEvent.capture());
+ LoggingEvent loggingEvent = (LoggingEvent) captorLoggingEvent.getValue();
+ assertThat(loggingEvent.getLevel(), is(Level.INFO));
+ // index should be permanently disabled (disabletime of 0)
+ assertTrue(TestUtil.checkIndexState(pConn, indexTableFullName, PIndexState.DISABLE, 0L));
+ }
+
+ /**
+ * Tests that a non-Phoenix table (created purely through HBase) doesn't log a warning in
+ * postCompact
+ */
+ @Test
+ public void testPostCompactTableNotFound() throws Exception {
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ HBaseTestingUtility utility = getUtility();
+ String nonPhoenixTable = "NOT_A_PHOENIX_TABLE";
+ utility.getHBaseAdmin().createTable(utility.createTableDescriptor(nonPhoenixTable));
+ startCapturingIndexLog();
+ ungroupedObserver.clearTsOnDisabledIndexes(nonPhoenixTable);
+ stopCapturingIndexLog();
+ // a debug level event should've been logged
+ Mockito.verify(mockAppender).doAppend((LoggingEvent) captorLoggingEvent.capture());
+ LoggingEvent loggingEvent = (LoggingEvent) captorLoggingEvent.getValue();
+ assertThat(loggingEvent.getLevel(), is(Level.DEBUG));
+ }
+ }
+
+ private void stopCapturingIndexLog() {
+ LogManager.getLogger(UngroupedAggregateRegionObserver.class).removeAppender(mockAppender);
+ }
+
+ private void startCapturingIndexLog() {
+ LogManager.getLogger(UngroupedAggregateRegionObserver.class).addAppender(mockAppender);
+ }
+
+ private void generateUniqueTableNames() {
+ schemaName = generateUniqueName();
+ dataTableName = generateUniqueName() + "_DATA";
+ dataTableFullName = SchemaUtil.getTableName(schemaName, dataTableName);
+ indexTableName = generateUniqueName() + "_IDX";
+ indexTableFullName = SchemaUtil.getTableName(schemaName, indexTableName);
+ }
+}
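
The assertions in the new test rely on a standard log4j 1.x trick: attach a Mockito mock Appender to the class logger, run the code under test, then capture the LoggingEvent and inspect its level. Stripped of the Phoenix specifics, the pattern looks roughly like this (the logger name here is a placeholder, not a real Phoenix class):

import org.apache.log4j.Appender;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggingEvent;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;

public class LogCaptureSketch {
    public static void main(String[] args) {
        Appender mockAppender = Mockito.mock(Appender.class);
        Logger logger = LogManager.getLogger("sketch.SomeClass"); // placeholder name
        logger.addAppender(mockAppender);
        try {
            logger.info("clearing index disable timestamp"); // code under test would log here
        } finally {
            logger.removeAppender(mockAppender); // always detach, as the test's cleanup does
        }
        ArgumentCaptor<LoggingEvent> captor = ArgumentCaptor.forClass(LoggingEvent.class);
        Mockito.verify(mockAppender).doAppend(captor.capture());
        System.out.println("captured level = " + captor.getValue().getLevel()); // INFO
    }
}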
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e98c4477/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index c3024a7..af50420 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -97,6 +97,7 @@ import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
import org.apache.phoenix.index.IndexMaintainer;
import org.apache.phoenix.index.PhoenixIndexCodec;
+import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.join.HashJoinInfo;
import org.apache.phoenix.query.QueryConstants;
@@ -108,8 +109,10 @@ import org.apache.phoenix.schema.PIndexState;
import org.apache.phoenix.schema.PRow;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTableImpl;
+import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.RowKeySchema;
import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.TableNotFoundException;
import org.apache.phoenix.schema.TableRef;
import org.apache.phoenix.schema.ValueSchema.Field;
import org.apache.phoenix.schema.stats.StatisticsCollectionRunTracker;
@@ -133,7 +136,10 @@ import org.apache.phoenix.util.ExpressionUtil;
import org.apache.phoenix.util.IndexUtil;
import org.apache.phoenix.util.KeyValueUtil;
import org.apache.phoenix.util.LogUtil;
+import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
import org.apache.phoenix.util.ScanUtil;
import org.apache.phoenix.util.SchemaUtil;
import org.apache.phoenix.util.ServerUtil;
@@ -142,7 +148,10 @@ import org.apache.phoenix.util.TimeKeeper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Predicate;
import com.google.common.base.Throwables;
+import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.primitives.Ints;
@@ -926,7 +935,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
}
@Override
- public void postCompact(final ObserverContext<RegionCoprocessorEnvironment> e, final Store store,
+ public void postCompact(final ObserverContext<RegionCoprocessorEnvironment> c, final Store store,
final StoreFile resultFile, CompactionRequest request) throws IOException {
// If we're compacting all files, then delete markers are removed
// and we must permanently disable an index that needs to be
@@ -940,49 +949,67 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
- MutationCode mutationCode = null;
- long disableIndexTimestamp = 0;
-
- try (CoprocessorHConnection coprocessorHConnection =
- new CoprocessorHConnection(compactionConfig,
- (HRegionServer) e.getEnvironment()
- .getRegionServerServices());
- HTableInterface htable =
- coprocessorHConnection
- .getTable(SchemaUtil.getPhysicalTableName(
- PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES,
- compactionConfig))) {
- String tableName = e.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString();
- // FIXME: if this is an index on a view, we won't find a row for it in SYSTEM.CATALOG
- // Instead, we need to disable all indexes on the view.
- byte[] tableKey = SchemaUtil.getTableKeyFromFullName(tableName);
- Get get = new Get(tableKey);
- get.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES);
- Result result = htable.get(get);
- if (!result.isEmpty()) {
- Cell cell = result.listCells().get(0);
- if (cell.getValueLength() > 0) {
- disableIndexTimestamp = PLong.INSTANCE.getCodec().decodeLong(cell.getValueArray(), cell.getValueOffset(), SortOrder.getDefault());
- if (disableIndexTimestamp != 0) {
- logger.info("Major compaction running while index on table is disabled. Clearing index disable timestamp: " + tableName);
- mutationCode = IndexUtil.updateIndexState(tableKey, 0L, htable, PIndexState.DISABLE).getMutationCode();
- }
- }
- }
- } catch (Throwable t) { // log, but swallow exception as we don't want to impact compaction
- logger.warn("Potential failure to permanently disable index during compaction " + e.getEnvironment().getRegionInfo().getTable().getNameAsString(), t);
- } finally {
- if (disableIndexTimestamp != 0 && mutationCode != MutationCode.TABLE_ALREADY_EXISTS && mutationCode != MutationCode.TABLE_NOT_FOUND) {
- logger.warn("Attempt to permanently disable index " + e.getEnvironment().getRegionInfo().getTable().getNameAsString() +
- " during compaction" + (mutationCode == null ? "" : " failed with code = " + mutationCode));
- }
- }
+ String fullTableName = c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString();
+ clearTsOnDisabledIndexes(fullTableName);
return null;
}
});
}
}
+ @VisibleForTesting
+ public void clearTsOnDisabledIndexes(final String fullTableName) {
+ try (PhoenixConnection conn =
+ QueryUtil.getConnectionOnServer(compactionConfig).unwrap(PhoenixConnection.class)) {
+ String baseTable = fullTableName;
+ PTable table = PhoenixRuntime.getTableNoCache(conn, baseTable);
+ List<PTable> indexes;
+ // if it's an index table, we just need to check if it's disabled
+ if (PTableType.INDEX.equals(table.getType())) {
+ indexes = Lists.newArrayList(table.getIndexes());
+ indexes.add(table);
+ } else {
+ // for a data table, check all its indexes
+ indexes = table.getIndexes();
+ }
+ // FIXME: need to handle views and indexes on views as well
+ // if any index is disabled, we won't have all the data for a rebuild after compaction
+ for (PTable index : indexes) {
+ if (index.getIndexDisableTimestamp() != 0) {
+ try {
+ logger.info(
+ "Major compaction running while index on table is disabled. Clearing index disable timestamp: "
+ + index);
+ IndexUtil.updateIndexState(conn, index.getName().getString(),
+ PIndexState.DISABLE, Long.valueOf(0L));
+ } catch (SQLException e) {
+ logger.warn(
+ "Unable to permanently disable index " + index.getName().getString(),
+ e);
+ }
+ }
+ }
+ } catch (Exception e) {
+ if (e instanceof TableNotFoundException) {
+ logger.debug("Ignoring HBase table that is not a Phoenix table: " + fullTableName);
+ // non-Phoenix HBase tables won't be found, do nothing
+ return;
+ }
+ // If we can't reach the stats table, don't interrupt the normal
+ // compaction operation, just log a warning.
+ if (logger.isWarnEnabled()) {
+ logger.warn("Unable to permanently disable indexes being partially rebuild for "
+ + fullTableName,
+ e);
+ }
+ }
+ }
+
+ @VisibleForTesting
+ public void setCompactionConfig(Configuration compactionConfig) {
+ this.compactionConfig = compactionConfig;
+ }
+
private static PTable deserializeTable(byte[] b) {
try {
PTableProtos.PTable ptableProto = PTableProtos.PTable.parseFrom(b);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e98c4477/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index 4ac4ab5..9686789 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -25,7 +25,6 @@ import static org.apache.phoenix.hbase.index.write.IndexWriterUtils.INDEX_WRITER
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
-import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
@@ -62,8 +61,6 @@ import org.apache.hadoop.hbase.regionserver.OperationStatus;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.security.User;
@@ -89,18 +86,12 @@ import org.apache.phoenix.hbase.index.write.IndexWriter;
import org.apache.phoenix.hbase.index.write.RecoveryIndexWriter;
import org.apache.phoenix.hbase.index.write.recovery.PerRegionIndexWriteCache;
import org.apache.phoenix.hbase.index.write.recovery.StoreFailuresInCachePolicy;
-import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.schema.PIndexState;
-import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.trace.TracingUtils;
import org.apache.phoenix.trace.util.NullSpan;
import org.apache.phoenix.util.EnvironmentEdgeManager;
-import org.apache.phoenix.util.IndexUtil;
-import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.QueryUtil;
import org.apache.phoenix.util.ServerUtil;
import com.google.common.collect.Lists;
@@ -847,48 +838,5 @@ public class Indexer extends BaseRegionObserver {
properties.put(Indexer.INDEX_BUILDER_CONF_KEY, builder.getName());
desc.addCoprocessor(Indexer.class.getName(), null, priority, properties);
}
-
- @Override
- public void postCompact(final ObserverContext<RegionCoprocessorEnvironment> c, final Store store,
- final StoreFile resultFile, CompactionRequest request) throws IOException {
- // If we're compacting all files, then delete markers are removed
- // and we must permanently disable an index that needs to be
- // partially rebuild because we're potentially losing the information
- // we need to successfully rebuilt it.
- if (request.isAllFiles() || request.isMajor()) {
- // Compaction and split upcalls run with the effective user context of the requesting user.
- // This will lead to failure of cross cluster RPC if the effective user is not
- // the login user. Switch to the login user context to ensure we have the expected
- // security context.
- User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
- @Override
- public Void run() throws Exception {
- String fullTableName = c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString();
- try {
- PhoenixConnection conn = QueryUtil.getConnectionOnServer(compactionConfig).unwrap(PhoenixConnection.class);
- PTable table = PhoenixRuntime.getTableNoCache(conn, fullTableName);
- // FIXME: we may need to recurse into children of this table too
- for (PTable index : table.getIndexes()) {
- if (index.getIndexDisableTimestamp() != 0) {
- try {
- LOG.info("Major compaction running while index on table is disabled. Clearing index disable timestamp: " + fullTableName);
- IndexUtil.updateIndexState(conn, index.getName().getString(), PIndexState.DISABLE, Long.valueOf(0L));
- } catch (SQLException e) {
- LOG.warn("Unable to permanently disable index " + index.getName().getString(), e);
- }
- }
- }
- } catch (Exception e) {
- // If we can't reach the stats table, don't interrupt the normal
- // compaction operation, just log a warning.
- if (LOG.isWarnEnabled()) {
- LOG.warn("Unable to permanently disable indexes being partially rebuild for " + fullTableName, e);
- }
- }
- return null;
- }
- });
- }
- }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e98c4477/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 0f6bab2..0ce4246 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -602,6 +602,9 @@ public class MetaDataClient {
}
if (SYSTEM_CATALOG_SCHEMA.equals(schemaName)) {
+ if (result.getMutationCode() == MutationCode.TABLE_ALREADY_EXISTS && result.getTable() == null) {
+ result.setTable(table);
+ }
return result;
}
MutationCode code = result.getMutationCode();
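
The MetaDataClient hunk above guards a case where the server answers TABLE_ALREADY_EXISTS for a SYSTEM.CATALOG-schema table without echoing a PTable back, and backfills the client-side copy so callers are not handed a null table. In isolation the guard is just a null-safe backfill; a stand-alone sketch with stand-in types (not the real Phoenix classes):

public class BackfillSketch {
    enum MutationCode { TABLE_ALREADY_EXISTS, TABLE_NOT_FOUND }

    static class MetaDataResult {
        MutationCode code;
        Object table; // stands in for PTable
    }

    // If the server says the table already exists but returns no table payload,
    // reuse the client-side copy rather than propagating null.
    static void backfill(MetaDataResult result, Object clientSideTable) {
        if (result.code == MutationCode.TABLE_ALREADY_EXISTS && result.table == null) {
            result.table = clientSideTable;
        }
    }

    public static void main(String[] args) {
        MetaDataResult r = new MetaDataResult();
        r.code = MutationCode.TABLE_ALREADY_EXISTS;
        backfill(r, "client-side PTable");
        System.out.println(r.table); // the client-side copy is used
    }
}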
[04/37] phoenix git commit: Revert "PHOENIX-4198 Remove the need for
users to have access to the Phoenix SYSTEM tables to create tables"
Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5003ac30/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
deleted file mode 100644
index 8437b37..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ /dev/null
@@ -1,628 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.coprocessor;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.AuthUtil;
-import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.coprocessor.BaseMasterAndRegionObserver;
-import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
-import org.apache.hadoop.hbase.ipc.RpcServer;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
-import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
-import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
-import org.apache.hadoop.hbase.security.AccessDeniedException;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.security.access.AccessControlClient;
-import org.apache.hadoop.hbase.security.access.AuthResult;
-import org.apache.hadoop.hbase.security.access.Permission;
-import org.apache.hadoop.hbase.security.access.Permission.Action;
-import org.apache.hadoop.hbase.security.access.UserPermission;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.coprocessor.PhoenixMetaDataCoprocessorHost.PhoenixMetaDataControllerEnvironment;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.schema.PIndexState;
-import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.PTableType;
-import org.apache.phoenix.util.MetaDataUtil;
-
-import com.google.common.collect.Lists;
-import com.google.protobuf.RpcCallback;
-
-public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
-
- private PhoenixMetaDataControllerEnvironment env;
- private ArrayList<BaseMasterAndRegionObserver> accessControllers;
- private boolean accessCheckEnabled;
- private UserProvider userProvider;
- private boolean isAutomaticGrantEnabled;
- private boolean isStrictMode;
- public static final Log LOG = LogFactory.getLog(PhoenixAccessController.class);
- private static final Log AUDITLOG =
- LogFactory.getLog("SecurityLogger."+PhoenixAccessController.class.getName());
-
- private List<BaseMasterAndRegionObserver> getAccessControllers() throws IOException {
- if (accessControllers == null) {
- synchronized (this) {
- if (accessControllers == null) {
- accessControllers = new ArrayList<BaseMasterAndRegionObserver>();
- RegionCoprocessorHost cpHost = this.env.getCoprocessorHost();
- List<BaseMasterAndRegionObserver> coprocessors = cpHost
- .findCoprocessors(BaseMasterAndRegionObserver.class);
- for (BaseMasterAndRegionObserver cp : coprocessors) {
- if (cp instanceof AccessControlService.Interface) {
- accessControllers.add(cp);
- }
- }
- }
- }
- }
- return accessControllers;
- }
-
- @Override
- public void preGetTable(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
- String tableName, TableName physicalTableName) throws IOException {
- for (BaseMasterAndRegionObserver observer : getAccessControllers()) {
- observer.preGetTableDescriptors(new ObserverContext<MasterCoprocessorEnvironment>(),
- Lists.newArrayList(physicalTableName), Collections.<HTableDescriptor> emptyList());
- }
- }
-
- @Override
- public void start(CoprocessorEnvironment env) throws IOException {
- Configuration conf = env.getConfiguration();
- this.accessCheckEnabled = conf.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED,
- QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED);
- this.isAutomaticGrantEnabled=conf.getBoolean(QueryServices.PHOENIX_AUTOMATIC_GRANT_ENABLED,
- QueryServicesOptions.DEFAULT_PHOENIX_AUTOMATIC_GRANT_ENABLED);
- if (!this.accessCheckEnabled) {
- LOG.warn("PhoenixAccessController has been loaded with authorization checks disabled.");
- }
- if (env instanceof PhoenixMetaDataControllerEnvironment) {
- this.env = (PhoenixMetaDataControllerEnvironment)env;
- } else {
- throw new IllegalArgumentException(
- "Not a valid environment, should be loaded by PhoenixMetaDataControllerEnvironment");
- }
- // set the user-provider.
- this.userProvider = UserProvider.instantiate(env.getConfiguration());
- this.isStrictMode = conf.getBoolean(QueryServices.PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED,
- QueryServicesOptions.DEFAULT_PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED);
- // init superusers and add the server principal (if using security)
- // or process owner as default super user.
- Superusers.initialize(env.getConfiguration());
- }
-
- @Override
- public void stop(CoprocessorEnvironment env) throws IOException {}
-
- @Override
- public void preCreateTable(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
- String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType,
- Set<byte[]> familySet, Set<TableName> indexes) throws IOException {
- if (!accessCheckEnabled) { return; }
-
- if (tableType != PTableType.VIEW) {
- final HTableDescriptor htd = new HTableDescriptor(physicalTableName);
- for (byte[] familyName : familySet) {
- htd.addFamily(new HColumnDescriptor(familyName));
- }
- for (BaseMasterAndRegionObserver observer : getAccessControllers()) {
- observer.preCreateTable(new ObserverContext<MasterCoprocessorEnvironment>(), htd, null);
- }
- }
-
- // Index and view require read access on parent physical table.
- Set<TableName> physicalTablesChecked = new HashSet<TableName>();
- if (tableType == PTableType.VIEW || tableType == PTableType.INDEX) {
- physicalTablesChecked.add(parentPhysicalTableName);
- requireAccess("Create" + tableType, parentPhysicalTableName, Action.READ, Action.EXEC);
- }
-
- if (tableType == PTableType.VIEW) {
-
- Action[] requiredActions = { Action.READ, Action.EXEC };
- for (TableName index : indexes) {
- if (!physicalTablesChecked.add(index)) {
- // skip check for local index as we have already check the ACLs above
- // And for same physical table multiple times like view index table
- continue;
- }
-
- User user = getActiveUser();
- List<UserPermission> permissionForUser = getPermissionForUser(
- getUserPermissions(index.getNameAsString()), Bytes.toBytes(user.getShortName()));
- Set<Action> requireAccess = new HashSet<>();
- Set<Action> accessExists = new HashSet<>();
- if (permissionForUser != null) {
- for (UserPermission userPermission : permissionForUser) {
- for (Action action : Arrays.asList(requiredActions)) {
- if (!userPermission.implies(action)) {
- requireAccess.add(action);
- }
- }
- }
- if (!requireAccess.isEmpty()) {
- for (UserPermission userPermission : permissionForUser) {
- accessExists.addAll(Arrays.asList(userPermission.getActions()));
- }
-
- }
- } else {
- requireAccess.addAll(Arrays.asList(requiredActions));
- }
- if (!requireAccess.isEmpty()) {
- byte[] indexPhysicalTable = index.getName();
- handleRequireAccessOnDependentTable("Create" + tableType, user.getName(),
- TableName.valueOf(indexPhysicalTable), tableName, requireAccess, accessExists);
- }
- }
-
- }
-
- if (tableType == PTableType.INDEX) {
- // All the users who have READ access on data table should have access to Index table as well.
- // WRITE is needed for the index updates done by the user who has WRITE access on data table.
- // CREATE is needed during the drop of the table.
- // We are doing this because existing user while querying data table should not see access denied for the
- // new indexes.
- // TODO: confirm whether granting permission from coprocessor is a security leak.(currently it is done if
- // automatic grant is enabled explicitly by user in configuration
- // skip check for local index
- if (physicalTableName != null && !parentPhysicalTableName.equals(physicalTableName)
- && !MetaDataUtil.isViewIndex(physicalTableName.getNameAsString())) {
- authorizeOrGrantAccessToUsers("Create" + tableType, parentPhysicalTableName,
- Arrays.asList(Action.READ, Action.WRITE, Action.CREATE, Action.EXEC, Action.ADMIN),
- physicalTableName);
- }
- }
- }
-
-
- public void handleRequireAccessOnDependentTable(String request, String userName, TableName dependentTable,
- String requestTable, Set<Action> requireAccess, Set<Action> accessExists) throws IOException {
-
- if (!isStrictMode) {
- AUDITLOG.warn("Strict mode is not enabled, so " + request + " is allowed but User:" + userName
- + " will not have following access " + requireAccess + " to the existing dependent physical table "
- + dependentTable);
- return;
- }
- if (isAutomaticGrantEnabled) {
- Set<Action> unionSet = new HashSet<Action>();
- unionSet.addAll(requireAccess);
- unionSet.addAll(accessExists);
- AUDITLOG.info(request + ": Automatically granting access to index table during creation of view:"
- + requestTable + authString(userName, dependentTable, requireAccess));
- grantPermissions(userName, dependentTable.getName(), unionSet.toArray(new Action[0]));
- } else {
- throw new AccessDeniedException(
- "Insufficient permissions for users of dependent table" + authString(userName, dependentTable, requireAccess));
- }
- }
-
- private void grantPermissions(final String toUser, final byte[] table, final Action... actions) throws IOException {
- User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
- @Override
- public Void run() throws Exception {
- try (Connection conn = ConnectionFactory.createConnection(env.getConfiguration())) {
- AccessControlClient.grant(conn, TableName.valueOf(table), toUser , null, null,
- actions);
- } catch (Throwable e) {
- new DoNotRetryIOException(e);
- }
- return null;
- }
- });
- }
-
- private void authorizeOrGrantAccessToUsers(final String request, final TableName fromTable,
- final List<Action> requiredActionsOnTable, final TableName toTable)
- throws IOException {
- User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
- @Override
- public Void run() throws IOException {
- try (Connection conn = ConnectionFactory.createConnection(env.getConfiguration())) {
- List<UserPermission> userPermissions = getUserPermissions(fromTable.getNameAsString());
- List<UserPermission> permissionsOnTheTable = getUserPermissions(toTable.getNameAsString());
- if (userPermissions != null) {
- for (UserPermission userPermission : userPermissions) {
- Set<Action> requireAccess = new HashSet<Action>();
- Set<Action> accessExists = new HashSet<Action>();
- List<UserPermission> permsToTable = getPermissionForUser(permissionsOnTheTable,
- userPermission.getUser());
- for (Action action : requiredActionsOnTable) {
- boolean haveAccess=false;
- if (userPermission.implies(action)) {
- if (permsToTable == null) {
- requireAccess.add(action);
- } else {
- for (UserPermission permToTable : permsToTable) {
- if (permToTable.implies(action)) {
- haveAccess=true;
- }
- }
- if (!haveAccess) {
- requireAccess.add(action);
- }
- }
- }
- }
- if (permsToTable != null) {
- // Append access to already existing access for the user
- for (UserPermission permToTable : permsToTable) {
- accessExists.addAll(Arrays.asList(permToTable.getActions()));
- }
- }
- if (!requireAccess.isEmpty()) {
- if(AuthUtil.isGroupPrincipal(Bytes.toString(userPermission.getUser()))){
- AUDITLOG.warn("Users of GROUP:" + Bytes.toString(userPermission.getUser())
- + " will not have following access " + requireAccess
- + " to the newly created index " + toTable
- + ", Automatic grant is not yet allowed on Groups");
- continue;
- }
- handleRequireAccessOnDependentTable(request, Bytes.toString(userPermission.getUser()),
- toTable, toTable.getNameAsString(), requireAccess, accessExists);
- }
- }
- }
- }
- return null;
- }
- });
- }
-
- private List<UserPermission> getPermissionForUser(List<UserPermission> perms, byte[] user) {
- if (perms != null) {
- // get list of permissions for the user as multiple implementation of AccessControl coprocessors can give
- // permissions for same users
- List<UserPermission> permissions = new ArrayList<>();
- for (UserPermission p : perms) {
- if (Bytes.equals(p.getUser(),user)){
- permissions.add(p);
- }
- }
- if (!permissions.isEmpty()){
- return permissions;
- }
- }
- return null;
- }
-
- @Override
- public void preDropTable(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
- String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType,
- List<PTable> indexes) throws IOException {
- if (!accessCheckEnabled) { return; }
-
- for (BaseMasterAndRegionObserver observer : getAccessControllers()) {
- if (tableType != PTableType.VIEW) {
- observer.preDeleteTable(new ObserverContext<MasterCoprocessorEnvironment>(), physicalTableName);
- }
- if (indexes != null) {
- for (PTable index : indexes) {
- observer.preDeleteTable(new ObserverContext<MasterCoprocessorEnvironment>(),
- TableName.valueOf(index.getPhysicalName().getBytes()));
- }
- }
- }
- //checking similar permission checked during the create of the view.
- if (tableType == PTableType.VIEW || tableType == PTableType.INDEX) {
- requireAccess("Drop "+tableType, parentPhysicalTableName, Action.READ, Action.EXEC);
- }
- }
-
- @Override
- public void preAlterTable(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
- String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType) throws IOException {
- if (!accessCheckEnabled) { return; }
- for (BaseMasterAndRegionObserver observer : getAccessControllers()) {
- if (tableType != PTableType.VIEW) {
- observer.preModifyTable(new ObserverContext<MasterCoprocessorEnvironment>(), physicalTableName,
- new HTableDescriptor(physicalTableName));
- }
- }
- if (tableType == PTableType.VIEW) {
- requireAccess("Alter "+tableType, parentPhysicalTableName, Action.READ, Action.EXEC);
- }
- }
-
- @Override
- public void preGetSchema(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String schemaName)
- throws IOException {
- if (!accessCheckEnabled) { return; }
- for (BaseMasterAndRegionObserver observer : getAccessControllers()) {
- observer.preListNamespaceDescriptors(new ObserverContext<MasterCoprocessorEnvironment>(),
- Arrays.asList(NamespaceDescriptor.create(schemaName).build()));
- }
- }
-
- @Override
- public void preCreateSchema(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String schemaName)
- throws IOException {
- if (!accessCheckEnabled) { return; }
- for (BaseMasterAndRegionObserver observer : getAccessControllers()) {
- observer.preCreateNamespace(new ObserverContext<MasterCoprocessorEnvironment>(),
- NamespaceDescriptor.create(schemaName).build());
- }
- }
-
- @Override
- public void preDropSchema(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String schemaName)
- throws IOException {
- if (!accessCheckEnabled) { return; }
- for (BaseMasterAndRegionObserver observer : getAccessControllers()) {
- observer.preDeleteNamespace(new ObserverContext<MasterCoprocessorEnvironment>(), schemaName);
- }
- }
-
- @Override
- public void preIndexUpdate(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
- String indexName, TableName physicalTableName, TableName parentPhysicalTableName, PIndexState newState)
- throws IOException {
- if (!accessCheckEnabled) { return; }
- for (BaseMasterAndRegionObserver observer : getAccessControllers()) {
- observer.preModifyTable(new ObserverContext<MasterCoprocessorEnvironment>(), physicalTableName,
- new HTableDescriptor(physicalTableName));
- }
- // Check for read access in case of rebuild
- if (newState == PIndexState.BUILDING) {
- requireAccess("Rebuild:", parentPhysicalTableName, Action.READ, Action.EXEC);
- }
- }
-
- private List<UserPermission> getUserPermissions(final String tableName) throws IOException {
- return User.runAsLoginUser(new PrivilegedExceptionAction<List<UserPermission>>() {
- @Override
- public List<UserPermission> run() throws Exception {
- final List<UserPermission> userPermissions = new ArrayList<UserPermission>();
- try (Connection connection = ConnectionFactory.createConnection(env.getConfiguration())) {
- for (BaseMasterAndRegionObserver service : accessControllers) {
- if (service.getClass().getName().equals(org.apache.hadoop.hbase.security.access.AccessController.class.getName())) {
- userPermissions.addAll(AccessControlClient.getUserPermissions(connection, tableName));
- } else {
- AccessControlProtos.GetUserPermissionsRequest.Builder builder = AccessControlProtos.GetUserPermissionsRequest
- .newBuilder();
- builder.setTableName(ProtobufUtil.toProtoTableName(TableName.valueOf(tableName)));
- builder.setType(AccessControlProtos.Permission.Type.Table);
- AccessControlProtos.GetUserPermissionsRequest request = builder.build();
-
- PayloadCarryingRpcController controller = ((ClusterConnection)connection)
- .getRpcControllerFactory().newController();
- ((AccessControlService.Interface)service).getUserPermissions(controller, request,
- new RpcCallback<AccessControlProtos.GetUserPermissionsResponse>() {
- @Override
- public void run(AccessControlProtos.GetUserPermissionsResponse message) {
- if (message != null) {
- for (AccessControlProtos.UserPermission perm : message
- .getUserPermissionList()) {
- userPermissions.add(ProtobufUtil.toUserPermission(perm));
- }
- }
- }
- });
- }
- }
- } catch (Throwable e) {
- if (e instanceof Exception) {
- throw (Exception) e;
- } else if (e instanceof Error) {
- throw (Error) e;
- }
- throw new Exception(e);
- }
- return userPermissions;
- }
- });
- }
-
- /**
- * Authorizes that the current user has all of the given permissions for the
- * given table.
- * @param request the operation being authorized, used for audit logging
- * @param tableName the table requested
- * @throws IOException if obtaining the current user fails
- * @throws AccessDeniedException if the user lacks any of the required permissions
- */
- private void requireAccess(String request, TableName tableName, Action... permissions) throws IOException {
- User user = getActiveUser();
- AuthResult result = null;
- List<Action> requiredAccess = new ArrayList<Action>();
- for (Action permission : permissions) {
- if (hasAccess(getUserPermissions(tableName.getNameAsString()), tableName, permission, user)) {
- result = AuthResult.allow(request, "Table permission granted", user, permission, tableName, null, null);
- } else {
- result = AuthResult.deny(request, "Insufficient permissions", user, permission, tableName, null, null);
- requiredAccess.add(permission);
- }
- logResult(result);
- }
- if (!requiredAccess.isEmpty()) {
- result = AuthResult.deny(request, "Insufficient permissions", user, requiredAccess.get(0), tableName, null,
- null);
- }
- if (!result.isAllowed()) { throw new AccessDeniedException("Insufficient permissions "
- + authString(user.getName(), tableName, new HashSet<Permission.Action>(Arrays.asList(permissions)))); }
- }
-
- /**
- * Checks whether the user has access to the table for the specified action.
- *
- * @param perms all permissions granted on the table
- * @param table the table being checked
- * @param action the action for which access is required
- * @param user the user whose permissions are checked
- * @return true if the user has access to the table for the specified action, false otherwise
- */
- private boolean hasAccess(List<UserPermission> perms, TableName table, Permission.Action action, User user) {
- if (Superusers.isSuperUser(user)){
- return true;
- }
- if (perms != null) {
- List<UserPermission> permissionsForUser = getPermissionForUser(perms, user.getShortName().getBytes());
- if (permissionsForUser != null) {
- for (UserPermission permissionForUser : permissionsForUser) {
- if (permissionForUser.implies(action)) { return true; }
- }
- }
- String[] groupNames = user.getGroupNames();
- if (groupNames != null) {
- for (String group : groupNames) {
- List<UserPermission> groupPerms = getPermissionForUser(perms,(AuthUtil.toGroupEntry(group)).getBytes());
- if (groupPerms != null) for (UserPermission permissionForUser : groupPerms) {
- if (permissionForUser.implies(action)) { return true; }
- }
- }
- }
- } else if (LOG.isDebugEnabled()) {
- LOG.debug("No permissions found for table=" + table);
- }
- return false;
- }
-
- private User getActiveUser() throws IOException {
- User user = RpcServer.getRequestUser();
- if (user == null) {
- // for non-rpc handling, fallback to system user
- user = userProvider.getCurrent();
- }
- return user;
- }
-
- private void logResult(AuthResult result) {
- if (AUDITLOG.isTraceEnabled()) {
- InetAddress remoteAddr = RpcServer.getRemoteAddress();
- AUDITLOG.trace("Access " + (result.isAllowed() ? "allowed" : "denied") + " for user "
- + (result.getUser() != null ? result.getUser().getShortName() : "UNKNOWN") + "; reason: "
- + result.getReason() + "; remote address: " + (remoteAddr != null ? remoteAddr : "") + "; request: "
- + result.getRequest() + "; context: " + result.toContextString());
- }
- }
-
- private static final class Superusers {
- private static final Log LOG = LogFactory.getLog(Superusers.class);
-
- /** Configuration key for superusers */
- public static final String SUPERUSER_CONF_KEY = org.apache.hadoop.hbase.security.Superusers.SUPERUSER_CONF_KEY; // fully qualified, since this nested class shadows the HBase Superusers name
-
- private static List<String> superUsers;
- private static List<String> superGroups;
- private static User systemUser;
-
- private Superusers(){}
-
- /**
- * Pre-loads the lists of superusers and supergroups from the Configuration.
- * Subsequent calls overwrite the previously loaded lists, so the operation is idempotent.
- * @param conf configuration to load users from
- * @throws IOException if unable to initialize the lists of superusers or supergroups
- * @throws IllegalStateException if the current user cannot be obtained
- */
- public static void initialize(Configuration conf) throws IOException {
- superUsers = new ArrayList<>();
- superGroups = new ArrayList<>();
- systemUser = User.getCurrent();
-
- if (systemUser == null) {
- throw new IllegalStateException("Unable to obtain the current user, "
- + "authorization checks for internal operations will not work correctly!");
- }
-
- if (LOG.isTraceEnabled()) {
- LOG.trace("Current user name is " + systemUser.getShortName());
- }
- String currentUser = systemUser.getShortName();
- String[] superUserList = conf.getStrings(SUPERUSER_CONF_KEY, new String[0]);
- for (String name : superUserList) {
- if (AuthUtil.isGroupPrincipal(name)) {
- superGroups.add(AuthUtil.getGroupName(name));
- } else {
- superUsers.add(name);
- }
- }
- superUsers.add(currentUser);
- }
-
- /**
- * @param user the user to check
- * @return true if the given user is a superuser (whether as the user running the process,
- * an individually declared superuser, or a member of a supergroup), false otherwise
- * @throws IllegalStateException if the lists of superusers/supergroups
- * haven't been initialized properly
- */
- public static boolean isSuperUser(User user) {
- if (superUsers == null) {
- throw new IllegalStateException("Super users/super groups lists"
- + " haven't been initialized properly.");
- }
- if (superUsers.contains(user.getShortName())) {
- return true;
- }
-
- for (String group : user.getGroupNames()) {
- if (superGroups.contains(group)) {
- return true;
- }
- }
- return false;
- }
-
- public static List<String> getSuperUsers() {
- return superUsers;
- }
-
- public static User getSystemUser() {
- return systemUser;
- }
- }
-
- public String authString(String user, TableName table, Set<Action> actions) {
- StringBuilder sb = new StringBuilder();
- sb.append(" (user=").append(user != null ? user : "UNKNOWN").append(", ");
- sb.append("scope=").append(table == null ? "GLOBAL" : table.getNameWithNamespaceInclAsString()).append(", ");
- sb.append(actions != null && actions.size() > 1 ? "actions=" : "action=")
- .append(actions != null ? actions.toString() : "").append(")");
- return sb.toString();
- }
-
-}
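For orientation, the access check in the controller deleted above proceeds in a fixed order: superusers bypass ACLs, then direct user grants are consulted, then grants made to any of the user's groups. A minimal sketch of that ordering follows (illustrative only, assuming the same imports as the deleted class; it is not part of the commit):

    // Sketch of the hasAccess() ordering: superuser short-circuit,
    // then direct user grants, then grants via group membership.
    boolean allowed(User user, List<UserPermission> perms, Permission.Action action) {
        if (Superusers.isSuperUser(user)) return true;          // superusers bypass ACLs
        for (UserPermission p : perms) {
            if (Bytes.equals(p.getUser(), user.getShortName().getBytes())
                    && p.implies(action)) return true;          // direct grant to the user
        }
        for (String group : user.getGroupNames()) {
            for (UserPermission p : perms) {
                if (Bytes.equals(p.getUser(), AuthUtil.toGroupEntry(group).getBytes())
                        && p.implies(action)) return true;      // grant to one of the user's groups
            }
        }
        return false;
    }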
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5003ac30/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixMetaDataCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixMetaDataCoprocessorHost.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixMetaDataCoprocessorHost.java
deleted file mode 100644
index 15b0020..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixMetaDataCoprocessorHost.java
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.coprocessor;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.ConcurrentMap;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Coprocessor;
-import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
-import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.schema.PIndexState;
-import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.PTableType;
-
-public class PhoenixMetaDataCoprocessorHost
- extends CoprocessorHost<PhoenixMetaDataCoprocessorHost.PhoenixMetaDataControllerEnvironment> {
- private RegionCoprocessorEnvironment env;
- public static final String PHOENIX_META_DATA_COPROCESSOR_CONF_KEY =
- "hbase.coprocessor.phoenix.classes";
- public static final String DEFAULT_PHOENIX_META_DATA_COPROCESSOR_CONF_KEY="org.apache.phoenix.coprocessor.PhoenixAccessController";
-
- public PhoenixMetaDataCoprocessorHost(RegionCoprocessorEnvironment env) {
- super(null);
- this.env = env;
- this.conf = env.getConfiguration();
- boolean accessCheckEnabled = this.conf.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED,
- QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED);
- if (this.conf.get(PHOENIX_META_DATA_COPROCESSOR_CONF_KEY) == null && accessCheckEnabled) {
- this.conf.set(PHOENIX_META_DATA_COPROCESSOR_CONF_KEY, DEFAULT_PHOENIX_META_DATA_COPROCESSOR_CONF_KEY);
- }
- loadSystemCoprocessors(conf, PHOENIX_META_DATA_COPROCESSOR_CONF_KEY);
- }
-
- private static abstract class CoprocessorOperation<T extends CoprocessorEnvironment> extends ObserverContext<T> {
- abstract void call(MetaDataEndpointObserver observer, ObserverContext<T> ctx) throws IOException;
-
- public void postEnvCall(T env) {}
- }
-
- private boolean execOperation(
- final CoprocessorOperation<PhoenixMetaDataCoprocessorHost.PhoenixMetaDataControllerEnvironment> ctx)
- throws IOException {
- if (ctx == null) return false;
- boolean bypass = false;
- for (PhoenixMetaDataControllerEnvironment env : coprocessors) {
- if (env.getInstance() instanceof MetaDataEndpointObserver) {
- ctx.prepare(env);
- Thread currentThread = Thread.currentThread();
- ClassLoader cl = currentThread.getContextClassLoader();
- try {
- currentThread.setContextClassLoader(env.getClassLoader());
- ctx.call((MetaDataEndpointObserver)env.getInstance(), ctx);
- } catch (Throwable e) {
- handleCoprocessorThrowable(env, e);
- } finally {
- currentThread.setContextClassLoader(cl);
- }
- bypass |= ctx.shouldBypass();
- if (ctx.shouldComplete()) {
- break;
- }
- }
- ctx.postEnvCall(env);
- }
- return bypass;
- }
-
- @Override
- protected void handleCoprocessorThrowable(final CoprocessorEnvironment env, final Throwable e) throws IOException {
- if (e instanceof IOException) {
- if (e.getCause() instanceof DoNotRetryIOException) { throw (IOException)e.getCause(); }
- }
- super.handleCoprocessorThrowable(env, e);
- }
-
- /**
- * Encapsulation of the environment of each coprocessor
- */
- static class PhoenixMetaDataControllerEnvironment extends CoprocessorHost.Environment
- implements RegionCoprocessorEnvironment {
-
- private RegionCoprocessorEnvironment env;
-
- public PhoenixMetaDataControllerEnvironment(RegionCoprocessorEnvironment env, Coprocessor instance,
- int priority, int sequence, Configuration conf) {
- super(instance, priority, sequence, conf);
- this.env = env;
- }
-
- @Override
- public RegionServerServices getRegionServerServices() {
- return env.getRegionServerServices();
- }
-
- public RegionCoprocessorHost getCoprocessorHost() {
- return env.getRegion().getCoprocessorHost();
- }
-
- @Override
- public Region getRegion() {
- return env.getRegion();
- }
-
- @Override
- public HRegionInfo getRegionInfo() {
- return env.getRegionInfo();
- }
-
- @Override
- public ConcurrentMap<String, Object> getSharedData() {
- return env.getSharedData();
- }
- }
-
- @Override
- public PhoenixMetaDataControllerEnvironment createEnvironment(Class<?> implClass, Coprocessor instance,
- int priority, int sequence, Configuration conf) {
- return new PhoenixMetaDataControllerEnvironment(env, instance, priority, sequence, conf);
- }
-
- public void preGetTable(final String tenantId, final String tableName, final TableName physicalTableName)
- throws IOException {
- execOperation(new CoprocessorOperation<PhoenixMetaDataControllerEnvironment>() {
- @Override
- public void call(MetaDataEndpointObserver observer,
- ObserverContext<PhoenixMetaDataControllerEnvironment> ctx) throws IOException {
- observer.preGetTable(ctx, tenantId, tableName, physicalTableName);
- }
- });
- }
-
- public void preCreateTable(final String tenantId, final String tableName, final TableName physicalTableName,
- final TableName parentPhysicalTableName, final PTableType tableType, final Set<byte[]> familySet, final Set<TableName> indexes)
- throws IOException {
- execOperation(new CoprocessorOperation<PhoenixMetaDataControllerEnvironment>() {
- @Override
- public void call(MetaDataEndpointObserver observer,
- ObserverContext<PhoenixMetaDataControllerEnvironment> ctx) throws IOException {
- observer.preCreateTable(ctx, tenantId, tableName, physicalTableName, parentPhysicalTableName, tableType,
- familySet, indexes);
- }
- });
- }
-
- public void preDropTable(final String tenantId, final String tableName, final TableName physicalTableName,
- final TableName parentPhysicalTableName, final PTableType tableType, final List<PTable> indexes) throws IOException {
- execOperation(new CoprocessorOperation<PhoenixMetaDataControllerEnvironment>() {
- @Override
- public void call(MetaDataEndpointObserver observer,
- ObserverContext<PhoenixMetaDataControllerEnvironment> ctx) throws IOException {
- observer.preDropTable(ctx, tenantId, tableName, physicalTableName, parentPhysicalTableName, tableType, indexes);
- }
- });
- }
-
- public void preAlterTable(final String tenantId, final String tableName, final TableName physicalTableName,
- final TableName parentPhysicalTableName, final PTableType type) throws IOException {
- execOperation(new CoprocessorOperation<PhoenixMetaDataControllerEnvironment>() {
- @Override
- public void call(MetaDataEndpointObserver observer,
- ObserverContext<PhoenixMetaDataControllerEnvironment> ctx) throws IOException {
- observer.preAlterTable(ctx, tenantId, tableName, physicalTableName, parentPhysicalTableName, type);
- }
- });
- }
-
- public void preGetSchema(final String schemaName) throws IOException {
- execOperation(new CoprocessorOperation<PhoenixMetaDataControllerEnvironment>() {
- @Override
- public void call(MetaDataEndpointObserver observer,
- ObserverContext<PhoenixMetaDataControllerEnvironment> ctx) throws IOException {
- observer.preGetSchema(ctx, schemaName);
- }
- });
- }
-
- public void preCreateSchema(final String schemaName) throws IOException {
-
- execOperation(new CoprocessorOperation<PhoenixMetaDataControllerEnvironment>() {
- @Override
- public void call(MetaDataEndpointObserver observer,
- ObserverContext<PhoenixMetaDataControllerEnvironment> ctx) throws IOException {
- observer.preCreateSchema(ctx, schemaName);
- }
- });
- }
-
- public void preDropSchema(final String schemaName) throws IOException {
- execOperation(new CoprocessorOperation<PhoenixMetaDataControllerEnvironment>() {
- @Override
- public void call(MetaDataEndpointObserver observer,
- ObserverContext<PhoenixMetaDataControllerEnvironment> ctx) throws IOException {
- observer.preDropSchema(ctx, schemaName);
- }
- });
- }
-
- public void preIndexUpdate(final String tenantId, final String indexName, final TableName physicalTableName,
- final TableName parentPhysicalTableName, final PIndexState newState) throws IOException {
- execOperation(new CoprocessorOperation<PhoenixMetaDataControllerEnvironment>() {
- @Override
- public void call(MetaDataEndpointObserver observer,
- ObserverContext<PhoenixMetaDataControllerEnvironment> ctx) throws IOException {
- observer.preIndexUpdate(ctx, tenantId, indexName, physicalTableName, parentPhysicalTableName, newState);
- }
- });
- }
-}
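The host deleted above loads its observers from the hbase.coprocessor.phoenix.classes key, defaulting to PhoenixAccessController when ACLs are enabled. As a rough sketch (not part of the commit), wiring in an additional hook would have looked like the following, where com.example.MyMetaDataObserver is a hypothetical class implementing MetaDataEndpointObserver:

    // Hypothetical wiring; key name and default value taken from the deleted class above.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.coprocessor.phoenix.classes",
        "org.apache.phoenix.coprocessor.PhoenixAccessController,com.example.MyMetaDataObserver");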
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5003ac30/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index ba6371b..0fc138f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -18,7 +18,6 @@
package org.apache.phoenix.index;
import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
import java.sql.SQLException;
import java.util.Collection;
import java.util.Collections;
@@ -40,7 +39,6 @@ import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
@@ -163,12 +161,12 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
}
private long handleFailureWithExceptions(Multimap<HTableInterfaceReference, Mutation> attempted,
- final Exception cause) throws Throwable {
+ Exception cause) throws Throwable {
Set<HTableInterfaceReference> refs = attempted.asMap().keySet();
- final Map<String, Long> indexTableNames = new HashMap<String, Long>(refs.size());
+ Map<String, Long> indexTableNames = new HashMap<String, Long>(refs.size());
// start by looking at all the tables to which we attempted to write
long timestamp = 0;
- final boolean leaveIndexActive = blockDataTableWritesOnFailure || !disableIndexOnFailure;
+ boolean leaveIndexActive = blockDataTableWritesOnFailure || !disableIndexOnFailure;
// if using TrackingParallelWriter, we know which indexes failed and only disable those
Set<HTableInterfaceReference> failedTables = cause instanceof MultiIndexWriteFailureException
? new HashSet<HTableInterfaceReference>(((MultiIndexWriteFailureException)cause).getFailedTables())
@@ -212,66 +210,55 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
return timestamp;
}
- final PIndexState newState = disableIndexOnFailure ? PIndexState.DISABLE : PIndexState.PENDING_ACTIVE;
- final long fTimestamp=timestamp;
+ PIndexState newState = disableIndexOnFailure ? PIndexState.DISABLE : PIndexState.PENDING_ACTIVE;
// for all the index tables that we've found, try to disable them and if that fails, try to
- return User.runAsLoginUser(new PrivilegedExceptionAction<Long>() {
- @Override
- public Long run() throws Exception {
- for (Map.Entry<String, Long> tableTimeElement : indexTableNames.entrySet()) {
- String indexTableName = tableTimeElement.getKey();
- long minTimeStamp = tableTimeElement.getValue();
- // We need a way of differentiating the block writes to data table case from
- // the leave index active case. In either case, we need to know the time stamp
- // at which writes started failing so we can rebuild from that point. If we
- // keep the index active *and* have a positive INDEX_DISABLE_TIMESTAMP_BYTES,
- // then writes to the data table will be blocked (this is client side logic
- // and we can't change this in a minor release). So we use the sign of the
- // time stamp to differentiate.
- if (!disableIndexOnFailure && !blockDataTableWritesOnFailure) {
- minTimeStamp *= -1;
- }
- // Disable the index by using the updateIndexState method of the MetaDataProtocol endpoint coprocessor.
- try (HTableInterface systemTable = env.getTable(SchemaUtil.getPhysicalTableName(
- PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()))) {
- MetaDataMutationResult result = IndexUtil.updateIndexState(indexTableName, minTimeStamp,
- systemTable, newState);
- if (result.getMutationCode() == MutationCode.TABLE_NOT_FOUND) {
- LOG.info("Index " + indexTableName + " has been dropped. Ignore uncommitted mutations");
- continue;
- }
- if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
- if (leaveIndexActive) {
- LOG.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " + " failed with code = "
- + result.getMutationCode());
- // If we're not disabling the index, then we don't want to throw as throwing
- // will lead to the RS being shutdown.
- if (blockDataTableWritesOnFailure) { throw new DoNotRetryIOException(
- "Attempt to update INDEX_DISABLE_TIMESTAMP failed."); }
- } else {
- LOG.warn("Attempt to disable index " + indexTableName + " failed with code = "
- + result.getMutationCode() + ". Will use default failure policy instead.");
- throw new DoNotRetryIOException("Attempt to disable " + indexTableName + " failed.");
- }
- }
- if (leaveIndexActive)
- LOG.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + indexTableName
- + " due to an exception while writing updates.", cause);
- else
- LOG.info("Successfully disabled index " + indexTableName
- + " due to an exception while writing updates.", cause);
- } catch (Throwable t) {
- if (t instanceof Exception) {
- throw (Exception)t;
- } else {
- throw new Exception(t);
+ for (Map.Entry<String, Long> tableTimeElement :indexTableNames.entrySet()){
+ String indexTableName = tableTimeElement.getKey();
+ long minTimeStamp = tableTimeElement.getValue();
+ // We need a way of differentiating the block writes to data table case from
+ // the leave index active case. In either case, we need to know the time stamp
+ // at which writes started failing so we can rebuild from that point. If we
+ // keep the index active *and* have a positive INDEX_DISABLE_TIMESTAMP_BYTES,
+ // then writes to the data table will be blocked (this is client side logic
+ // and we can't change this in a minor release). So we use the sign of the
+ // time stamp to differentiate.
+ if (!disableIndexOnFailure && !blockDataTableWritesOnFailure) {
+ minTimeStamp *= -1;
+ }
+ // Disable the index by using the updateIndexState method of the MetaDataProtocol endpoint coprocessor.
+ try (HTableInterface systemTable = env.getTable(SchemaUtil
+ .getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()))) {
+ MetaDataMutationResult result = IndexUtil.updateIndexState(indexTableName, minTimeStamp,
+ systemTable, newState);
+ if (result.getMutationCode() == MutationCode.TABLE_NOT_FOUND) {
+ LOG.info("Index " + indexTableName + " has been dropped. Ignore uncommitted mutations");
+ continue;
+ }
+ if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
+ if (leaveIndexActive) {
+ LOG.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " + " failed with code = "
+ + result.getMutationCode());
+ // If we're not disabling the index, then we don't want to throw as throwing
+ // will lead to the RS being shutdown.
+ if (blockDataTableWritesOnFailure) {
+ throw new DoNotRetryIOException("Attempt to update INDEX_DISABLE_TIMESTAMP failed.");
}
- }
+ } else {
+ LOG.warn("Attempt to disable index " + indexTableName + " failed with code = "
+ + result.getMutationCode() + ". Will use default failure policy instead.");
+ throw new DoNotRetryIOException("Attempt to disable " + indexTableName + " failed.");
+ }
}
- // Return the cell time stamp (note they should all be the same)
- return fTimestamp;
+ if (leaveIndexActive)
+ LOG.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + indexTableName + " due to an exception while writing updates.",
+ cause);
+ else
+ LOG.info("Successfully disabled index " + indexTableName + " due to an exception while writing updates.",
+ cause);
}
- });
+ }
+ // Return the cell time stamp (note they should all be the same)
+ return timestamp;
}
private Collection<? extends String> getLocalIndexNames(HTableInterfaceReference ref,
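The comment retained in the restored code above packs two failure policies into one value: the sign of INDEX_DISABLE_TIMESTAMP distinguishes "leave the index active and rebuild from that point" from "disable the index or block data table writes". Reduced to a sketch (illustrative names, not part of the commit):

    // A negative INDEX_DISABLE_TIMESTAMP means the index stays active and is
    // rebuilt from |ts| without blocking data table writes; a positive value
    // disables the index or blocks writes from ts onward.
    long encodeIndexDisableTimestamp(long failureTimestamp,
            boolean disableIndexOnFailure, boolean blockDataTableWritesOnFailure) {
        return (!disableIndexOnFailure && !blockDataTableWritesOnFailure)
                ? -failureTimestamp : failureTimestamp;
    }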
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5003ac30/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 0c4e951..6ddcc7e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -868,8 +868,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
}
}
- if ((SchemaUtil.isStatsTable(tableName) || SchemaUtil.isMetaTable(tableName))
- && !descriptor.hasCoprocessor(MultiRowMutationEndpoint.class.getName())) {
+ if (SchemaUtil.isStatsTable(tableName) && !descriptor.hasCoprocessor(MultiRowMutationEndpoint.class.getName())) {
descriptor.addCoprocessor(MultiRowMutationEndpoint.class.getName(),
null, priority, null);
}
@@ -2489,8 +2488,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
final TableName mutexTableName = TableName.valueOf(
PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES);
List<TableName> systemTables = getSystemTableNames(admin);
- if (systemTables.contains(mutexTableName) || admin.tableExists( TableName.valueOf(
- PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME,PhoenixDatabaseMetaData.SYSTEM_MUTEX_TABLE_NAME))) {
+ if (systemTables.contains(mutexTableName)) {
logger.debug("System mutex table already appears to exist, not creating it");
return;
}
@@ -2507,15 +2505,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
put.add(PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES, UPGRADE_MUTEX, UPGRADE_MUTEX_UNLOCKED);
sysMutexTable.put(put);
}
- } catch (TableExistsException | AccessDeniedException e) {
+ } catch (TableExistsException e) {
// Ignore
- }catch(PhoenixIOException e){
- if(e.getCause()!=null && e.getCause() instanceof AccessDeniedException)
- {
- //Ignore
- }else{
- throw e;
- }
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5003ac30/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index b9ed734..a4a4124 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -260,10 +260,6 @@ public interface QueryServices extends SQLCloseable {
// currently BASE64 and ASCII are supported
public static final String UPLOAD_BINARY_DATA_TYPE_ENCODING = "phoenix.upload.binaryDataType.encoding";
- // Toggle for server-written updates to SYSTEM.CATALOG
- public static final String PHOENIX_ACLS_ENABLED = "phoenix.acls.enabled";
- public static final String PHOENIX_AUTOMATIC_GRANT_ENABLED = "phoenix.security.automatic.grant.enabled";
- public static final String PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED = "phoenix.security.strict.mode.enabled";
public static final String INDEX_ASYNC_BUILD_ENABLED = "phoenix.index.async.build.enabled";
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5003ac30/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index a586c28..af6a054 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -58,14 +58,11 @@ import static org.apache.phoenix.query.QueryServices.MAX_TENANT_MEMORY_PERC_ATTR
import static org.apache.phoenix.query.QueryServices.MIN_STATS_UPDATE_FREQ_MS_ATTRIB;
import static org.apache.phoenix.query.QueryServices.MUTATE_BATCH_SIZE_ATTRIB;
import static org.apache.phoenix.query.QueryServices.NUM_RETRIES_FOR_SCHEMA_UPDATE_CHECK;
-import static org.apache.phoenix.query.QueryServices.PHOENIX_ACLS_ENABLED;
-import static org.apache.phoenix.query.QueryServices.PHOENIX_AUTOMATIC_GRANT_ENABLED;
import static org.apache.phoenix.query.QueryServices.PHOENIX_QUERY_SERVER_CLUSTER_BASE_PATH;
import static org.apache.phoenix.query.QueryServices.PHOENIX_QUERY_SERVER_LOADBALANCER_ENABLED;
import static org.apache.phoenix.query.QueryServices.PHOENIX_QUERY_SERVER_SERVICE_NAME;
import static org.apache.phoenix.query.QueryServices.PHOENIX_QUERY_SERVER_ZK_ACL_PASSWORD;
import static org.apache.phoenix.query.QueryServices.PHOENIX_QUERY_SERVER_ZK_ACL_USERNAME;
-import static org.apache.phoenix.query.QueryServices.PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED;
import static org.apache.phoenix.query.QueryServices.QUEUE_SIZE_ATTRIB;
import static org.apache.phoenix.query.QueryServices.REGIONSERVER_INFO_PORT_ATTRIB;
import static org.apache.phoenix.query.QueryServices.RENEW_LEASE_ENABLED;
@@ -319,11 +316,6 @@ public class QueryServicesOptions {
public static final int DEFAULT_CLIENT_CONNECTION_MAX_ALLOWED_CONNECTIONS = 0;
public static final boolean DEFAULT_STATS_COLLECTION_ENABLED = true;
public static final boolean DEFAULT_USE_STATS_FOR_PARALLELIZATION = true;
-
- //Security defaults
- public static final boolean DEFAULT_PHOENIX_ACLS_ENABLED = false;
- public static final boolean DEFAULT_PHOENIX_AUTOMATIC_GRANT_ENABLED = false;
- public static final boolean DEFAULT_PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED = true;
//default update cache frequency
public static final int DEFAULT_UPDATE_CACHE_FREQUENCY = 0;
@@ -421,11 +413,7 @@ public class QueryServicesOptions {
.setIfUnset(TRACING_BATCH_SIZE, DEFAULT_TRACING_BATCH_SIZE)
.setIfUnset(TRACING_THREAD_POOL_SIZE, DEFAULT_TRACING_THREAD_POOL_SIZE)
.setIfUnset(STATS_COLLECTION_ENABLED, DEFAULT_STATS_COLLECTION_ENABLED)
- .setIfUnset(USE_STATS_FOR_PARALLELIZATION, DEFAULT_USE_STATS_FOR_PARALLELIZATION)
- .setIfUnset(UPLOAD_BINARY_DATA_TYPE_ENCODING, DEFAULT_UPLOAD_BINARY_DATA_TYPE_ENCODING)
- .setIfUnset(PHOENIX_ACLS_ENABLED, DEFAULT_PHOENIX_ACLS_ENABLED)
- .setIfUnset(PHOENIX_AUTOMATIC_GRANT_ENABLED, DEFAULT_PHOENIX_AUTOMATIC_GRANT_ENABLED)
- .setIfUnset(PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED, DEFAULT_PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED);
+ .setIfUnset(USE_STATS_FOR_PARALLELIZATION, DEFAULT_USE_STATS_FOR_PARALLELIZATION);
// HBase sets this to 1, so we reset it to something more appropriate.
// Hopefully HBase will change this, because we can't know if a user set
// it to 1, so we'll change it.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5003ac30/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
index 8956862..3ae3183 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
@@ -25,7 +25,6 @@ import java.io.DataInput;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
import java.sql.Date;
import java.util.ArrayList;
import java.util.List;
@@ -47,7 +46,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Mut
import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.coprocessor.MetaDataProtocol;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
@@ -210,31 +208,23 @@ public class StatisticsWriter implements Closeable {
}
}
- public void commitStats(final List<Mutation> mutations, final StatisticsCollector statsCollector)
- throws IOException {
- User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
- @Override
- public Void run() throws Exception {
- commitLastStatsUpdatedTime(statsCollector);
- if (mutations.size() > 0) {
- byte[] row = mutations.get(0).getRow();
- MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder();
- for (Mutation m : mutations) {
- mrmBuilder.addMutationRequest(ProtobufUtil.toMutation(getMutationType(m), m));
- }
- MutateRowsRequest mrm = mrmBuilder.build();
- CoprocessorRpcChannel channel = statsWriterTable.coprocessorService(row);
- MultiRowMutationService.BlockingInterface service = MultiRowMutationService
- .newBlockingStub(channel);
- try {
- service.mutateRows(null, mrm);
- } catch (ServiceException ex) {
- ProtobufUtil.toIOException(ex);
- }
- }
- return null;
+ public void commitStats(List<Mutation> mutations, StatisticsCollector statsCollector) throws IOException {
+ commitLastStatsUpdatedTime(statsCollector);
+ if (mutations.size() > 0) {
+ byte[] row = mutations.get(0).getRow();
+ MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder();
+ for (Mutation m : mutations) {
+ mrmBuilder.addMutationRequest(ProtobufUtil.toMutation(getMutationType(m), m));
}
- });
+ MutateRowsRequest mrm = mrmBuilder.build();
+ CoprocessorRpcChannel channel = statsWriterTable.coprocessorService(row);
+ MultiRowMutationService.BlockingInterface service = MultiRowMutationService.newBlockingStub(channel);
+ try {
+ service.mutateRows(null, mrm);
+ } catch (ServiceException ex) {
+ ProtobufUtil.toIOException(ex);
+ }
+ }
}
private Put getLastStatsUpdatedTimePut(long timeStamp) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5003ac30/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index 3c52d89..5e8e3bc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -59,7 +59,6 @@ import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PNameFactory;
import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.PTable.IndexType;
import org.apache.phoenix.schema.PTable.LinkType;
import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.SequenceKey;
@@ -227,16 +226,6 @@ public class MetaDataUtil {
}
return null;
}
-
- public static boolean isNameSpaceMapped(List<Mutation> tableMetaData, KeyValueBuilder builder,
- ImmutableBytesWritable value) {
- if (getMutationValue(getPutOnlyTableHeaderRow(tableMetaData),
- PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED_BYTES, builder, value)) {
- return (boolean)PBoolean.INSTANCE.toObject(ByteUtil.copyKeyBytesIfNecessary(value));
- }
- return false;
- }
-
public static long getParentSequenceNumber(List<Mutation> tableMetaData) {
return getSequenceNumber(getParentTableHeaderRow(tableMetaData));
@@ -680,11 +669,4 @@ public class MetaDataUtil {
byte[] physicalTableName = Bytes.toBytes(SchemaUtil.getTableNameFromFullName(view.getPhysicalName().getString()));
return SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, physicalTableSchemaName, physicalTableName);
}
-
- public static IndexType getIndexType(List<Mutation> tableMetaData, KeyValueBuilder builder,
- ImmutableBytesWritable value) {
- if (getMutationValue(getPutOnlyTableHeaderRow(tableMetaData), PhoenixDatabaseMetaData.INDEX_TYPE_BYTES, builder,
- value)) { return IndexType.fromSerializedValue(value.get()[value.getOffset()]); }
- return null;
- }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5003ac30/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
index 47b4b43..51f6ff9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
@@ -20,11 +20,9 @@ package org.apache.phoenix.util;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Strings.isNullOrEmpty;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED_BYTES;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME_BYTES;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
@@ -44,10 +42,8 @@ import java.util.TreeSet;
import javax.annotation.Nullable;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;
@@ -80,7 +76,6 @@ import org.apache.phoenix.schema.SaltingUtil;
import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.TableProperty;
import org.apache.phoenix.schema.ValueSchema.Field;
-import org.apache.phoenix.schema.types.PBoolean;
import org.apache.phoenix.schema.types.PDataType;
import org.apache.phoenix.schema.types.PVarbinary;
import org.apache.phoenix.schema.types.PVarchar;
@@ -1135,11 +1130,4 @@ public class SchemaUtil {
}
return false;
}
-
- public static boolean isNamespaceMapped(Result currentResult) {
- Cell isNamespaceMappedCell = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, IS_NAMESPACE_MAPPED_BYTES);
- return isNamespaceMappedCell!=null && (boolean) PBoolean.INSTANCE.toObject(isNamespaceMappedCell.getValue());
- }
-
-
}
[19/37] phoenix git commit: PHOENIX-4290 Full table scan performed
for DELETE with table having immutable indexes
Posted by ja...@apache.org.
PHOENIX-4290 Full table scan performed for DELETE with table having immutable indexes
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e0df4b2e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e0df4b2e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e0df4b2e
Branch: refs/heads/4.x-HBase-1.1
Commit: e0df4b2e6c2f386336f660301a093e295f489ec4
Parents: 969b79c
Author: James Taylor <jt...@salesforce.com>
Authored: Mon Oct 30 19:25:53 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:39 2017 -0800
----------------------------------------------------------------------
.../org/apache/phoenix/end2end/DeleteIT.java | 134 ++-
.../phoenix/end2end/index/ImmutableIndexIT.java | 22 +-
.../end2end/index/IndexMaintenanceIT.java | 18 +-
.../org/apache/phoenix/tx/TxCheckpointIT.java | 18 +-
.../apache/phoenix/compile/DeleteCompiler.java | 849 ++++++++++---------
.../apache/phoenix/compile/FromCompiler.java | 49 +-
.../compile/TupleProjectionCompiler.java | 2 +-
.../phoenix/exception/SQLExceptionCode.java | 1 -
.../apache/phoenix/execute/MutationState.java | 4 +-
.../apache/phoenix/index/IndexMaintainer.java | 35 +-
.../apache/phoenix/optimize/QueryOptimizer.java | 2 +-
.../org/apache/phoenix/schema/PTableImpl.java | 10 +
.../java/org/apache/phoenix/util/IndexUtil.java | 18 +-
.../phoenix/compile/QueryCompilerTest.java | 27 -
14 files changed, 643 insertions(+), 546 deletions(-)
----------------------------------------------------------------------
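For context, the tests below exercise DELETEs whose filters are covered by an immutable index; before this change such statements either failed with INVALID_FILTER_ON_IMMUTABLE_ROWS or fell back to a full table scan. A minimal sketch of the shape involved (hypothetical table and index names, not taken from the commit):

    // Hypothetical table/index; the DELETE's filter is on the indexed column,
    // so after this change it can be served by a scan over the index.
    stmt.execute("CREATE TABLE t (k VARCHAR PRIMARY KEY, v BIGINT) IMMUTABLE_ROWS=true");
    stmt.execute("CREATE INDEX idx ON t (v)");
    stmt.execute("DELETE FROM t WHERE v = 4");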
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e0df4b2e/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
index 09e1021..aa4d36e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
@@ -19,7 +19,6 @@ package org.apache.phoenix.end2end;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
import java.sql.Connection;
import java.sql.Date;
@@ -33,7 +32,10 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.List;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.TestUtil;
import org.junit.Test;
@@ -136,18 +138,25 @@ public class DeleteIT extends ParallelStatsDisabledIT {
rs.close();
}
- private static void assertIndexUsed (Connection conn, String query, String indexName, boolean expectedToBeUsed) throws SQLException {
- assertIndexUsed(conn, query, Collections.emptyList(), indexName, expectedToBeUsed);
+ private static void assertIndexUsed (Connection conn, String query, String indexName, boolean expectedToBeUsed, boolean local) throws SQLException {
+ assertIndexUsed(conn, query, Collections.emptyList(), indexName, expectedToBeUsed, local);
}
- private static void assertIndexUsed (Connection conn, String query, List<Object> binds, String indexName, boolean expectedToBeUsed) throws SQLException {
+ private static void assertIndexUsed (Connection conn, String query, List<Object> binds, String indexName, boolean expectedToBeUsed, boolean local) throws SQLException {
PreparedStatement stmt = conn.prepareStatement("EXPLAIN " + query);
for (int i = 0; i < binds.size(); i++) {
stmt.setObject(i+1, binds.get(i));
}
ResultSet rs = stmt.executeQuery();
String explainPlan = QueryUtil.getExplainPlan(rs);
- assertEquals(expectedToBeUsed, explainPlan.contains(" SCAN OVER " + indexName));
+ // It's currently very difficult to check whether a local index is being used.
+ // This check is brittle, as it only verifies that the index ID appears in the range scan.
+ // TODO: surface QueryPlan from MutationPlan
+ if (local) {
+ assertEquals(expectedToBeUsed, explainPlan.contains(indexName + " [1]") || explainPlan.contains(indexName + " [1,"));
+ } else {
+ assertEquals(expectedToBeUsed, explainPlan.contains(" SCAN OVER " + indexName));
+ }
}
private void testDeleteRange(boolean autoCommit, boolean createIndex) throws Exception {
@@ -190,9 +199,7 @@ public class DeleteIT extends ParallelStatsDisabledIT {
PreparedStatement stmt;
conn.setAutoCommit(autoCommit);
deleteStmt = "DELETE FROM " + tableName + " WHERE i >= ? and i < ?";
- if(!local) {
- assertIndexUsed(conn, deleteStmt, Arrays.<Object>asList(5,10), indexInUse, false);
- }
+ assertIndexUsed(conn, deleteStmt, Arrays.<Object>asList(5,10), indexInUse, false, local);
stmt = conn.prepareStatement(deleteStmt);
stmt.setInt(1, 5);
stmt.setInt(2, 10);
@@ -202,7 +209,7 @@ public class DeleteIT extends ParallelStatsDisabledIT {
}
String query = "SELECT count(*) FROM " + tableName;
- assertIndexUsed(conn, query, indexInUse, createIndex);
+ assertIndexUsed(conn, query, indexInUse, createIndex, local);
query = "SELECT count(*) FROM " + tableName;
rs = conn.createStatement().executeQuery(query);
assertTrue(rs.next());
@@ -210,9 +217,7 @@ public class DeleteIT extends ParallelStatsDisabledIT {
deleteStmt = "DELETE FROM " + tableName + " WHERE j IS NULL";
stmt = conn.prepareStatement(deleteStmt);
- if(!local) {
- assertIndexUsed(conn, deleteStmt, indexInUse, createIndex);
- }
+ assertIndexUsed(conn, deleteStmt, indexInUse, createIndex, local);
int deleteCount = stmt.executeUpdate();
assertEquals(3, deleteCount);
if (!autoCommit) {
@@ -254,40 +259,40 @@ public class DeleteIT extends ParallelStatsDisabledIT {
}
@Test
- public void testDeleteAllFromTableWithIndexAutoCommitSalting() throws SQLException {
+ public void testDeleteAllFromTableWithIndexAutoCommitSalting() throws Exception {
testDeleteAllFromTableWithIndex(true, true, false);
}
@Test
- public void testDeleteAllFromTableWithLocalIndexAutoCommitSalting() throws SQLException {
+ public void testDeleteAllFromTableWithLocalIndexAutoCommitSalting() throws Exception {
testDeleteAllFromTableWithIndex(true, true, true);
}
@Test
- public void testDeleteAllFromTableWithIndexAutoCommitNoSalting() throws SQLException {
+ public void testDeleteAllFromTableWithIndexAutoCommitNoSalting() throws Exception {
testDeleteAllFromTableWithIndex(true, false);
}
@Test
- public void testDeleteAllFromTableWithIndexNoAutoCommitNoSalting() throws SQLException {
+ public void testDeleteAllFromTableWithIndexNoAutoCommitNoSalting() throws Exception {
testDeleteAllFromTableWithIndex(false,false);
}
@Test
- public void testDeleteAllFromTableWithIndexNoAutoCommitSalted() throws SQLException {
+ public void testDeleteAllFromTableWithIndexNoAutoCommitSalted() throws Exception {
testDeleteAllFromTableWithIndex(false, true, false);
}
@Test
- public void testDeleteAllFromTableWithLocalIndexNoAutoCommitSalted() throws SQLException {
+ public void testDeleteAllFromTableWithLocalIndexNoAutoCommitSalted() throws Exception {
testDeleteAllFromTableWithIndex(false, true, true);
}
- private void testDeleteAllFromTableWithIndex(boolean autoCommit, boolean isSalted) throws SQLException {
+ private void testDeleteAllFromTableWithIndex(boolean autoCommit, boolean isSalted) throws Exception {
testDeleteAllFromTableWithIndex(autoCommit, isSalted, false);
}
- private void testDeleteAllFromTableWithIndex(boolean autoCommit, boolean isSalted, boolean localIndex) throws SQLException {
+ private void testDeleteAllFromTableWithIndex(boolean autoCommit, boolean isSalted, boolean localIndex) throws Exception {
Connection con = null;
try {
con = DriverManager.getConnection(getUrl());
@@ -334,6 +339,8 @@ public class DeleteIT extends ParallelStatsDisabledIT {
con.commit();
}
+ TestUtil.dumpTable(con.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(tableName)));
+
ResultSet rs = con.createStatement().executeQuery("SELECT /*+ NO_INDEX */ count(*) FROM " + tableName);
assertTrue(rs.next());
assertEquals(0, rs.getLong(1));
@@ -354,16 +361,16 @@ public class DeleteIT extends ParallelStatsDisabledIT {
}
@Test
- public void testDeleteRowFromTableWithImmutableIndex() throws SQLException {
- testDeleteRowFromTableWithImmutableIndex(false);
+ public void testDeleteRowFromTableWithImmutableIndex() throws Exception {
+ testDeleteRowFromTableWithImmutableIndex(false, true);
}
@Test
- public void testDeleteRowFromTableWithImmutableLocalIndex() throws SQLException {
- testDeleteRowFromTableWithImmutableIndex(true);
+ public void testDeleteRowFromTableWithImmutableLocalIndex() throws Exception {
+ testDeleteRowFromTableWithImmutableIndex(true, false);
}
- public void testDeleteRowFromTableWithImmutableIndex(boolean localIndex) throws SQLException {
+ public void testDeleteRowFromTableWithImmutableIndex(boolean localIndex, boolean useCoveredIndex) throws Exception {
Connection con = null;
try {
boolean autoCommit = false;
@@ -375,6 +382,7 @@ public class DeleteIT extends ParallelStatsDisabledIT {
String tableName = generateUniqueName();
String indexName1 = generateUniqueName();
String indexName2 = generateUniqueName();
+ String indexName3 = useCoveredIndex? generateUniqueName() : null;
stm.execute("CREATE TABLE IF NOT EXISTS " + tableName + " (" +
"HOST CHAR(2) NOT NULL," +
@@ -387,6 +395,9 @@ public class DeleteIT extends ParallelStatsDisabledIT {
"CONSTRAINT PK PRIMARY KEY (HOST, DOMAIN, FEATURE, \"DATE\")) IMMUTABLE_ROWS=true");
stm.execute("CREATE " + (localIndex ? "LOCAL" : "") + " INDEX " + indexName1 + " ON " + tableName + " (\"DATE\", FEATURE)");
stm.execute("CREATE " + (localIndex ? "LOCAL" : "") + " INDEX " + indexName2 + " ON " + tableName + " (\"DATE\", FEATURE, USAGE.DB)");
+ if (useCoveredIndex) {
+ stm.execute("CREATE " + (localIndex ? "LOCAL" : "") + " INDEX " + indexName3 + " ON " + tableName + " (STATS.ACTIVE_VISITOR) INCLUDE (USAGE.CORE, USAGE.DB)");
+ }
stm.close();
Date date = new Date(0);
@@ -400,39 +411,48 @@ public class DeleteIT extends ParallelStatsDisabledIT {
psInsert.setLong(6, 2L);
psInsert.setLong(7, 3);
psInsert.execute();
- psInsert.close();
if (!autoCommit) {
con.commit();
}
- psInsert = con.prepareStatement("DELETE FROM " + tableName + " WHERE (HOST, DOMAIN, FEATURE, \"DATE\") = (?,?,?,?)");
- psInsert.setString(1, "AA");
- psInsert.setString(2, "BB");
- psInsert.setString(3, "CC");
- psInsert.setDate(4, date);
- psInsert.execute();
+ PreparedStatement psDelete = con.prepareStatement("DELETE FROM " + tableName + " WHERE (HOST, DOMAIN, FEATURE, \"DATE\") = (?,?,?,?)");
+ psDelete.setString(1, "AA");
+ psDelete.setString(2, "BB");
+ psDelete.setString(3, "CC");
+ psDelete.setDate(4, date);
+ psDelete.execute();
if (!autoCommit) {
con.commit();
}
- ResultSet rs = con.createStatement().executeQuery("SELECT /*+ NO_INDEX */ count(*) FROM " + tableName);
- assertTrue(rs.next());
- assertEquals(0, rs.getLong(1));
+ assertDeleted(con, tableName, indexName1, indexName2, indexName3);
- rs = con.createStatement().executeQuery("SELECT count(*) FROM " + indexName1);
- assertTrue(rs.next());
- assertEquals(0, rs.getLong(1));
+ psInsert.execute();
+ if (!autoCommit) {
+ con.commit();
+ }
- stm.execute("DROP INDEX " + indexName1 + " ON " + tableName);
- stm.execute("DROP INDEX " + indexName2 + " ON " + tableName);
+ psDelete = con.prepareStatement("DELETE FROM " + tableName + " WHERE USAGE.DB=2");
+ psDelete.execute();
+ if (!autoCommit) {
+ con.commit();
+ }
+
+ assertDeleted(con, tableName, indexName1, indexName2, indexName3);
+
+ psInsert.execute();
+ if (!autoCommit) {
+ con.commit();
+ }
- stm.execute("CREATE " + (localIndex ? "LOCAL" : "") + " INDEX " + indexName1 + " ON " + tableName + " (USAGE.DB)");
- stm.execute("CREATE " + (localIndex ? "LOCAL" : "") + " INDEX " + indexName2 + " ON " + tableName + " (USAGE.DB, \"DATE\")");
- try{
- psInsert = con.prepareStatement("DELETE FROM " + tableName + " WHERE USAGE.DB=2");
- } catch(Exception e) {
- fail("There should not be any exception while deleting row");
+ psDelete = con.prepareStatement("DELETE FROM " + tableName + " WHERE ACTIVE_VISITOR=3");
+ psDelete.execute();
+ if (!autoCommit) {
+ con.commit();
}
+
+ assertDeleted(con, tableName, indexName1, indexName2, indexName3);
+
} finally {
try {
con.close();
@@ -440,6 +460,28 @@ public class DeleteIT extends ParallelStatsDisabledIT {
}
}
}
+
+ private static void assertDeleted(Connection con, String tableName, String indexName1, String indexName2, String indexName3)
+ throws SQLException {
+ ResultSet rs;
+ rs = con.createStatement().executeQuery("SELECT /*+ NO_INDEX */ count(*) FROM " + tableName);
+ assertTrue(rs.next());
+ assertEquals(0, rs.getLong(1));
+
+ rs = con.createStatement().executeQuery("SELECT count(*) FROM " + indexName1);
+ assertTrue(rs.next());
+ assertEquals(0, rs.getLong(1));
+
+ rs = con.createStatement().executeQuery("SELECT count(*) FROM " + indexName2);
+ assertTrue(rs.next());
+ assertEquals(0, rs.getLong(1));
+
+ if (indexName3 != null) {
+ rs = con.createStatement().executeQuery("SELECT count(*) FROM " + indexName3);
+ assertTrue(rs.next());
+ assertEquals(0, rs.getLong(1));
+ }
+ }
@Test
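A usage note on the assertIndexUsed helper widened earlier in this file (not part of the commit; names are illustrative): global and local indexes surface differently in the explain plan, so the new boolean selects which substring to look for.

    // Global index: the plan names the index table, e.g. "... SCAN OVER IDX".
    assertIndexUsed(conn, "SELECT count(*) FROM " + tableName, indexName, true, false);
    // Local index: the check looks for the index name followed by " [1]" (the
    // index ID in the range scan) rather than " SCAN OVER " + indexName.
    assertIndexUsed(conn, "SELECT count(*) FROM " + tableName, indexName, true, true);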
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e0df4b2e/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
index 9eb5440..e0398c7 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
@@ -20,7 +20,6 @@ package org.apache.phoenix.end2end.index;
import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
import java.sql.Connection;
import java.sql.DriverManager;
@@ -51,7 +50,6 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
-import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.query.QueryServices;
@@ -149,18 +147,14 @@ public class ImmutableIndexIT extends BaseUniqueNamesOwnClusterIT {
conn.setAutoCommit(true);
String dml = "DELETE from " + fullTableName + " WHERE long_col2 = 4";
- try {
- conn.createStatement().execute(dml);
- if (!localIndex) {
- fail();
- }
- } catch (SQLException e) {
- if (localIndex) {
- throw e;
- }
- assertEquals(SQLExceptionCode.INVALID_FILTER_ON_IMMUTABLE_ROWS.getErrorCode(),
- e.getErrorCode());
- }
+ conn.createStatement().execute(dml);
+
+ rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM " + fullTableName);
+ assertTrue(rs.next());
+ assertEquals(2, rs.getInt(1));
+ rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM " + fullIndexName);
+ assertTrue(rs.next());
+ assertEquals(2, rs.getInt(1));
conn.createStatement().execute("DROP TABLE " + fullTableName);
}
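
The rewritten assertions capture the new contract for immutable tables: the DELETE executes and the index is kept in sync, rather than being rejected up front. A short java.sql sketch of the same flow against a hypothetical immutable table (names and URL are illustrative, not from the test):

    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
         Statement stmt = conn.createStatement()) {
        conn.setAutoCommit(true);
        stmt.execute("CREATE TABLE IMM (PK INTEGER PRIMARY KEY, V1 BIGINT, LONG_COL2 BIGINT)"
                + " IMMUTABLE_ROWS=true");
        stmt.execute("CREATE INDEX IMM_IDX ON IMM (V1)"); // LONG_COL2 is not indexed
        for (int i = 1; i <= 3; i++) {
            stmt.execute("UPSERT INTO IMM VALUES (" + i + ", " + (i * 10) + ", " + i + ")");
        }
        // Previously this threw INVALID_FILTER_ON_IMMUTABLE_ROWS for global immutable
        // indexes; after PHOENIX-4290 it executes and removes the matching index row too.
        stmt.execute("DELETE FROM IMM WHERE LONG_COL2 = 2");
        try (ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM IMM_IDX")) {
            if (!rs.next() || rs.getLong(1) != 2) {
                throw new AssertionError("index out of sync after DELETE");
            }
        }
    }
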
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e0df4b2e/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMaintenanceIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMaintenanceIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMaintenanceIT.java
index d5895ae..9ff5a35 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMaintenanceIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexMaintenanceIT.java
@@ -23,7 +23,6 @@ import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
import java.math.BigDecimal;
import java.sql.Connection;
@@ -36,7 +35,6 @@ import java.util.Properties;
import org.apache.commons.lang.StringUtils;
import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
-import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.util.DateUtil;
import org.apache.phoenix.util.PropertiesUtil;
@@ -341,22 +339,10 @@ public class IndexMaintenanceIT extends ParallelStatsDisabledIT {
assertEquals(2, rs.getInt(1));
conn.setAutoCommit(true);
- String dml = "DELETE from " + fullDataTableName + " WHERE long_col2 = 2";
- try {
- conn.createStatement().execute(dml);
- if (!mutable && !localIndex) {
- fail();
- }
- } catch (SQLException e) {
- if (mutable || localIndex) {
- throw e;
- }
- assertEquals(SQLExceptionCode.INVALID_FILTER_ON_IMMUTABLE_ROWS.getErrorCode(), e.getErrorCode());
- }
+ conn.createStatement().execute("DELETE from " + fullDataTableName + " WHERE long_col2 = 2");
if (!mutable) {
- dml = "DELETE from " + fullDataTableName + " WHERE 2*long_col2 = 4";
- conn.createStatement().execute(dml);
+ conn.createStatement().execute("DELETE from " + fullDataTableName + " WHERE 2*long_col2 = 4");
}
rs = conn.createStatement().executeQuery("SELECT COUNT(*) FROM " + fullDataTableName);
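
The case deliberately kept above is the expression filter (2*long_col2 = 4): it cannot be rewritten against any index, so before this change it was the canonical trigger for INVALID_FILTER_ON_IMMUTABLE_ROWS on immutable tables. A sketch of how a caller observes the new behavior, reusing conn and fullDataTableName from the test (the printout is illustrative):

    // The expression filter forces evaluation against the data table; the delete
    // markers for each immutable index are derived client-side (see the
    // DeleteCompiler changes later in this commit).
    String dml = "DELETE FROM " + fullDataTableName + " WHERE 2 * LONG_COL2 = 4";
    try (Statement stmt = conn.createStatement()) {
        int deleted = stmt.executeUpdate(dml); // row count of the data-table delete
        System.out.println("deleted " + deleted + " row(s)");
    }
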
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e0df4b2e/phoenix-core/src/it/java/org/apache/phoenix/tx/TxCheckpointIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/TxCheckpointIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/TxCheckpointIT.java
index 989a97e..bf39dfe 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/TxCheckpointIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/TxCheckpointIT.java
@@ -311,7 +311,6 @@ public class TxCheckpointIT extends ParallelStatsDisabledIT {
String tableName = "TBL_" + generateUniqueName();
String indexName = "IDX_" + generateUniqueName();
String fullTableName = SchemaUtil.getTableName(tableName, tableName);
- Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
ResultSet rs;
try (Connection conn = getConnection()) {
conn.setAutoCommit(false);
@@ -400,6 +399,23 @@ public class TxCheckpointIT extends ParallelStatsDisabledIT {
assertTrue(rs.next());
assertEquals(1,rs.getLong(1));
assertFalse(rs.next());
+
+ conn.createStatement().execute("drop index " + indexName + " on " + fullTableName + "1");
+ conn.createStatement().execute("delete from " + fullTableName + "1 where id1=fk1b AND fk1b=id1");
+ conn.createStatement().execute("delete from " + fullTableName + "1 where id1 in (select fk1a from " + fullTableName + "1 join " + fullTableName + "2 on (fk2=id1))");
+ assertEquals(PhoenixVisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT, state.getVisibilityLevel());
+ assertNotEquals(wp, state.getWritePointer()); // Make sure write ptr moved
+
+ rs = conn.createStatement().executeQuery("select /*+ NO_INDEX */ id1 from " + fullTableName + "1");
+ assertTrue(rs.next());
+ assertEquals(1,rs.getLong(1));
+ assertFalse(rs.next());
+
+ rs = conn.createStatement().executeQuery("select /*+ INDEX(DEMO IDX) */ id1 from " + fullTableName + "1");
+ assertTrue(rs.next());
+ assertEquals(1,rs.getLong(1));
+ assertFalse(rs.next());
+
}
}
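
In brief, the new assertions verify that a transactional DELETE which reads the table it mutates (a self-join or IN subquery) triggers a checkpoint: the statement runs at SNAPSHOT_EXCLUDE_CURRENT visibility and the transaction's write pointer advances. A condensed sketch of that check, where state, conn, and the table names stand in for the test's transaction context and DDL set up earlier in the method:

    long wpBefore = state.getWritePointer();
    conn.createStatement().execute(
            "DELETE FROM T1 WHERE ID1 IN (SELECT FK1A FROM T1 JOIN T2 ON (FK2 = ID1))");
    // The statement must not see its own in-flight deletes while it scans...
    assertEquals(PhoenixVisibilityLevel.SNAPSHOT_EXCLUDE_CURRENT, state.getVisibilityLevel());
    // ...and the checkpoint is what moves the write pointer forward.
    assertNotEquals(wpBefore, state.getWritePointer());
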
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e0df4b2e/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index eb252d3..73689d5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -19,12 +19,11 @@ package org.apache.phoenix.compile;
import static org.apache.phoenix.execute.MutationState.RowTimestampColInfo.NULL_ROWTIMESTAMP_INFO;
import static org.apache.phoenix.util.NumberUtil.add;
+import java.io.IOException;
import java.sql.ParameterMetaData;
import java.sql.SQLException;
-import java.util.Arrays;
-import java.util.Collection;
+import java.util.ArrayList;
import java.util.Collections;
-import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -34,19 +33,20 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.cache.ServerCacheClient;
import org.apache.phoenix.cache.ServerCacheClient.ServerCache;
import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
-import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.exception.SQLExceptionInfo;
import org.apache.phoenix.execute.AggregatePlan;
-import org.apache.phoenix.execute.BaseQueryPlan;
import org.apache.phoenix.execute.MutationState;
import org.apache.phoenix.execute.MutationState.RowMutationState;
import org.apache.phoenix.filter.SkipScanFilter;
+import org.apache.phoenix.hbase.index.ValueGetter;
+import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.index.IndexMaintainer;
import org.apache.phoenix.index.PhoenixIndexCodec;
@@ -64,20 +64,20 @@ import org.apache.phoenix.parse.NamedTableNode;
import org.apache.phoenix.parse.ParseNode;
import org.apache.phoenix.parse.ParseNodeFactory;
import org.apache.phoenix.parse.SelectStatement;
+import org.apache.phoenix.parse.TableName;
import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.schema.MetaDataClient;
-import org.apache.phoenix.schema.MetaDataEntityNotFoundException;
+import org.apache.phoenix.schema.DelegateColumn;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PIndexState;
import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PRow;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTable.IndexType;
-import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.PTableImpl;
import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.ReadOnlyTableException;
import org.apache.phoenix.schema.SortOrder;
@@ -105,9 +105,11 @@ public class DeleteCompiler {
this.operation = operation;
}
- private static MutationState deleteRows(StatementContext childContext, TableRef targetTableRef, List<TableRef> indexTableRefs, ResultIterator iterator, RowProjector projector, TableRef sourceTableRef) throws SQLException {
- PTable table = targetTableRef.getTable();
- PhoenixStatement statement = childContext.getStatement();
+ private static MutationState deleteRows(StatementContext context, ResultIterator iterator, QueryPlan bestPlan, TableRef projectedTableRef, List<TableRef> otherTableRefs) throws SQLException {
+ RowProjector projector = bestPlan.getProjector();
+ TableRef tableRef = bestPlan.getTableRef();
+ PTable table = tableRef.getTable();
+ PhoenixStatement statement = context.getStatement();
PhoenixConnection connection = statement.getConnection();
PName tenantId = connection.getTenantId();
byte[] tenantIdBytes = null;
@@ -123,9 +125,9 @@ public class DeleteCompiler {
List<Map<ImmutableBytesPtr,RowMutationState>> indexMutations = null;
// If indexTableRef is set, we're deleting the rows from both the index table and
// the data table through a single query to save executing an additional one.
- if (!indexTableRefs.isEmpty()) {
- indexMutations = Lists.newArrayListWithExpectedSize(indexTableRefs.size());
- for (int i = 0; i < indexTableRefs.size(); i++) {
+ if (!otherTableRefs.isEmpty()) {
+ indexMutations = Lists.newArrayListWithExpectedSize(otherTableRefs.size());
+ for (int i = 0; i < otherTableRefs.size(); i++) {
indexMutations.add(Maps.<ImmutableBytesPtr,RowMutationState>newHashMapWithExpectedSize(batchSize));
}
}
@@ -140,38 +142,84 @@ public class DeleteCompiler {
if (isMultiTenant) {
values[offset++] = tenantIdBytes;
}
- try (PhoenixResultSet rs = new PhoenixResultSet(iterator, projector, childContext)) {
- int rowCount = 0;
- while (rs.next()) {
- ImmutableBytesPtr ptr = new ImmutableBytesPtr(); // allocate new as this is a key in a Map
- // Use tuple directly, as projector would not have all the PK columns from
- // our index table inside of our projection. Since the tables are equal,
- // there's no translation required.
- if (sourceTableRef.equals(targetTableRef)) {
- rs.getCurrentRow().getKey(ptr);
- } else {
- for (int i = offset; i < values.length; i++) {
- byte[] byteValue = rs.getBytes(i+1-offset);
- // The ResultSet.getBytes() call will have inverted it - we need to invert it back.
- // TODO: consider going under the hood and just getting the bytes
- if (pkColumns.get(i).getSortOrder() == SortOrder.DESC) {
- byte[] tempByteValue = Arrays.copyOf(byteValue, byteValue.length);
- byteValue = SortOrder.invert(byteValue, 0, tempByteValue, 0, byteValue.length);
+ try (final PhoenixResultSet rs = new PhoenixResultSet(iterator, projector, context)) {
+ ValueGetter getter = null;
+ if (!otherTableRefs.isEmpty()) {
+ getter = new ValueGetter() {
+ final ImmutableBytesWritable valuePtr = new ImmutableBytesWritable();
+ final ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable();
+
+ @Override
+ public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) throws IOException {
+ Cell cell = rs.getCurrentRow().getValue(ref.getFamily(), ref.getQualifier());
+ if (cell == null) {
+ return null;
}
- values[i] = byteValue;
+ valuePtr.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
+ return valuePtr;
+ }
+
+ @Override
+ public byte[] getRowKey() {
+ rs.getCurrentRow().getKey(rowKeyPtr);
+ return ByteUtil.copyKeyBytesIfNecessary(rowKeyPtr);
+ }
+ };
+ }
+ IndexMaintainer scannedIndexMaintainer = null;
+ IndexMaintainer[] maintainers = null;
+ PTable dataTable = table;
+ if (table.getType() == PTableType.INDEX) {
+ if (!otherTableRefs.isEmpty()) {
+ // The data table is always the last one in the list if it's
+ // not chosen as the best of the possible plans.
+ dataTable = otherTableRefs.get(otherTableRefs.size()-1).getTable();
+ scannedIndexMaintainer = IndexMaintainer.create(dataTable, table, connection);
+ }
+ maintainers = new IndexMaintainer[otherTableRefs.size()];
+ for (int i = 0; i < otherTableRefs.size(); i++) {
+ // Create IndexMaintainer based on projected table (i.e. SELECT expressions) so that client-side
+ // expressions are used instead of server-side ones.
+ PTable otherTable = otherTableRefs.get(i).getTable();
+ if (otherTable.getType() == PTableType.INDEX) {
+ // In this case, we'll convert from index row -> data row -> other index row
+ maintainers[i] = IndexMaintainer.create(dataTable, otherTable, connection);
+ } else {
+ maintainers[i] = scannedIndexMaintainer;
}
- table.newKey(ptr, values);
}
+ } else if (!otherTableRefs.isEmpty()) {
+ dataTable = table;
+ maintainers = new IndexMaintainer[otherTableRefs.size()];
+ for (int i = 0; i < otherTableRefs.size(); i++) {
+ // Create IndexMaintainer based on projected table (i.e. SELECT expressions) so that client-side
+ // expressions are used instead of server-side ones.
+ maintainers[i] = IndexMaintainer.create(projectedTableRef.getTable(), otherTableRefs.get(i).getTable(), connection);
+ }
+
+ }
+ byte[][] viewConstants = IndexUtil.getViewConstants(dataTable);
+ int rowCount = 0;
+ while (rs.next()) {
+ ImmutableBytesPtr rowKeyPtr = new ImmutableBytesPtr(); // allocate new as this is a key in a Map
+ rs.getCurrentRow().getKey(rowKeyPtr);
// When issuing deletes, we do not care about the row time ranges. Also, if the table had a row timestamp column, then the
- // row key will already have its value.
- mutations.put(ptr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
- for (int i = 0; i < indexTableRefs.size(); i++) {
+ // row key will already have its value.
+ // Checking for otherTableRefs being empty is required when deleting directly from the index
+ if (otherTableRefs.isEmpty() || table.getIndexType() != IndexType.LOCAL) {
+ mutations.put(rowKeyPtr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
+ }
+ for (int i = 0; i < otherTableRefs.size(); i++) {
+ PTable otherTable = otherTableRefs.get(i).getTable();
ImmutableBytesPtr indexPtr = new ImmutableBytesPtr(); // allocate new as this is a key in a Map
- rs.getCurrentRow().getKey(indexPtr);
// Translate the data table row to the index table row
- if (sourceTableRef.getTable().getType() != PTableType.INDEX) {
- IndexMaintainer maintainer = indexTableRefs.get(i).getTable().getIndexMaintainer(table, connection);
- indexPtr.set(maintainer.buildRowKey(null, indexPtr, null, null, HConstants.LATEST_TIMESTAMP));
+ if (table.getType() == PTableType.INDEX) {
+ indexPtr.set(scannedIndexMaintainer.buildDataRowKey(rowKeyPtr, viewConstants));
+ if (otherTable.getType() == PTableType.INDEX) {
+ indexPtr.set(maintainers[i].buildRowKey(getter, indexPtr, null, null, HConstants.LATEST_TIMESTAMP));
+ }
+ } else {
+ indexPtr.set(maintainers[i].buildRowKey(getter, rowKeyPtr, null, null, HConstants.LATEST_TIMESTAMP));
}
indexMutations.get(i).put(indexPtr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
}
@@ -181,10 +229,10 @@ public class DeleteCompiler {
rowCount++;
// Commit a batch if auto commit is true and we're at our batch size
if (isAutoCommit && rowCount % batchSize == 0) {
- MutationState state = new MutationState(targetTableRef, mutations, 0, maxSize, maxSizeBytes, connection);
+ MutationState state = new MutationState(tableRef, mutations, 0, maxSize, maxSizeBytes, connection);
connection.getMutationState().join(state);
- for (int i = 0; i < indexTableRefs.size(); i++) {
- MutationState indexState = new MutationState(indexTableRefs.get(i), indexMutations.get(i), 0, maxSize, maxSizeBytes, connection);
+ for (int i = 0; i < otherTableRefs.size(); i++) {
+ MutationState indexState = new MutationState(otherTableRefs.get(i), indexMutations.get(i), 0, maxSize, maxSizeBytes, connection);
connection.getMutationState().join(indexState);
}
connection.getMutationState().send();
@@ -197,10 +245,9 @@ public class DeleteCompiler {
// If auto commit is true, this last batch will be committed upon return
int nCommittedRows = isAutoCommit ? (rowCount / batchSize * batchSize) : 0;
- MutationState state = new MutationState(targetTableRef, mutations, nCommittedRows, maxSize, maxSizeBytes, connection);
- for (int i = 0; i < indexTableRefs.size(); i++) {
- // To prevent the counting of these index rows, we have a negative for remainingRows.
- MutationState indexState = new MutationState(indexTableRefs.get(i), indexMutations.get(i), 0, maxSize, maxSizeBytes, connection);
+ MutationState state = new MutationState(tableRef, mutations, nCommittedRows, maxSize, maxSizeBytes, connection);
+ for (int i = 0; i < otherTableRefs.size(); i++) {
+ MutationState indexState = new MutationState(otherTableRefs.get(i), indexMutations.get(i), 0, maxSize, maxSizeBytes, connection);
state.join(indexState);
}
return state;
@@ -208,10 +255,9 @@ public class DeleteCompiler {
}
private static class DeletingParallelIteratorFactory extends MutatingParallelIteratorFactory {
- private RowProjector projector;
- private TableRef targetTableRef;
- private List<TableRef> indexTableRefs;
- private TableRef sourceTableRef;
+ private QueryPlan queryPlan;
+ private List<TableRef> otherTableRefs;
+ private TableRef projectedTableRef;
private DeletingParallelIteratorFactory(PhoenixConnection connection) {
super(connection);
@@ -225,41 +271,36 @@ public class DeleteCompiler {
* need to be captured are already getting collected in the parent statement context enclosed in the result
* iterator being used for reading rows out.
*/
- StatementContext ctx = new StatementContext(statement, false);
- MutationState state = deleteRows(ctx, targetTableRef, indexTableRefs, iterator, projector, sourceTableRef);
+ StatementContext context = new StatementContext(statement, false);
+ MutationState state = deleteRows(context, iterator, queryPlan, projectedTableRef, otherTableRefs);
return state;
}
- public void setTargetTableRef(TableRef tableRef) {
- this.targetTableRef = tableRef;
+ public void setQueryPlan(QueryPlan queryPlan) {
+ this.queryPlan = queryPlan;
}
- public void setSourceTableRef(TableRef tableRef) {
- this.sourceTableRef = tableRef;
+ public void setOtherTableRefs(List<TableRef> otherTableRefs) {
+ this.otherTableRefs = otherTableRefs;
}
- public void setRowProjector(RowProjector projector) {
- this.projector = projector;
- }
-
- public void setIndexTargetTableRefs(List<TableRef> indexTableRefs) {
- this.indexTableRefs = indexTableRefs;
+ public void setProjectedTableRef(TableRef projectedTableRef) {
+ this.projectedTableRef = projectedTableRef;
}
-
}
- private Map<PTableKey, PTable> getNonDisabledGlobalImmutableIndexes(TableRef tableRef) {
+ private List<PTable> getNonDisabledGlobalImmutableIndexes(TableRef tableRef) {
PTable table = tableRef.getTable();
if (table.isImmutableRows() && !table.getIndexes().isEmpty()) {
- Map<PTableKey, PTable> nonDisabledIndexes = new HashMap<PTableKey, PTable>(table.getIndexes().size());
+ List<PTable> nonDisabledIndexes = Lists.newArrayListWithExpectedSize(table.getIndexes().size());
for (PTable index : table.getIndexes()) {
if (index.getIndexState() != PIndexState.DISABLE && index.getIndexType() == IndexType.GLOBAL) {
- nonDisabledIndexes.put(index.getKey(), index);
+ nonDisabledIndexes.add(index);
}
}
return nonDisabledIndexes;
}
- return Collections.emptyMap();
+ return Collections.emptyList();
}
private class MultiDeleteMutationPlan implements MutationPlan {
@@ -361,189 +402,151 @@ public class DeleteCompiler {
}
}
- private static boolean hasNonPKIndexedColumns(Collection<PTable> immutableIndexes) {
- for (PTable index : immutableIndexes) {
- for (PColumn column : index.getPKColumns()) {
- if (!IndexUtil.isDataPKColumn(column)) {
- return true;
- }
- }
- }
- return false;
- }
-
public MutationPlan compile(DeleteStatement delete) throws SQLException {
final PhoenixConnection connection = statement.getConnection();
final boolean isAutoCommit = connection.getAutoCommit();
- final boolean hasLimit = delete.getLimit() != null;
+ final boolean hasPostProcessing = delete.getLimit() != null;
final ConnectionQueryServices services = connection.getQueryServices();
List<QueryPlan> queryPlans;
NamedTableNode tableNode = delete.getTable();
String tableName = tableNode.getName().getTableName();
String schemaName = tableNode.getName().getSchemaName();
- boolean retryOnce = !isAutoCommit;
- TableRef tableRefToBe;
- boolean noQueryReqd = false;
- boolean runOnServer = false;
SelectStatement select = null;
ColumnResolver resolverToBe = null;
- Map<PTableKey, PTable> immutableIndex = Collections.emptyMap();
- DeletingParallelIteratorFactory parallelIteratorFactory;
- QueryPlan dataPlanToBe = null;
- while (true) {
- try {
- resolverToBe = FromCompiler.getResolverForMutation(delete, connection);
- tableRefToBe = resolverToBe.getTables().get(0);
- PTable table = tableRefToBe.getTable();
- // Cannot update:
- // - read-only VIEW
- // - transactional table with a connection having an SCN
- // TODO: SchemaUtil.isReadOnly(PTable, connection)?
- if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) {
- throw new ReadOnlyTableException(schemaName,tableName);
- }
- else if (table.isTransactional() && connection.getSCN() != null) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE).setSchemaName(schemaName)
- .setTableName(tableName).build().buildException();
- }
-
- immutableIndex = getNonDisabledGlobalImmutableIndexes(tableRefToBe);
- boolean mayHaveImmutableIndexes = !immutableIndex.isEmpty();
- noQueryReqd = !hasLimit;
- // Can't run on same server for transactional data, as we need the row keys for the data
- // that is being upserted for conflict detection purposes.
- runOnServer = isAutoCommit && noQueryReqd && !table.isTransactional();
- HintNode hint = delete.getHint();
- if (runOnServer && !delete.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
- hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
- }
+ DeletingParallelIteratorFactory parallelIteratorFactoryToBe;
+ resolverToBe = FromCompiler.getResolverForMutation(delete, connection);
+ final TableRef targetTableRef = resolverToBe.getTables().get(0);
+ PTable table = targetTableRef.getTable();
+ // Cannot update:
+ // - read-only VIEW
+ // - transactional table with a connection having an SCN
+ // TODO: SchemaUtil.isReadOnly(PTable, connection)?
+ if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) {
+ throw new ReadOnlyTableException(schemaName,tableName);
+ }
+ else if (table.isTransactional() && connection.getSCN() != null) {
+ throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE).setSchemaName(schemaName)
+ .setTableName(tableName).build().buildException();
+ }
- List<AliasedNode> aliasedNodes = Lists.newArrayListWithExpectedSize(table.getPKColumns().size());
- boolean isSalted = table.getBucketNum() != null;
- boolean isMultiTenant = connection.getTenantId() != null && table.isMultiTenant();
- boolean isSharedViewIndex = table.getViewIndexId() != null;
- for (int i = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0); i < table.getPKColumns().size(); i++) {
- PColumn column = table.getPKColumns().get(i);
- aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, '"' + column.getName().getString() + '"', null)));
- }
- select = FACTORY.select(delete.getTable(), hint, false, aliasedNodes, delete.getWhere(),
- Collections.<ParseNode> emptyList(), null, delete.getOrderBy(), delete.getLimit(), null,
- delete.getBindCount(), false, false, Collections.<SelectStatement> emptyList(),
- delete.getUdfParseNodes());
- select = StatementNormalizer.normalize(select, resolverToBe);
- SelectStatement transformedSelect = SubqueryRewriter.transform(select, resolverToBe, connection);
- if (transformedSelect != select) {
- resolverToBe = FromCompiler.getResolverForQuery(transformedSelect, connection, false, delete.getTable().getName());
- select = StatementNormalizer.normalize(transformedSelect, resolverToBe);
- }
- parallelIteratorFactory = hasLimit ? null : new DeletingParallelIteratorFactory(connection);
- QueryOptimizer optimizer = new QueryOptimizer(services);
- QueryCompiler compiler = new QueryCompiler(statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory, new SequenceManager(statement));
- dataPlanToBe = compiler.compile();
- queryPlans = Lists.newArrayList(mayHaveImmutableIndexes
- ? optimizer.getApplicablePlans(dataPlanToBe, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory)
- : optimizer.getBestPlan(dataPlanToBe, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory));
- if (mayHaveImmutableIndexes) { // FIXME: this is ugly
- // Lookup the table being deleted from in the cache, as it's possible that the
- // optimizer updated the cache if it found indexes that were out of date.
- // If the index was marked as disabled, it should not be in the list
- // of immutable indexes.
- table = connection.getTable(new PTableKey(table.getTenantId(), table.getName().getString()));
- tableRefToBe.setTable(table);
- immutableIndex = getNonDisabledGlobalImmutableIndexes(tableRefToBe);
- }
- } catch (MetaDataEntityNotFoundException e) {
- // Catch column/column family not found exception, as our meta data may
- // be out of sync. Update the cache once and retry if we were out of sync.
- // Otherwise throw, as we'll just get the same error next time.
- if (retryOnce) {
- retryOnce = false;
- MetaDataMutationResult result = new MetaDataClient(connection).updateCache(schemaName, tableName);
- if (result.wasUpdated()) {
- continue;
- }
- }
- throw e;
- }
- break;
- }
- boolean isBuildingImmutable = false;
- final boolean hasImmutableIndexes = !immutableIndex.isEmpty();
- if (hasImmutableIndexes) {
- for (PTable index : immutableIndex.values()){
- if (index.getIndexState() == PIndexState.BUILDING) {
- isBuildingImmutable = true;
- break;
- }
- }
+ List<PTable> immutableIndexes = getNonDisabledGlobalImmutableIndexes(targetTableRef);
+ final boolean hasImmutableIndexes = !immutableIndexes.isEmpty();
+
+ boolean isSalted = table.getBucketNum() != null;
+ boolean isMultiTenant = connection.getTenantId() != null && table.isMultiTenant();
+ boolean isSharedViewIndex = table.getViewIndexId() != null;
+ int pkColumnOffset = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0);
+ final int pkColumnCount = table.getPKColumns().size() - pkColumnOffset;
+ int selectColumnCount = pkColumnCount;
+ for (PTable index : immutableIndexes) {
+ selectColumnCount += index.getPKColumns().size() - pkColumnCount;
}
- final QueryPlan dataPlan = dataPlanToBe;
- // tableRefs is parallel with queryPlans
- TableRef[] tableRefs = new TableRef[hasImmutableIndexes ? immutableIndex.size() : 1];
- if (hasImmutableIndexes) {
- int i = 0;
- Iterator<QueryPlan> plans = queryPlans.iterator();
- while (plans.hasNext()) {
- QueryPlan plan = plans.next();
- PTable table = plan.getTableRef().getTable();
- if (table.getType() == PTableType.INDEX) { // index plans
- tableRefs[i++] = plan.getTableRef();
- immutableIndex.remove(table.getKey());
- } else if (!isBuildingImmutable) { // data plan
- /*
- * If we have immutable indexes that we need to maintain, don't execute the data plan
- * as we can save a query by piggy-backing on any of the other index queries, since the
- * PK columns that we need are always in each index row.
- */
- plans.remove();
+ List<PColumn> projectedColumns = Lists.newArrayListWithExpectedSize(selectColumnCount + pkColumnOffset);
+ List<AliasedNode> aliasedNodes = Lists.newArrayListWithExpectedSize(selectColumnCount);
+ for (int i = isSalted ? 1 : 0; i < pkColumnOffset; i++) {
+ PColumn column = table.getPKColumns().get(i);
+ projectedColumns.add(column);
+ }
+ for (int i = pkColumnOffset; i < table.getPKColumns().size(); i++) {
+ PColumn column = table.getPKColumns().get(i);
+ projectedColumns.add(column);
+ aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, '"' + column.getName().getString() + '"', null)));
+ }
+ // Project all non PK indexed columns so that we can do the proper index maintenance
+ for (PTable index : table.getIndexes()) {
+ IndexMaintainer maintainer = index.getIndexMaintainer(table, connection);
+ // Go through maintainer as it handles functional indexes correctly
+ for (Pair<String,String> columnInfo : maintainer.getIndexedColumnInfo()) {
+ String familyName = columnInfo.getFirst();
+ if (familyName != null) {
+ String columnName = columnInfo.getSecond();
+ boolean hasNoColumnFamilies = table.getColumnFamilies().isEmpty();
+ PColumn column = hasNoColumnFamilies ? table.getColumnForColumnName(columnName) : table.getColumnFamily(familyName).getPColumnForColumnName(columnName);
+ projectedColumns.add(column);
+ aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(hasNoColumnFamilies ? null : TableName.create(null, familyName), '"' + columnName + '"', null)));
}
}
- /*
- * If we have any immutable indexes remaining, then that means that the plan for that index got filtered out
- * because it could not be executed. This would occur if a column in the where clause is not found in the
- * immutable index.
- */
- if (!immutableIndex.isEmpty()) {
- Collection<PTable> immutableIndexes = immutableIndex.values();
- if (!isBuildingImmutable || hasNonPKIndexedColumns(immutableIndexes)) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_FILTER_ON_IMMUTABLE_ROWS).setSchemaName(tableRefToBe.getTable().getSchemaName().getString())
- .setTableName(tableRefToBe.getTable().getTableName().getString()).build().buildException();
+ }
+ select = FACTORY.select(delete.getTable(), delete.getHint(), false, aliasedNodes, delete.getWhere(),
+ Collections.<ParseNode> emptyList(), null, delete.getOrderBy(), delete.getLimit(), null,
+ delete.getBindCount(), false, false, Collections.<SelectStatement> emptyList(),
+ delete.getUdfParseNodes());
+ select = StatementNormalizer.normalize(select, resolverToBe);
+
+ SelectStatement transformedSelect = SubqueryRewriter.transform(select, resolverToBe, connection);
+ boolean hasPreProcessing = transformedSelect != select;
+ if (transformedSelect != select) {
+ resolverToBe = FromCompiler.getResolverForQuery(transformedSelect, connection, false, delete.getTable().getName());
+ select = StatementNormalizer.normalize(transformedSelect, resolverToBe);
+ }
+ final boolean hasPreOrPostProcessing = hasPreProcessing || hasPostProcessing;
+ boolean noQueryReqd = !hasPreOrPostProcessing;
+ // No limit and no subqueries, joins, etc. in the where clause
+ // Can't run on same server for transactional data, as we need the row keys for the data
+ // that is being upserted for conflict detection purposes.
+ // If we have immutable indexes, we'd increase the number of bytes scanned by executing
+ // separate queries against each index, so better to drive from a single table in that case.
+ boolean runOnServer = isAutoCommit && !hasPreOrPostProcessing && !table.isTransactional() && !hasImmutableIndexes;
+ HintNode hint = delete.getHint();
+ if (runOnServer && !delete.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
+ select = SelectStatement.create(select, HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE));
+ }
+
+ parallelIteratorFactoryToBe = hasPreOrPostProcessing ? null : new DeletingParallelIteratorFactory(connection);
+ QueryOptimizer optimizer = new QueryOptimizer(services);
+ QueryCompiler compiler = new QueryCompiler(statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactoryToBe, new SequenceManager(statement));
+ final QueryPlan dataPlan = compiler.compile();
+ // TODO: the select clause should know that there's a sub query, but doesn't seem to currently
+ queryPlans = Lists.newArrayList(!immutableIndexes.isEmpty()
+ ? optimizer.getApplicablePlans(dataPlan, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactoryToBe)
+ : optimizer.getBestPlan(dataPlan, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactoryToBe));
+ // Filter out any local indexes that don't contain all indexed columns.
+ // We have to do this manually because local indexes are still used
+ // when referenced columns aren't in the index, so they won't be
+ // filtered by the optimizer.
+ queryPlans = new ArrayList<>(queryPlans);
+ Iterator<QueryPlan> iterator = queryPlans.iterator();
+ while (iterator.hasNext()) {
+ QueryPlan plan = iterator.next();
+ if (plan.getTableRef().getTable().getIndexType() == IndexType.LOCAL) {
+ if (!plan.getContext().getDataColumns().isEmpty()) {
+ iterator.remove();
}
- runOnServer = false;
- }
+ }
}
- List<TableRef> buildingImmutableIndexes = Lists.newArrayListWithExpectedSize(immutableIndex.values().size());
- for (PTable index : immutableIndex.values()) {
- buildingImmutableIndexes.add(new TableRef(index, dataPlan.getTableRef().getTimeStamp(), dataPlan.getTableRef().getLowerBoundTimeStamp()));
+ if (queryPlans.isEmpty()) {
+ queryPlans = Collections.singletonList(dataPlan);
}
- // Make sure the first plan is targeting deletion from the data table
- // In the case of an immutable index, we'll also delete from the index.
- final TableRef dataTableRef = tableRefs[0] = tableRefToBe;
- /*
- * Create a mutationPlan for each queryPlan. One plan will be for the deletion of the rows
- * from the data table, while the others will be for deleting rows from immutable indexes.
- */
- List<MutationPlan> mutationPlans = Lists.newArrayListWithExpectedSize(tableRefs.length);
- for (int i = 0; i < tableRefs.length; i++) {
- final TableRef tableRef = tableRefs[i];
- final QueryPlan plan = queryPlans.get(i);
- if (!plan.getTableRef().equals(tableRef) || !(plan instanceof BaseQueryPlan)) {
- runOnServer = false;
- noQueryReqd = false; // FIXME: why set this to false in this case?
- }
-
- final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
- final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
-
- final StatementContext context = plan.getContext();
- // If we're doing a query for a set of rows with no where clause, then we don't need to contact the server at all.
- // A simple check of the none existence of a where clause in the parse node is not sufficient, as the where clause
- // may have been optimized out. Instead, we check that there's a single SkipScanFilter
- if (noQueryReqd
- && (!context.getScan().hasFilter()
- || context.getScan().getFilter() instanceof SkipScanFilter)
- && context.getScanRanges().isPointLookup()) {
+ runOnServer &= queryPlans.get(0).getTableRef().getTable().getType() != PTableType.INDEX;
+
+ // We need to have all indexed columns available in all immutable indexes in order
+ // to generate the delete markers from the query. We also cannot have any filters
+ // except for our SkipScanFilter for point lookups.
+ // A simple check of the non-existence of a where clause in the parse node is not sufficient, as the where clause
+ // may have been optimized out. Instead, we check that there's a single SkipScanFilter
+ // If we can generate a plan for every index, that means all the required columns are available in every index,
+ // hence we can drive the delete from any of the plans.
+ noQueryReqd &= queryPlans.size() == 1 + immutableIndexes.size();
+ int queryPlanIndex = 0;
+ while (noQueryReqd && queryPlanIndex < queryPlans.size()) {
+ QueryPlan plan = queryPlans.get(queryPlanIndex++);
+ StatementContext context = plan.getContext();
+ noQueryReqd &= (!context.getScan().hasFilter()
+ || context.getScan().getFilter() instanceof SkipScanFilter)
+ && context.getScanRanges().isPointLookup();
+ }
+
+ final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
+ final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
+
+ // If we're doing a query for a set of rows with no where clause, then we don't need to contact the server at all.
+ if (noQueryReqd) {
+ // Create a mutationPlan for each queryPlan. One plan will be for the deletion of the rows
+ // from the data table, while the others will be for deleting rows from immutable indexes.
+ List<MutationPlan> mutationPlans = Lists.newArrayListWithExpectedSize(queryPlans.size());
+ for (final QueryPlan plan : queryPlans) {
+ final StatementContext context = plan.getContext();
mutationPlans.add(new MutationPlan() {
@Override
@@ -561,7 +564,7 @@ public class DeleteCompiler {
while (iterator.hasNext()) {
mutation.put(new ImmutableBytesPtr(iterator.next().getLowerRange()), new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
}
- return new MutationState(tableRef, mutation, 0, maxSize, maxSizeBytes, connection);
+ return new MutationState(context.getCurrentTable(), mutation, 0, maxSize, maxSizeBytes, connection);
}
@Override
@@ -576,7 +579,7 @@ public class DeleteCompiler {
@Override
public TableRef getTargetRef() {
- return dataTableRef;
+ return dataPlan.getTableRef();
}
@Override
@@ -605,202 +608,230 @@ public class DeleteCompiler {
return 0l;
}
});
- } else if (runOnServer) {
- // TODO: better abstraction
- Scan scan = context.getScan();
- scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
+ }
+ return new MultiDeleteMutationPlan(mutationPlans);
+ } else if (runOnServer) {
+ // TODO: better abstraction
+ final StatementContext context = dataPlan.getContext();
+ Scan scan = context.getScan();
+ scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
+
+ // Build an ungrouped aggregate query: select COUNT(*) from <table> where <where>
+ // The coprocessor will delete each row returned from the scan
+ // Ignoring ORDER BY, since with auto commit on and no limit it makes no difference
+ SelectStatement aggSelect = SelectStatement.create(SelectStatement.COUNT_ONE, delete.getHint());
+ RowProjector projectorToBe = ProjectionCompiler.compile(context, aggSelect, GroupBy.EMPTY_GROUP_BY);
+ context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
+ if (dataPlan.getProjector().projectEveryRow()) {
+ projectorToBe = new RowProjector(projectorToBe,true);
+ }
+ final RowProjector projector = projectorToBe;
+ final QueryPlan aggPlan = new AggregatePlan(context, select, dataPlan.getTableRef(), projector, null, null,
+ OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null);
+ return new MutationPlan() {
+ @Override
+ public ParameterMetaData getParameterMetaData() {
+ return context.getBindManager().getParameterMetaData();
+ }
+
+ @Override
+ public StatementContext getContext() {
+ return context;
+ }
+
+ @Override
+ public TableRef getTargetRef() {
+ return dataPlan.getTableRef();
+ }
- // Build an ungrouped aggregate query: select COUNT(*) from <table> where <where>
- // The coprocessor will delete each row returned from the scan
- // Ignoring ORDER BY, since with auto commit on and no limit makes no difference
- SelectStatement aggSelect = SelectStatement.create(SelectStatement.COUNT_ONE, delete.getHint());
- RowProjector projectorToBe = ProjectionCompiler.compile(context, aggSelect, GroupBy.EMPTY_GROUP_BY);
- context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
- if (plan.getProjector().projectEveryRow()) {
- projectorToBe = new RowProjector(projectorToBe,true);
- }
- final RowProjector projector = projectorToBe;
- final QueryPlan aggPlan = new AggregatePlan(context, select, tableRef, projector, null, null,
- OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null);
- mutationPlans.add(new MutationPlan() {
- @Override
- public ParameterMetaData getParameterMetaData() {
- return context.getBindManager().getParameterMetaData();
- }
+ @Override
+ public Set<TableRef> getSourceRefs() {
+ return dataPlan.getSourceRefs();
+ }
- @Override
- public StatementContext getContext() {
- return context;
- }
+ @Override
+ public Operation getOperation() {
+ return operation;
+ }
- @Override
- public TableRef getTargetRef() {
- return dataTableRef;
- }
-
- @Override
- public Set<TableRef> getSourceRefs() {
- return dataPlan.getSourceRefs();
- }
-
- @Override
- public Operation getOperation() {
- return operation;
- }
-
- @Override
- public MutationState execute() throws SQLException {
- // TODO: share this block of code with UPSERT SELECT
- ImmutableBytesWritable ptr = context.getTempPtr();
- PTable table = tableRef.getTable();
- table.getIndexMaintainers(ptr, context.getConnection());
- byte[] txState = table.isTransactional() ? connection.getMutationState().encodeTransaction() : ByteUtil.EMPTY_BYTE_ARRAY;
- ServerCache cache = null;
- try {
- if (ptr.getLength() > 0) {
- byte[] uuidValue = ServerCacheClient.generateId();
- context.getScan().setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
- context.getScan().setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ptr.get());
- context.getScan().setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
- }
- ResultIterator iterator = aggPlan.iterator();
+ @Override
+ public MutationState execute() throws SQLException {
+ // TODO: share this block of code with UPSERT SELECT
+ ImmutableBytesWritable ptr = context.getTempPtr();
+ PTable table = dataPlan.getTableRef().getTable();
+ table.getIndexMaintainers(ptr, context.getConnection());
+ byte[] txState = table.isTransactional() ? connection.getMutationState().encodeTransaction() : ByteUtil.EMPTY_BYTE_ARRAY;
+ ServerCache cache = null;
try {
- Tuple row = iterator.next();
- final long mutationCount = (Long)projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
- return new MutationState(maxSize, maxSizeBytes, connection) {
- @Override
- public long getUpdateCount() {
- return mutationCount;
- }
- };
+ if (ptr.getLength() > 0) {
+ byte[] uuidValue = ServerCacheClient.generateId();
+ context.getScan().setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
+ context.getScan().setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ptr.get());
+ context.getScan().setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
+ }
+ ResultIterator iterator = aggPlan.iterator();
+ try {
+ Tuple row = iterator.next();
+ final long mutationCount = (Long)projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
+ return new MutationState(maxSize, maxSizeBytes, connection) {
+ @Override
+ public long getUpdateCount() {
+ return mutationCount;
+ }
+ };
+ } finally {
+ iterator.close();
+ }
} finally {
- iterator.close();
- }
- } finally {
- if (cache != null) {
- cache.close();
+ if (cache != null) {
+ cache.close();
+ }
}
}
- }
+
+ @Override
+ public ExplainPlan getExplainPlan() throws SQLException {
+ List<String> queryPlanSteps = aggPlan.getExplainPlan().getPlanSteps();
+ List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size()+1);
+ planSteps.add("DELETE ROWS");
+ planSteps.addAll(queryPlanSteps);
+ return new ExplainPlan(planSteps);
+ }
- @Override
- public ExplainPlan getExplainPlan() throws SQLException {
- List<String> queryPlanSteps = aggPlan.getExplainPlan().getPlanSteps();
- List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size()+1);
- planSteps.add("DELETE ROWS");
- planSteps.addAll(queryPlanSteps);
- return new ExplainPlan(planSteps);
- }
-
- @Override
- public Long getEstimatedRowsToScan() throws SQLException {
- return aggPlan.getEstimatedRowsToScan();
- }
-
- @Override
- public Long getEstimatedBytesToScan() throws SQLException {
- return aggPlan.getEstimatedBytesToScan();
- }
-
- @Override
- public Long getEstimateInfoTimestamp() throws SQLException {
- return aggPlan.getEstimateInfoTimestamp();
- }
- });
- } else {
- List<TableRef> immutableIndexRefsToBe = Lists.newArrayListWithExpectedSize(dataPlan.getTableRef().getTable().getIndexes().size());
- if (!buildingImmutableIndexes.isEmpty()) {
- immutableIndexRefsToBe = buildingImmutableIndexes;
- } else if (hasImmutableIndexes && !plan.getTableRef().equals(tableRef)) {
- immutableIndexRefsToBe = Collections.singletonList(plan.getTableRef());
- }
- final List<TableRef> immutableIndexRefs = immutableIndexRefsToBe;
- final DeletingParallelIteratorFactory parallelIteratorFactory2 = parallelIteratorFactory;
- mutationPlans.add( new MutationPlan() {
- @Override
- public ParameterMetaData getParameterMetaData() {
- return context.getBindManager().getParameterMetaData();
- }
+ @Override
+ public Long getEstimatedRowsToScan() throws SQLException {
+ return aggPlan.getEstimatedRowsToScan();
+ }
- @Override
- public StatementContext getContext() {
- return context;
- }
+ @Override
+ public Long getEstimatedBytesToScan() throws SQLException {
+ return aggPlan.getEstimatedBytesToScan();
+ }
+ @Override
+ public Long getEstimateInfoTimestamp() throws SQLException {
+ return aggPlan.getEstimateInfoTimestamp();
+ }
+ };
+ } else {
+ final DeletingParallelIteratorFactory parallelIteratorFactory = parallelIteratorFactoryToBe;
+ List<PColumn> adjustedProjectedColumns = Lists.newArrayListWithExpectedSize(projectedColumns.size());
+ final int offset = table.getBucketNum() == null ? 0 : 1;
+ for (int i = 0; i < projectedColumns.size(); i++) {
+ final int position = i;
+ adjustedProjectedColumns.add(new DelegateColumn(projectedColumns.get(i)) {
@Override
- public TableRef getTargetRef() {
- return dataTableRef;
- }
-
- @Override
- public Set<TableRef> getSourceRefs() {
- return dataPlan.getSourceRefs();
+ public int getPosition() {
+ return position + offset;
}
+ });
+ }
+ PTable projectedTable = PTableImpl.makePTable(table, PTableType.PROJECTED, adjustedProjectedColumns);
+ final TableRef projectedTableRef = new TableRef(projectedTable, targetTableRef.getLowerBoundTimeStamp(), targetTableRef.getTimeStamp());
+
+ QueryPlan bestPlanToBe = dataPlan;
+ for (QueryPlan plan : queryPlans) {
+ PTable planTable = plan.getTableRef().getTable();
+ if (planTable.getIndexState() != PIndexState.BUILDING) {
+ bestPlanToBe = plan;
+ break;
+ }
+ }
+ final QueryPlan bestPlan = bestPlanToBe;
+ final List<TableRef>otherTableRefs = Lists.newArrayListWithExpectedSize(immutableIndexes.size());
+ for (PTable index : immutableIndexes) {
+ if (!bestPlan.getTableRef().getTable().equals(index)) {
+ otherTableRefs.add(new TableRef(index, targetTableRef.getLowerBoundTimeStamp(), targetTableRef.getTimeStamp()));
+ }
+ }
+
+ if (!bestPlan.getTableRef().getTable().equals(targetTableRef.getTable())) {
+ otherTableRefs.add(projectedTableRef);
+ }
+ final StatementContext context = bestPlan.getContext();
+ return new MutationPlan() {
+ @Override
+ public ParameterMetaData getParameterMetaData() {
+ return context.getBindManager().getParameterMetaData();
+ }
- @Override
- public Operation getOperation() {
- return operation;
- }
+ @Override
+ public StatementContext getContext() {
+ return context;
+ }
- @Override
- public MutationState execute() throws SQLException {
- ResultIterator iterator = plan.iterator();
- try {
- if (!hasLimit) {
- Tuple tuple;
- long totalRowCount = 0;
- if (parallelIteratorFactory2 != null) {
- parallelIteratorFactory2.setRowProjector(plan.getProjector());
- parallelIteratorFactory2.setTargetTableRef(tableRef);
- parallelIteratorFactory2.setSourceTableRef(plan.getTableRef());
- parallelIteratorFactory2.setIndexTargetTableRefs(immutableIndexRefs);
- }
- while ((tuple=iterator.next()) != null) {// Runs query
- Cell kv = tuple.getValue(0);
- totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
- }
- // Return total number of rows that have been delete. In the case of auto commit being off
- // the mutations will all be in the mutation state of the current connection.
- MutationState state = new MutationState(maxSize, maxSizeBytes, connection, totalRowCount);
+ @Override
+ public TableRef getTargetRef() {
+ return targetTableRef;
+ }
- // set the read metrics accumulated in the parent context so that it can be published when the mutations are committed.
- state.setReadMetricQueue(plan.getContext().getReadMetricsQueue());
+ @Override
+ public Set<TableRef> getSourceRefs() {
+ return dataPlan.getSourceRefs();
+ }
- return state;
- } else {
- return deleteRows(plan.getContext(), tableRef, immutableIndexRefs, iterator, plan.getProjector(), plan.getTableRef());
+ @Override
+ public Operation getOperation() {
+ return operation;
+ }
+
+ @Override
+ public MutationState execute() throws SQLException {
+ ResultIterator iterator = bestPlan.iterator();
+ try {
+ if (!hasPreOrPostProcessing) {
+ Tuple tuple;
+ long totalRowCount = 0;
+ if (parallelIteratorFactory != null) {
+ parallelIteratorFactory.setQueryPlan(bestPlan);
+ parallelIteratorFactory.setOtherTableRefs(otherTableRefs);
+ parallelIteratorFactory.setProjectedTableRef(projectedTableRef);
}
- } finally {
- iterator.close();
+ while ((tuple=iterator.next()) != null) {// Runs query
+ Cell kv = tuple.getValue(0);
+ totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
+ }
+ // Return the total number of rows that have been deleted from the table. In the case of auto commit being off,
+ // the mutations will all be in the mutation state of the current connection. We need to divide by the
+ // total number of tables we updated, as otherwise the client would get an unexpected result.
+ MutationState state = new MutationState(maxSize, maxSizeBytes, connection, totalRowCount / ((bestPlan.getTableRef().getTable().getIndexType() == IndexType.LOCAL && !otherTableRefs.isEmpty() ? 0 : 1) + otherTableRefs.size()));
+
+ // set the read metrics accumulated in the parent context so that it can be published when the mutations are committed.
+ state.setReadMetricQueue(context.getReadMetricsQueue());
+
+ return state;
+ } else {
+ return deleteRows(context, iterator, bestPlan, projectedTableRef, otherTableRefs);
}
+ } finally {
+ iterator.close();
}
-
- @Override
- public ExplainPlan getExplainPlan() throws SQLException {
- List<String> queryPlanSteps = plan.getExplainPlan().getPlanSteps();
- List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size()+1);
- planSteps.add("DELETE ROWS");
- planSteps.addAll(queryPlanSteps);
- return new ExplainPlan(planSteps);
- }
+ }
- @Override
- public Long getEstimatedRowsToScan() throws SQLException {
- return plan.getEstimatedRowsToScan();
- }
+ @Override
+ public ExplainPlan getExplainPlan() throws SQLException {
+ List<String> queryPlanSteps = bestPlan.getExplainPlan().getPlanSteps();
+ List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size()+1);
+ planSteps.add("DELETE ROWS");
+ planSteps.addAll(queryPlanSteps);
+ return new ExplainPlan(planSteps);
+ }
- @Override
- public Long getEstimatedBytesToScan() throws SQLException {
- return plan.getEstimatedBytesToScan();
- }
+ @Override
+ public Long getEstimatedRowsToScan() throws SQLException {
+ return bestPlan.getEstimatedRowsToScan();
+ }
- @Override
- public Long getEstimateInfoTimestamp() throws SQLException {
- return plan.getEstimateInfoTimestamp();
- }
- });
- }
+ @Override
+ public Long getEstimatedBytesToScan() throws SQLException {
+ return bestPlan.getEstimatedBytesToScan();
+ }
+
+ @Override
+ public Long getEstimateInfoTimestamp() throws SQLException {
+ return bestPlan.getEstimateInfoTimestamp();
+ }
+ };
}
- return mutationPlans.size() == 1 ? mutationPlans.get(0) : new MultiDeleteMutationPlan(mutationPlans);
}
}
\ No newline at end of file
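
Before moving on, it is worth restating the row-key translation at the heart of the new deleteRows(): the plan driving the DELETE may scan either the data table or any qualifying index, and a delete marker must be produced for every other table. A simplified, self-contained model of that branch structure (the interface and class below are illustrative stand-ins, not Phoenix types):

    // Models the three cases in deleteRows() above:
    //   index row -> data row          (scannedIndexMaintainer.buildDataRowKey)
    //   index row -> other index row   (via the data row, then buildRowKey)
    //   data row  -> index row         (maintainers[i].buildRowKey)
    interface KeyTranslator {
        byte[] toDataRowKey(byte[] indexRowKey);
        byte[] toIndexRowKey(byte[] dataRowKey);
    }

    class DeleteMarkerRouter {
        static byte[] rowKeyForOtherTable(boolean scannedIsIndex, boolean otherIsIndex,
                byte[] scannedRowKey, KeyTranslator scanned, KeyTranslator other) {
            if (scannedIsIndex) {
                byte[] dataRowKey = scanned.toDataRowKey(scannedRowKey);
                return otherIsIndex ? other.toIndexRowKey(dataRowKey) : dataRowKey;
            }
            // Scanned the data table directly: translate straight to the index row.
            return other.toIndexRowKey(scannedRowKey);
        }
    }
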
[23/37] phoenix git commit: PHOENIX-4343 In CREATE TABLE allow setting guide post width only on base data tables
Posted by ja...@apache.org.
PHOENIX-4343 In CREATE TABLE allow setting guide post width only on base data tables
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/21606e5e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/21606e5e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/21606e5e
Branch: refs/heads/4.x-HBase-1.1
Commit: 21606e5e33c5b5aec448ca269bf1d3617a269049
Parents: 637b24f
Author: Samarth Jain <sa...@apache.org>
Authored: Wed Nov 1 23:21:01 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:40 2017 -0800
----------------------------------------------------------------------
.../apache/phoenix/end2end/CreateTableIT.java | 73 ++++++++++++++++++++
.../end2end/ExplainPlanWithStatsEnabledIT.java | 2 +-
.../phoenix/exception/SQLExceptionCode.java | 2 +-
.../apache/phoenix/schema/MetaDataClient.java | 7 ++
4 files changed, 82 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/21606e5e/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
index 93bb02b..1abc653 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
@@ -27,6 +27,7 @@ import static org.junit.Assert.fail;
import java.sql.Connection;
import java.sql.DriverManager;
+import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
@@ -35,6 +36,7 @@ import java.util.Properties;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.GlobalPermissionOrBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.exception.SQLExceptionCode;
@@ -743,4 +745,75 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
}
conn2.close();
}
+
+ @Test
+ public void testSettingGuidePostWidth() throws Exception {
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ String dataTable = generateUniqueName();
+ int guidePostWidth = 20;
+ String ddl =
+ "CREATE TABLE " + dataTable + " (k INTEGER PRIMARY KEY, a bigint, b bigint)"
+ + " GUIDE_POSTS_WIDTH=" + guidePostWidth;
+ conn.createStatement().execute(ddl);
+ assertEquals(20, checkGuidePostWidth(dataTable));
+ String viewName = "V_" + generateUniqueName();
+ ddl =
+ "CREATE VIEW " + viewName + " AS SELECT * FROM " + dataTable
+ + " GUIDE_POSTS_WIDTH=" + guidePostWidth;
+ try {
+ conn.createStatement().execute(ddl);
+ fail();
+ } catch (SQLException e) {
+ assertEquals(SQLExceptionCode.CANNOT_SET_GUIDE_POST_WIDTH.getErrorCode(),
+ e.getErrorCode());
+ }
+
+ // let the view creation go through
+ ddl = "CREATE VIEW " + viewName + " AS SELECT * FROM " + dataTable;
+ conn.createStatement().execute(ddl);
+
+ String globalIndex = "GI_" + generateUniqueName();
+ ddl =
+ "CREATE INDEX " + globalIndex + " ON " + dataTable
+ + "(a) INCLUDE (b) GUIDE_POSTS_WIDTH = " + guidePostWidth;
+ try {
+ conn.createStatement().execute(ddl);
+ fail();
+ } catch (SQLException e) {
+ assertEquals(SQLExceptionCode.CANNOT_SET_GUIDE_POST_WIDTH.getErrorCode(),
+ e.getErrorCode());
+ }
+ String localIndex = "LI_" + generateUniqueName();
+ ddl =
+ "CREATE LOCAL INDEX " + localIndex + " ON " + dataTable
+ + "(b) INCLUDE (a) GUIDE_POSTS_WIDTH = " + guidePostWidth;
+ try {
+ conn.createStatement().execute(ddl);
+ fail();
+ } catch (SQLException e) {
+ assertEquals(SQLExceptionCode.CANNOT_SET_GUIDE_POST_WIDTH.getErrorCode(),
+ e.getErrorCode());
+ }
+ String viewIndex = "VI_" + generateUniqueName();
+ ddl =
+ "CREATE LOCAL INDEX " + viewIndex + " ON " + dataTable
+ + "(b) INCLUDE (a) GUIDE_POSTS_WIDTH = " + guidePostWidth;
+ try {
+ conn.createStatement().execute(ddl);
+ fail();
+ } catch (SQLException e) {
+ assertEquals(SQLExceptionCode.CANNOT_SET_GUIDE_POST_WIDTH.getErrorCode(),
+ e.getErrorCode());
+ }
+ }
+ }
+
+ private int checkGuidePostWidth(String tableName) throws Exception {
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ String query =
+ "SELECT GUIDE_POSTS_WIDTH FROM SYSTEM.CATALOG WHERE TABLE_NAME = ? AND COLUMN_FAMILY IS NULL AND COLUMN_NAME IS NULL";
+ PreparedStatement stmt = conn.prepareStatement(query);
+ stmt.setString(1, tableName);
+ ResultSet rs = stmt.executeQuery();
+ assertTrue(rs.next());
+ return rs.getInt(1);
+ }
+ }
+
}
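
A compact sketch of the rule this test pins down (hypothetical names and URL; error code 1139 / XCL39 is the CANNOT_SET_GUIDE_POST_WIDTH entry added below):

    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
         Statement stmt = conn.createStatement()) {
        // Allowed: GUIDE_POSTS_WIDTH on a base data table.
        stmt.execute("CREATE TABLE BASE_T (K INTEGER PRIMARY KEY, A BIGINT, B BIGINT)"
                + " GUIDE_POSTS_WIDTH=20");
        // Rejected: the same property on an index (and likewise on views).
        try {
            stmt.execute("CREATE INDEX BASE_T_IDX ON BASE_T (A) INCLUDE (B)"
                    + " GUIDE_POSTS_WIDTH=20");
            throw new AssertionError("expected CANNOT_SET_GUIDE_POST_WIDTH");
        } catch (SQLException e) {
            if (e.getErrorCode() != 1139) { // XCL39
                throw e;
            }
        }
    }
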
http://git-wip-us.apache.org/repos/asf/phoenix/blob/21606e5e/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
index b5e4588..e76b147 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
@@ -73,7 +73,7 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
throws Exception {
try (Connection conn = DriverManager.getConnection(getUrl())) {
conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + table
- + " (c1.a) INCLUDE (c2.b) GUIDE_POSTS_WIDTH = " + guidePostWidth);
+ + " (c1.a) INCLUDE (c2.b) ");
conn.createStatement().execute("UPDATE STATISTICS " + indexName);
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/21606e5e/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index cfeb212..e51fd9f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -378,7 +378,7 @@ public enum SQLExceptionCode {
MAX_COLUMNS_EXCEEDED(1136, "XCL36", "The number of columns exceed the maximum supported by the table's qualifier encoding scheme"),
INVALID_IMMUTABLE_STORAGE_SCHEME_AND_COLUMN_QUALIFIER_BYTES(1137, "XCL37", "If IMMUTABLE_STORAGE_SCHEME property is not set to ONE_CELL_PER_COLUMN COLUMN_ENCODED_BYTES cannot be 0"),
INVALID_IMMUTABLE_STORAGE_SCHEME_CHANGE(1138, "XCL38", "IMMUTABLE_STORAGE_SCHEME property cannot be changed from/to ONE_CELL_PER_COLUMN "),
-
+ CANNOT_SET_GUIDE_POST_WIDTH(1139, "XCL39", "Guide post width can only be set on base data tables"),
/**
* Implementation defined class. Phoenix internal error. (errorcode 20, sqlstate INT).
*/
http://git-wip-us.apache.org/repos/asf/phoenix/blob/21606e5e/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 7ce2167..338b325 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -1949,6 +1949,13 @@ public class MetaDataClient {
}
String autoPartitionSeq = (String) TableProperty.AUTO_PARTITION_SEQ.getValue(tableProps);
Long guidePostsWidth = (Long) TableProperty.GUIDE_POSTS_WIDTH.getValue(tableProps);
+
+ // We only allow setting guide post width for a base table
+ if (guidePostsWidth != null && tableType != PTableType.TABLE) {
+ throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SET_GUIDE_POST_WIDTH)
+ .setSchemaName(schemaName).setTableName(tableName).build().buildException();
+ }
+
Boolean storeNullsProp = (Boolean) TableProperty.STORE_NULLS.getValue(tableProps);
if (storeNullsProp == null) {
if (parent == null) {
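
For readers skimming the diff, a minimal client-side sketch (not part of the commit; the JDBC URL and table/view names are illustrative) of the behavior the new check enforces: GUIDE_POSTS_WIDTH is accepted on a base data table but rejected with XCL39 (1139) everywhere else.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;

    public class GuidePostWidthSketch {
        public static void main(String[] args) throws Exception {
            String jdbcUrl = "jdbc:phoenix:localhost"; // assumption: any reachable cluster
            try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
                // Allowed: the property is set on a base data table
                conn.createStatement().execute(
                    "CREATE TABLE base_t (k INTEGER PRIMARY KEY, v BIGINT) GUIDE_POSTS_WIDTH=20");
                try {
                    // Rejected: views (and, per the test above, indexes) cannot set it
                    conn.createStatement().execute(
                        "CREATE VIEW v_t AS SELECT * FROM base_t GUIDE_POSTS_WIDTH=20");
                } catch (SQLException e) {
                    System.out.println(e.getErrorCode()); // prints 1139 (XCL39)
                }
            }
        }
    }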
[18/37] phoenix git commit: PHOENIX-4290 Full table scan performed for DELETE with table having immutable indexes
Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e0df4b2e/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index f88b34b..b5293bb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -17,8 +17,6 @@
*/
package org.apache.phoenix.compile;
-import static org.apache.phoenix.schema.PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS;
-
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.util.ArrayList;
@@ -66,6 +64,7 @@ import org.apache.phoenix.schema.ColumnNotFoundException;
import org.apache.phoenix.schema.ColumnRef;
import org.apache.phoenix.schema.FunctionNotFoundException;
import org.apache.phoenix.schema.MetaDataClient;
+import org.apache.phoenix.schema.MetaDataEntityNotFoundException;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PColumnFamily;
import org.apache.phoenix.schema.PColumnFamilyImpl;
@@ -73,9 +72,9 @@ import org.apache.phoenix.schema.PColumnImpl;
import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PNameFactory;
import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
import org.apache.phoenix.schema.PTable.IndexType;
import org.apache.phoenix.schema.PTable.QualifierEncodingScheme;
-import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
import org.apache.phoenix.schema.PTableImpl;
import org.apache.phoenix.schema.PTableKey;
import org.apache.phoenix.schema.PTableType;
@@ -871,7 +870,9 @@ public class FromCompiler {
TableRef tableRef = iterator.next();
try {
PColumnFamily columnFamily = tableRef.getTable().getColumnFamily(cfName);
- if (theColumnFamilyRef != null) { throw new TableNotFoundException(cfName); }
+ if (columnFamily == null) {
+ throw new TableNotFoundException(cfName);
+ }
theColumnFamilyRef = new ColumnFamilyRef(tableRef, columnFamily);
} catch (ColumnFamilyNotFoundException e) {}
}
@@ -914,10 +915,42 @@ public class FromCompiler {
PColumn column = tableRef.getTable().getColumnForColumnName(colName);
return new ColumnRef(tableRef, column.getPosition());
} catch (TableNotFoundException e) {
- // Try using the tableName as a columnFamily reference instead
- ColumnFamilyRef cfRef = resolveColumnFamily(schemaName, tableName);
- PColumn column = cfRef.getFamily().getPColumnForColumnName(colName);
- return new ColumnRef(cfRef.getTableRef(), column.getPosition());
+ TableRef theTableRef = null;
+ PColumn theColumn = null;
+ PColumnFamily theColumnFamily = null;
+ if (schemaName != null) {
+ try {
+ // Try schemaName as the tableName and use tableName as column family name
+ theTableRef = resolveTable(null, schemaName);
+ theColumnFamily = theTableRef.getTable().getColumnFamily(tableName);
+ theColumn = theColumnFamily.getPColumnForColumnName(colName);
+ } catch (MetaDataEntityNotFoundException e2) {
+ }
+ }
+ if (theColumn == null) {
+ // Try using the tableName as a columnFamily reference instead
+ // and resolve column in each column family.
+ Iterator<TableRef> iterator = tables.iterator();
+ while (iterator.hasNext()) {
+ TableRef tableRef = iterator.next();
+ try {
+ PColumnFamily columnFamily = tableRef.getTable().getColumnFamily(tableName);
+ PColumn column = columnFamily.getPColumnForColumnName(colName);
+ if (theColumn != null) {
+ throw new AmbiguousColumnException(colName);
+ }
+ theTableRef = tableRef;
+ theColumnFamily = columnFamily;
+ theColumn = column;
+ } catch (MetaDataEntityNotFoundException e1) {
+ }
+ }
+ if (theColumn == null) {
+ throw new ColumnNotFoundException(colName);
+ }
+ }
+ ColumnFamilyRef cfRef = new ColumnFamilyRef(theTableRef, theColumnFamily);
+ return new ColumnRef(cfRef.getTableRef(), theColumn.getPosition());
}
}
}
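
A hedged sketch of what the reworked resolver enables (fragment; assumes an open Phoenix Connection named conn, and the names are illustrative rather than from the commit): a column reference qualified as table.family.column, where the leading part was previously misread as a schema name.

    // "cf" here is a column family, declared inline in the DDL
    conn.createStatement().execute(
        "CREATE TABLE t (k INTEGER PRIMARY KEY, cf.col VARCHAR)");
    // Resolution order implemented above: first try "t" as a table and "cf" as one
    // of its families; only if that fails, search "cf" across all tables' families,
    // throwing AmbiguousColumnException when more than one family matches.
    java.sql.ResultSet rs = conn.createStatement().executeQuery("SELECT t.cf.col FROM t");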
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e0df4b2e/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
index 4ebca90..796dad0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
@@ -153,7 +153,7 @@ public class TupleProjectionCompiler {
PTableType.PROJECTED, table.getIndexState(), table.getTimeStamp(), table.getSequenceNumber(),
table.getPKName(), table.getBucketNum(), projectedColumns, table.getParentSchemaName(),
table.getParentTableName(), table.getIndexes(), table.isImmutableRows(), Collections.<PName> emptyList(),
- null, null, table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(),
+ table.getDefaultFamilyName(), table.getViewStatement(), table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(),
table.getViewIndexId(),
table.getIndexType(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(),
table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.getEncodedCQCounter(), table.useStatsForParallelization());
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e0df4b2e/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index f25f7f1..cfeb212 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -244,7 +244,6 @@ public enum SQLExceptionCode {
SET_UNSUPPORTED_PROP_ON_ALTER_TABLE(1025, "42Y83", "Unsupported property set in ALTER TABLE command."),
CANNOT_ADD_NOT_NULLABLE_COLUMN(1038, "42Y84", "Only nullable columns may be added for a pre-existing table."),
NO_MUTABLE_INDEXES(1026, "42Y85", "Mutable secondary indexes are only supported for HBase version " + MetaDataUtil.decodeHBaseVersionAsString(PhoenixDatabaseMetaData.MUTABLE_SI_VERSION_THRESHOLD) + " and above."),
- INVALID_FILTER_ON_IMMUTABLE_ROWS(1027, "42Y86", "All columns referenced in a WHERE clause must be available in every index for a table with immutable rows."),
INVALID_INDEX_STATE_TRANSITION(1028, "42Y87", "Invalid index state transition."),
INVALID_MUTABLE_INDEX_CONFIG(1029, "42Y88", "Mutable secondary indexes must have the "
+ IndexManagementUtil.WAL_EDIT_CODEC_CLASS_KEY + " property set to "
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e0df4b2e/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index df069a6..9c26575 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -189,7 +189,9 @@ public class MutationState implements SQLCloseable {
public MutationState(TableRef table, Map<ImmutableBytesPtr,RowMutationState> mutations, long sizeOffset, long maxSize, long maxSizeBytes, PhoenixConnection connection) throws SQLException {
this(maxSize, maxSizeBytes, connection, false, null, sizeOffset);
- this.mutations.put(table, mutations);
+ if (!mutations.isEmpty()) {
+ this.mutations.put(table, mutations);
+ }
this.numRows = mutations.size();
throwIfTooBig();
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e0df4b2e/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
index b4566a4..500ac4b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
@@ -366,7 +366,6 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
private IndexMaintainer(final PTable dataTable, final PTable index, PhoenixConnection connection) {
this(dataTable.getRowKeySchema(), dataTable.getBucketNum() != null);
- assert(dataTable.getType() == PTableType.SYSTEM || dataTable.getType() == PTableType.TABLE || dataTable.getType() == PTableType.VIEW);
this.rowKeyOrderOptimizable = index.rowKeyOrderOptimizable();
this.isMultiTenant = dataTable.isMultiTenant();
this.viewIndexId = index.getViewIndexId() == null ? null : MetaDataUtil.getViewIndexIdDataType().toBytes(index.getViewIndexId());
@@ -411,15 +410,14 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
int nDataPKColumns = dataRowKeySchema.getFieldCount() - dataPosOffset;
// For indexes on views, we need to remember which data columns are "constants"
// These are the values in a VIEW where clause. For these, we don't put them in the
- // index, as they're the same for every row in the index.
- if (dataTable.getType() == PTableType.VIEW) {
- List<PColumn>dataPKColumns = dataTable.getPKColumns();
- for (int i = dataPosOffset; i < dataPKColumns.size(); i++) {
- PColumn dataPKColumn = dataPKColumns.get(i);
- if (dataPKColumn.getViewConstant() != null) {
- bitSet.set(i);
- nDataPKColumns--;
- }
+ // index, as they're the same for every row in the index. The data table can be
+ // either a VIEW or PROJECTED
+ List<PColumn>dataPKColumns = dataTable.getPKColumns();
+ for (int i = dataPosOffset; i < dataPKColumns.size(); i++) {
+ PColumn dataPKColumn = dataPKColumns.get(i);
+ if (dataPKColumn.getViewConstant() != null) {
+ bitSet.set(i);
+ nDataPKColumns--;
}
}
this.indexTableName = indexTableName;
@@ -543,11 +541,14 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
for (int i = 0; i < index.getColumnFamilies().size(); i++) {
PColumnFamily family = index.getColumnFamilies().get(i);
for (PColumn indexColumn : family.getColumns()) {
- PColumn dataColumn = IndexUtil.getDataColumn(dataTable, indexColumn.getName().getString());
- byte[] dataColumnCq = dataColumn.getColumnQualifierBytes();
- byte[] indexColumnCq = indexColumn.getColumnQualifierBytes();
- this.coveredColumnsMap.put(new ColumnReference(dataColumn.getFamilyName().getBytes(), dataColumnCq),
- new ColumnReference(indexColumn.getFamilyName().getBytes(), indexColumnCq));
+ PColumn dataColumn = IndexUtil.getDataColumnOrNull(dataTable, indexColumn.getName().getString());
+ // This can happen during deletion where we don't need covered columns
+ if (dataColumn != null) {
+ byte[] dataColumnCq = dataColumn.getColumnQualifierBytes();
+ byte[] indexColumnCq = indexColumn.getColumnQualifierBytes();
+ this.coveredColumnsMap.put(new ColumnReference(dataColumn.getFamilyName().getBytes(), dataColumnCq),
+ new ColumnReference(indexColumn.getFamilyName().getBytes(), indexColumnCq));
+ }
}
}
this.estimatedIndexRowKeyBytes = estimateIndexRowKeyByteSize(indexColByteSize);
@@ -758,8 +759,10 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
int minLength = length - maxTrailingNulls;
byte[] dataRowKey = stream.getBuffer();
// Remove trailing nulls
- while (length > minLength && dataRowKey[length-1] == QueryConstants.SEPARATOR_BYTE) {
+ int index = dataRowKeySchema.getFieldCount() - 1;
+ while (index >= 0 && !dataRowKeySchema.getField(index).getDataType().isFixedWidth() && length > minLength && dataRowKey[length-1] == QueryConstants.SEPARATOR_BYTE) {
length--;
+ index--;
}
// TODO: need to capture nDataSaltBuckets instead of just a boolean. For now,
// we store this in nIndexSaltBuckets, as we only use this function for local indexes
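
The new fixed-width-aware loop matters because a fixed-width trailing field can legitimately end in zero bytes that are data, not separators. A hedged illustration (plain HBase byte encoding, for intuition only; Phoenix's PLong layout differs in the sign bit but shares the trailing zero):

    // 256L encodes to [0, 0, 0, 0, 0, 0, 1, 0]; the final 0x00 is part of the value,
    // so blindly trimming "trailing separators" would corrupt the recovered row key.
    byte[] b = org.apache.hadoop.hbase.util.Bytes.toBytes(256L);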
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e0df4b2e/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
index ca7ff2c..b3df50b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
@@ -429,7 +429,7 @@ public class QueryOptimizer {
});
- return bestCandidates;
+ return stopAtBestPlan ? bestCandidates.subList(0, 1) : bestCandidates;
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e0df4b2e/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index ae91d17..1cf61a2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -295,6 +295,16 @@ public class PTableImpl implements PTable {
table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.getEncodedCQCounter(), table.useStatsForParallelization());
}
+ public static PTableImpl makePTable(PTable table, PTableType type, Collection<PColumn> columns) throws SQLException {
+ return new PTableImpl(
+ table.getTenantId(), table.getSchemaName(), table.getTableName(), type, table.getIndexState(), table.getTimeStamp(),
+ table.getSequenceNumber(), table.getPKName(), table.getBucketNum(), columns, table.getParentSchemaName(), table.getParentTableName(),
+ table.getIndexes(), table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(),
+ table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(),
+ table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(),
+ table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.getEncodedCQCounter(), table.useStatsForParallelization());
+ }
+
public static PTableImpl makePTable(PTable table, Collection<PColumn> columns, PName defaultFamily) throws SQLException {
return new PTableImpl(
table.getTenantId(), table.getSchemaName(), table.getTableName(), table.getType(), table.getIndexState(), table.getTimeStamp(),
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e0df4b2e/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index 1b6f9d5..b23ea1b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -207,27 +207,35 @@ public class IndexUtil {
}
public static PColumn getDataColumn(PTable dataTable, String indexColumnName) {
+ PColumn column = getDataColumnOrNull(dataTable, indexColumnName);
+ if (column == null) {
+ throw new IllegalArgumentException("Could not find column \"" + SchemaUtil.getColumnName(getDataColumnFamilyName(indexColumnName), getDataColumnName(indexColumnName)) + " in " + dataTable);
+ }
+ return column;
+ }
+
+ public static PColumn getDataColumnOrNull(PTable dataTable, String indexColumnName) {
int pos = indexColumnName.indexOf(INDEX_COLUMN_NAME_SEP);
if (pos < 0) {
- throw new IllegalArgumentException("Could not find expected '" + INDEX_COLUMN_NAME_SEP + "' separator in index column name of \"" + indexColumnName + "\"");
+ return null;
}
if (pos == 0) {
try {
return dataTable.getPKColumn(indexColumnName.substring(1));
} catch (ColumnNotFoundException e) {
- throw new IllegalArgumentException("Could not find PK column \"" + indexColumnName.substring(pos+1) + "\" in index column name of \"" + indexColumnName + "\"", e);
+ return null;
}
}
PColumnFamily family;
try {
family = dataTable.getColumnFamily(getDataColumnFamilyName(indexColumnName));
} catch (ColumnFamilyNotFoundException e) {
- throw new IllegalArgumentException("Could not find column family \"" + indexColumnName.substring(0, pos) + "\" in index column name of \"" + indexColumnName + "\"", e);
+ return null;
}
try {
return family.getPColumnForColumnName(indexColumnName.substring(pos+1));
} catch (ColumnNotFoundException e) {
- throw new IllegalArgumentException("Could not find column \"" + indexColumnName.substring(pos+1) + "\" in index column name of \"" + indexColumnName + "\"", e);
+ return null;
}
}
@@ -686,7 +694,7 @@ public class IndexUtil {
}
public static byte[][] getViewConstants(PTable dataTable) {
- if (dataTable.getType() != PTableType.VIEW) return null;
+ if (dataTable.getType() != PTableType.VIEW && dataTable.getType() != PTableType.PROJECTED) return null;
int dataPosOffset = (dataTable.getBucketNum() != null ? 1 : 0) + (dataTable.isMultiTenant() ? 1 : 0);
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
List<byte[]> viewConstants = new ArrayList<byte[]>();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e0df4b2e/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
index ca4be2f..b3c7dca 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
@@ -1235,33 +1235,6 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
}
@Test
- public void testDeleteFromImmutableWithKV() throws Exception {
- String ddl = "CREATE TABLE t (k1 VARCHAR, v1 VARCHAR, v2 VARCHAR CONSTRAINT pk PRIMARY KEY(k1)) immutable_rows=true";
- String indexDDL = "CREATE INDEX i ON t (v1)";
- Connection conn = DriverManager.getConnection(getUrl());
- try {
- conn.createStatement().execute(ddl);
- assertImmutableRows(conn, "T", true);
- conn.createStatement().execute(indexDDL);
- assertImmutableRows(conn, "I", true);
- conn.createStatement().execute("DELETE FROM t WHERE v2 = 'foo'");
- fail();
- } catch (SQLException e) {
- assertEquals(SQLExceptionCode.INVALID_FILTER_ON_IMMUTABLE_ROWS.getErrorCode(), e.getErrorCode());
- }
- // Test with one index having the referenced key value column, but one not having it.
- // Still should fail
- try {
- indexDDL = "CREATE INDEX i2 ON t (v2)";
- conn.createStatement().execute(indexDDL);
- conn.createStatement().execute("DELETE FROM t WHERE v2 = 'foo'");
- fail();
- } catch (SQLException e) {
- assertEquals(SQLExceptionCode.INVALID_FILTER_ON_IMMUTABLE_ROWS.getErrorCode(), e.getErrorCode());
- }
- }
-
- @Test
public void testInvalidNegativeArrayIndex() throws Exception {
String query = "SELECT a_double_array[-20] FROM table_with_array";
Connection conn = DriverManager.getConnection(getUrl());
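
The deleted test documents the old behavior. As a hedged sketch derived from it (fragment; assumes an open Phoenix Connection named conn), the same sequence now compiles: a DELETE whose WHERE clause references a column not covered by every immutable index no longer fails with INVALID_FILTER_ON_IMMUTABLE_ROWS (1027); the rows are resolved against the data table and the index mutations are derived from them.

    conn.createStatement().execute(
        "CREATE TABLE t (k1 VARCHAR, v1 VARCHAR, v2 VARCHAR CONSTRAINT pk PRIMARY KEY(k1)) immutable_rows=true");
    conn.createStatement().execute("CREATE INDEX i ON t (v1)");
    // Previously threw 1027 at compile time; now a valid delete over the data table
    conn.createStatement().execute("DELETE FROM t WHERE v2 = 'foo'");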
[14/37] phoenix git commit: Revert "PHOENIX-4322 DESC primary key
column with variable length does not work in SkipScanFilter"
Posted by ja...@apache.org.
Revert "PHOENIX-4322 DESC primary key column with variable length does not work in SkipScanFilter"
This reverts commit b0220fa7522fd7e1848ad428a47121b205dec504.
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/969b79c2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/969b79c2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/969b79c2
Branch: refs/heads/4.x-HBase-1.1
Commit: 969b79c22a377a0faf0d6195cdbcc878fffba36b
Parents: 0ac0549
Author: James Taylor <jt...@salesforce.com>
Authored: Mon Oct 30 19:24:51 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:39 2017 -0800
----------------------------------------------------------------------
.../src/it/java/org/apache/phoenix/end2end/SortOrderIT.java | 9 ---------
.../src/main/java/org/apache/phoenix/util/ScanUtil.java | 7 ++-----
2 files changed, 2 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/969b79c2/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortOrderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortOrderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortOrderIT.java
index 58bbabb..655dbb1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortOrderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortOrderIT.java
@@ -167,15 +167,6 @@ public class SortOrderIT extends ParallelStatsDisabledIT {
runQueryTest(ddl, upsert("oid", "code"), insertedRows, new Object[][]{{"o2", 2}}, new WhereCondition("oid", "IN", "('o2')"),
table);
}
-
- @Test
- public void inDescCompositePK3() throws Exception {
- String table = generateUniqueName();
- String ddl = "CREATE table " + table + " (oid INTEGER NOT NULL, code VARCHAR NOT NULL constraint pk primary key (oid DESC, code DESC))";
- Object[][] insertedRows = new Object[][]{{1, "o1"}, {2, "o2"}, {3, "o3"}};
- runQueryTest(ddl, upsert("oid", "code"), insertedRows, new Object[][]{{2, "o2"}, {1, "o1"}},
- new WhereCondition("(oid, code)", "IN", "((1, 'o1'), (2, 'o2'))"), table);
- }
@Test
public void likeDescCompositePK1() throws Exception {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/969b79c2/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
index 8ab4f20..a844226 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
@@ -431,11 +431,8 @@ public class ScanUtil {
anyInclusiveUpperRangeKey |= !range.isSingleKey() && inclusiveUpper;
// A null or empty byte array is always represented as a zero byte
byte sepByte = SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), bytes.length == 0, field);
- // The result of an RVC evaluation can come with a trailing separator already, so we
- // should avoid adding another one.
- if ( !isFixedWidth
- && ( bytes.length == 0 || key[offset - 1] != sepByte )
- && ( sepByte == QueryConstants.DESC_SEPARATOR_BYTE
+
+ if ( !isFixedWidth && ( sepByte == QueryConstants.DESC_SEPARATOR_BYTE
|| ( !exclusiveUpper
&& (fieldIndex < schema.getMaxFields() || inclusiveUpper || exclusiveLower) ) ) ) {
key[offset++] = sepByte;
[15/37] phoenix git commit: PHOENIX-4322 DESC primary key column with variable length does not work in SkipScanFilter (fix test failures)
Posted by ja...@apache.org.
PHOENIX-4322 DESC primary key column with variable length does not work in SkipScanFilter (fix test failures)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1d85ffa6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1d85ffa6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1d85ffa6
Branch: refs/heads/4.x-HBase-1.1
Commit: 1d85ffa61254102035e419d38e100cff5be54a98
Parents: 3df249c
Author: maryannxue <ma...@gmail.com>
Authored: Mon Oct 30 15:13:43 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:39 2017 -0800
----------------------------------------------------------------------
phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/1d85ffa6/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
index 8ab4f20..3fe8ad3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
@@ -434,7 +434,7 @@ public class ScanUtil {
// The result of an RVC evaluation can come with a trailing separator already, so we
// should avoid adding another one.
if ( !isFixedWidth
- && ( bytes.length == 0 || key[offset - 1] != sepByte )
+ && ( bytes.length == 0 || slotSpan[i] == 0 || key[offset - 1] != sepByte )
&& ( sepByte == QueryConstants.DESC_SEPARATOR_BYTE
|| ( !exclusiveUpper
&& (fieldIndex < schema.getMaxFields() || inclusiveUpper || exclusiveLower) ) ) ) {
[33/37] phoenix git commit: PHOENIX-4287 Add null check for parent name
Posted by ja...@apache.org.
PHOENIX-4287 Add null check for parent name
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a50aab00
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a50aab00
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a50aab00
Branch: refs/heads/4.x-HBase-1.1
Commit: a50aab0063dbbb95d7e21922871aaac18fdc90e1
Parents: f974679
Author: Samarth Jain <sa...@apache.org>
Authored: Thu Nov 2 17:52:32 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:41 2017 -0800
----------------------------------------------------------------------
.../java/org/apache/phoenix/iterate/BaseResultIterators.java | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a50aab00/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 18f28e2..eb09813 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -1246,9 +1246,10 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
}
/*
* For a view index, we use the property set on view. For indexes on base table, whether
- * global or local, we use the property set on the base table.
+ * global or local, we use the property set on the base table. Null check needed when
+ * dropping local indexes.
*/
- if (table.getType() == PTableType.INDEX) {
+ if (table.getType() == PTableType.INDEX && table.getParentName() != null) {
PhoenixConnection conn = context.getConnection();
String parentTableName = table.getParentName().getString();
try {
[13/37] phoenix git commit: PHOENIX-4277 Treat delete markers consistently with puts for point-in-time scans
Posted by ja...@apache.org.
PHOENIX-4277 Treat delete markers consistently with puts for point-in-time scans
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7c21a83d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7c21a83d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7c21a83d
Branch: refs/heads/4.x-HBase-1.1
Commit: 7c21a83df97878f421464147d29a9dfd2d636870
Parents: 8b360e2
Author: James Taylor <jt...@salesforce.com>
Authored: Sun Oct 29 15:19:23 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:34 2017 -0800
----------------------------------------------------------------------
.../phoenix/end2end/PointInTimeQueryIT.java | 2 +-
.../hadoop/hbase/regionserver/ScanInfoUtil.java | 35 ++++++++++++++++++++
.../coprocessor/BaseScannerRegionObserver.java | 21 ++++++++++++
.../apache/phoenix/util/TransactionUtil.java | 7 ++--
4 files changed, 62 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7c21a83d/phoenix-core/src/it/java/org/apache/phoenix/end2end/PointInTimeQueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PointInTimeQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PointInTimeQueryIT.java
index c53e523..ed3e8a9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PointInTimeQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PointInTimeQueryIT.java
@@ -63,7 +63,7 @@ public class PointInTimeQueryIT extends BaseQueryIT {
public PointInTimeQueryIT(String idxDdl, boolean columnEncoded)
throws Exception {
- // These queries fail without KEEP_DELETED_CELLS=true
- super(idxDdl, columnEncoded, true);
+ // Delete markers are now handled at scan time, so KEEP_DELETED_CELLS is not needed
+ super(idxDdl, columnEncoded, false);
}
@Test
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7c21a83d/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfoUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfoUtil.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfoUtil.java
new file mode 100644
index 0000000..9d61437
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfoUtil.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.KeepDeletedCells;
+
+public class ScanInfoUtil {
+ private ScanInfoUtil() {
+ }
+
+ public static boolean isKeepDeletedCells(ScanInfo scanInfo) {
+ return scanInfo.getKeepDeletedCells() != KeepDeletedCells.FALSE;
+ }
+
+ public static ScanInfo cloneScanInfoWithKeepDeletedCells(ScanInfo scanInfo) {
+ return new ScanInfo(scanInfo.getFamily(), Math.max(scanInfo.getMinVersions(), 1),
+ scanInfo.getMaxVersions(), scanInfo.getTtl(), KeepDeletedCells.TRUE,
+ scanInfo.getTimeToPurgeDeletes(), scanInfo.getComparator());
+ }
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7c21a83d/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index 1b95058..8aa9532 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -19,10 +19,12 @@ package org.apache.phoenix.coprocessor;
import java.io.IOException;
import java.util.List;
+import java.util.NavigableSet;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
@@ -30,10 +32,15 @@ import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScanInfo;
+import org.apache.hadoop.hbase.regionserver.ScanInfoUtil;
import org.apache.hadoop.hbase.regionserver.ScannerContext;
import org.apache.hadoop.hbase.regionserver.ScannerContextUtil;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.StoreScanner;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
@@ -48,6 +55,7 @@ import org.apache.phoenix.schema.types.PUnsignedTinyint;
import org.apache.phoenix.util.EncodedColumnsUtil;
import org.apache.phoenix.util.ScanUtil;
import org.apache.phoenix.util.ServerUtil;
+import org.apache.phoenix.util.TransactionUtil;
abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
@@ -348,4 +356,17 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
dataRegion, indexMaintainer, null, viewConstants, null, null, projector, ptr, useQualiferAsListIndex);
}
+ @Override
+ public KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
+ final Store store, final Scan scan, final NavigableSet<byte[]> targetCols,
+ final KeyValueScanner s) throws IOException {
+
+ if (scan.isRaw() || ScanInfoUtil.isKeepDeletedCells(store.getScanInfo()) || scan.getTimeRange().getMax() == HConstants.LATEST_TIMESTAMP || TransactionUtil.isTransactionalTimestamp(scan.getTimeRange().getMax())) {
+ return s;
+ }
+
+ ScanInfo scanInfo = ScanInfoUtil.cloneScanInfoWithKeepDeletedCells(store.getScanInfo());
+ return new StoreScanner(store, scanInfo, scan, targetCols,
+ c.getEnvironment().getRegion().getReadpoint(scan.getIsolationLevel()));
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7c21a83d/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
index 01b775e..a99c700 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
@@ -24,8 +24,6 @@ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
-import org.apache.phoenix.exception.SQLExceptionCode;
-import org.apache.phoenix.exception.SQLExceptionInfo;
import org.apache.phoenix.execute.MutationState;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.PTable;
@@ -33,11 +31,16 @@ import org.apache.phoenix.transaction.PhoenixTransactionContext;
import org.apache.phoenix.transaction.PhoenixTransactionalTable;
import org.apache.phoenix.transaction.TephraTransactionTable;
import org.apache.phoenix.transaction.TransactionFactory;
+import org.apache.tephra.util.TxUtils;
public class TransactionUtil {
private TransactionUtil() {
}
+ public static boolean isTransactionalTimestamp(long ts) {
+ return !TxUtils.isPreExistingVersion(ts);
+ }
+
public static boolean isDelete(Cell cell) {
return (CellUtil.matchingValue(cell, HConstants.EMPTY_BYTE_ARRAY));
}
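
Taken together, the hook means a point-in-time read can observe rows whose delete markers would otherwise have masked them, even when the column family was created with KEEP_DELETED_CELLS=false. A hedged client-side sketch (the CurrentSCN property is standard Phoenix; the URL, table, and timestamp are illustrative):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.util.Properties;

    public class PointInTimeSketch {
        public static void main(String[] args) throws Exception {
            long pastTimestamp = System.currentTimeMillis() - 60_000L; // illustrative
            Properties props = new Properties();
            props.setProperty("CurrentSCN", Long.toString(pastTimestamp));
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
                // Rows deleted after pastTimestamp remain visible in this snapshot because
                // the coprocessor reopens the store scanner with KeepDeletedCells.TRUE.
                ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM t");
                while (rs.next()) { /* consume rows as of pastTimestamp */ }
            }
        }
    }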
[12/37] phoenix git commit: PHOENIX-4310 Remove unnecessary casts in UngroupedAggregateRegionObserverIT
Posted by ja...@apache.org.
PHOENIX-4310 Remove unnecessary casts in UngroupedAggregateRegionObserverIT
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3f453e15
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3f453e15
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3f453e15
Branch: refs/heads/4.x-HBase-1.1
Commit: 3f453e152c2315e68fb9afe702fd650e4d0d3bef
Parents: a49aed8
Author: James Taylor <jt...@salesforce.com>
Authored: Fri Oct 20 12:19:37 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:41:23 2017 -0800
----------------------------------------------------------------------
.../end2end/UngroupedAggregateRegionObserverIT.java | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3f453e15/phoenix-core/src/it/java/org/apache/phoenix/end2end/UngroupedAggregateRegionObserverIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UngroupedAggregateRegionObserverIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UngroupedAggregateRegionObserverIT.java
index 3efd40e..0ae1bb5 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UngroupedAggregateRegionObserverIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UngroupedAggregateRegionObserverIT.java
@@ -82,7 +82,7 @@ public class UngroupedAggregateRegionObserverIT extends ParallelStatsDisabledIT
stopCapturingIndexLog();
// uneventful - nothing should be logged
Mockito.verify(mockAppender, never())
- .doAppend((LoggingEvent) captorLoggingEvent.capture());
+ .doAppend(captorLoggingEvent.capture());
}
}
@@ -126,8 +126,8 @@ public class UngroupedAggregateRegionObserverIT extends ParallelStatsDisabledIT
ungroupedObserver.clearTsOnDisabledIndexes(tableToCompact);
stopCapturingIndexLog();
// an event should've been logged
- Mockito.verify(mockAppender).doAppend((LoggingEvent) captorLoggingEvent.capture());
- LoggingEvent loggingEvent = (LoggingEvent) captorLoggingEvent.getValue();
+ Mockito.verify(mockAppender).doAppend(captorLoggingEvent.capture());
+ LoggingEvent loggingEvent = captorLoggingEvent.getValue();
assertThat(loggingEvent.getLevel(), is(Level.INFO));
// index should be permanently disabled (disabletime of 0)
assertTrue(TestUtil.checkIndexState(pConn, indexTableFullName, PIndexState.DISABLE, 0L));
@@ -147,8 +147,8 @@ public class UngroupedAggregateRegionObserverIT extends ParallelStatsDisabledIT
ungroupedObserver.clearTsOnDisabledIndexes(nonPhoenixTable);
stopCapturingIndexLog();
// a debug level event should've been logged
- Mockito.verify(mockAppender).doAppend((LoggingEvent) captorLoggingEvent.capture());
- LoggingEvent loggingEvent = (LoggingEvent) captorLoggingEvent.getValue();
+ Mockito.verify(mockAppender).doAppend(captorLoggingEvent.capture());
+ LoggingEvent loggingEvent = captorLoggingEvent.getValue();
assertThat(loggingEvent.getLevel(), is(Level.DEBUG));
}
}
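
The casts were redundant because ArgumentCaptor is generic. As a hedged reminder of the idiom (standard Mockito, reusing the test's names rather than commit code):

    // A captor typed at creation needs no cast on capture() or getValue()
    ArgumentCaptor<LoggingEvent> captorLoggingEvent = ArgumentCaptor.forClass(LoggingEvent.class);
    Mockito.verify(mockAppender).doAppend(captorLoggingEvent.capture());
    LoggingEvent loggingEvent = captorLoggingEvent.getValue();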
[30/37] phoenix git commit: PHOENIX-4237 Allow sorting on (Java) collation keys for non-English locales (Shehzaad Nakhoda)
Posted by ja...@apache.org.
PHOENIX-4237 Allow sorting on (Java) collation keys for non-English locales (Shehzaad Nakhoda)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/81019c64
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/81019c64
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/81019c64
Branch: refs/heads/4.x-HBase-1.1
Commit: 81019c644a1085f81ab1f84af46e411660320171
Parents: 5820ff4
Author: James Taylor <jt...@salesforce.com>
Authored: Fri Nov 3 09:17:29 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:41 2017 -0800
----------------------------------------------------------------------
LICENSE | 43 ++--
phoenix-core/pom.xml | 5 +
.../phoenix/end2end/CollationKeyFunctionIT.java | 181 ++++++++++++++
.../phoenix/expression/ExpressionType.java | 4 +-
.../function/CollationKeyFunction.java | 199 +++++++++++++++
.../apache/phoenix/jdbc/PhoenixConnection.java | 3 +
.../apache/phoenix/util/VarBinaryFormatter.java | 52 ++++
.../function/CollationKeyFunctionTest.java | 243 +++++++++++++++++++
phoenix-server/pom.xml | 1 +
9 files changed, 713 insertions(+), 18 deletions(-)
----------------------------------------------------------------------
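
Before the diffs, a hedged usage sketch of the new built-in (fragment; assumes an open Phoenix Connection named conn, and the table name is illustrative; the locale strings and extra arguments are the ones the new IT exercises):

    // Locale-aware ordering via a binary collation key
    java.sql.ResultSet rs = conn.createStatement().executeQuery(
        "SELECT id, data FROM my_table ORDER BY COLLATION_KEY(data, 'zh_TW_STROKE')");
    // Optional arguments seen in the tests: an upper-casing flag and a collator
    // strength, e.g. COLLATION_KEY(data, 'en', true) or COLLATION_KEY(data, 'en', false, 0).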
http://git-wip-us.apache.org/repos/asf/phoenix/blob/81019c64/LICENSE
----------------------------------------------------------------------
diff --git a/LICENSE b/LICENSE
index 08e5e10..7bd8ad1 100644
--- a/LICENSE
+++ b/LICENSE
@@ -236,23 +236,32 @@ Font Awesome fonts (http://fontawesome.io/)
3-Clause BSD License:
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
---
http://git-wip-us.apache.org/repos/asf/phoenix/blob/81019c64/phoenix-core/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 6f3adb4..f82cddc 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -471,5 +471,10 @@
<artifactId>stream</artifactId>
<version>${stream.version}</version>
</dependency>
+ <dependency>
+ <groupId>com.salesforce.i18n</groupId>
+ <artifactId>i18n-util</artifactId>
+ <version>1.0.1</version>
+ </dependency>
</dependencies>
</project>
http://git-wip-us.apache.org/repos/asf/phoenix/blob/81019c64/phoenix-core/src/it/java/org/apache/phoenix/end2end/CollationKeyFunctionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CollationKeyFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CollationKeyFunctionIT.java
new file mode 100644
index 0000000..efbab64
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CollationKeyFunctionIT.java
@@ -0,0 +1,181 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.TestUtil.closeStmtAndConn;
+import static org.junit.Assert.assertEquals;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.text.Collator;
+
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * End2End test that tests the COLLATION_KEY in an ORDER BY clause
+ *
+ */
+public class CollationKeyFunctionIT extends ParallelStatsDisabledIT {
+
+ private String tableName;
+ private String[] dataArray = new String[] {
+ // (0-6) chinese characters
+ "\u963f", "\u55c4", "\u963e", "\u554a", "\u4ec8", "\u3d9a", "\u9f51",
+ // (7-13) western characters, some with accent
+ "a", "b", "ä", "A", "a", "ä", "A" };
+
+ @Before
+ public void initAndPopulateTable() throws Exception {
+ Connection conn = null;
+ PreparedStatement stmt = null;
+ tableName = generateUniqueName();
+ try {
+ conn = DriverManager.getConnection(getUrl());
+ String ddl = "CREATE TABLE " + tableName + " (id INTEGER PRIMARY KEY, data VARCHAR)";
+ conn.createStatement().execute(ddl);
+
+ // insert dataArray into the table, with the index into the array as
+ // the id
+ for (int i = 0; i < dataArray.length; i++) {
+ PreparedStatement ps = conn.prepareStatement("upsert into " + tableName + " values(?, ?)");
+ ps.setInt(1, i);
+ ps.setString(2, dataArray[i]);
+ ps.executeUpdate();
+ }
+ conn.commit();
+ } finally {
+ closeStmtAndConn(stmt, conn);
+ }
+ }
+
+ @Test
+ public void testZhSort() throws Exception {
+ queryWithCollKeyDefaultArgsWithExpectedOrder("zh", 0, 6, new Integer[] { 3, 0, 1, 6, 5, 4, 2 });
+ }
+
+ @Test
+ public void testZhTwSort() throws Exception {
+ queryWithCollKeyDefaultArgsWithExpectedOrder("zh_TW", 0, 6, new Integer[] { 0, 3, 4, 1, 5, 2, 6 });
+ }
+
+ @Test
+ public void testZhTwStrokeSort() throws Exception {
+ queryWithCollKeyDefaultArgsWithExpectedOrder("zh_TW_STROKE", 0, 6, new Integer[] { 4, 2, 0, 3, 1, 6, 5 });
+ }
+
+ @Test
+ public void testZhStrokeSort() throws Exception {
+ queryWithCollKeyDefaultArgsWithExpectedOrder("zh__STROKE", 0, 6, new Integer[] { 0, 1, 3, 4, 6, 2, 5 });
+ }
+
+ @Test
+ public void testZhPinyinSort() throws Exception {
+ queryWithCollKeyDefaultArgsWithExpectedOrder("zh__PINYIN", 0, 6, new Integer[] { 0, 1, 3, 4, 6, 2, 5 });
+ }
+
+ @Test
+ public void testUpperCaseSort() throws Exception {
+ queryWithCollKeyUpperCaseWithExpectedOrder("en", 7, 13, new Integer[] { 7, 10, 11, 13, 9, 12, 8 });
+ }
+
+ @Test
+ public void testPrimaryStrengthSort() throws Exception {
+ queryWithCollKeyWithStrengthWithExpectedOrder("en", Collator.PRIMARY, false, 7, 13,
+ new Integer[] { 7, 9, 10, 11, 12, 13, 8 });
+ }
+
+ @Test
+ public void testSecondaryStrengthSort() throws Exception {
+ queryWithCollKeyWithStrengthWithExpectedOrder("en", Collator.SECONDARY, false, 7, 13,
+ new Integer[] { 7, 10, 11, 13, 9, 12, 8 });
+ }
+
+ @Test
+ public void testTertiaryStrengthSort() throws Exception {
+ queryWithCollKeyWithStrengthWithExpectedOrder("en", Collator.TERTIARY, false, 7, 13,
+ new Integer[] { 7, 11, 10, 13, 9, 12, 8 });
+ }
+
+ @Test
+ public void testTertiaryStrengthSortDesc() throws Exception {
+ queryWithCollKeyWithStrengthWithExpectedOrder("en", Collator.TERTIARY, true, 7, 13,
+ new Integer[] { 8, 12, 9, 13, 10, 11, 7 });
+ }
+
+
+ /**
+ * Issue a query ordered by the collation key (with COLLATION_KEY called
+ * with default args) of the data column according to the provided
+ * localeString, and compare the ID and data columns to the expected order.
+ *
+ * @param expectedIndexOrder
+ * an array of indexes into the dataArray in the order we expect.
+ * This is the same as the ID column
+ * @throws SQLException
+ */
+ private void queryWithCollKeyDefaultArgsWithExpectedOrder(String localeString, Integer beginIndex, Integer endIndex,
+ Integer[] expectedIndexOrder) throws Exception {
+ String query = String.format(
+ "SELECT id, data FROM %s WHERE ID BETWEEN %d AND %d ORDER BY COLLATION_KEY(data, '%s')", tableName,
+ beginIndex, endIndex, localeString);
+ queryWithExpectedOrder(query, expectedIndexOrder);
+ }
+
+ /**
+ * Same as above, except the upperCase collator argument is set to true
+ */
+ private void queryWithCollKeyUpperCaseWithExpectedOrder(String localeString, Integer beginIndex, Integer endIndex,
+ Integer[] expectedIndexOrder) throws Exception {
+ String query = String.format(
+ "SELECT id, data FROM %s WHERE ID BETWEEN %d AND %d ORDER BY COLLATION_KEY(data, '%s', true), id",
+ tableName, beginIndex, endIndex, localeString);
+ queryWithExpectedOrder(query, expectedIndexOrder);
+ }
+
+ /**
+ * Same as above, except the collator strength is set
+ */
+ private void queryWithCollKeyWithStrengthWithExpectedOrder(String localeString, Integer strength, boolean isDescending,
+ Integer beginIndex, Integer endIndex, Integer[] expectedIndexOrder) throws Exception {
+ String sortOrder = isDescending ? "DESC" : "";
+
+ String query = String.format(
+ "SELECT id, data FROM %s WHERE ID BETWEEN %d AND %d ORDER BY COLLATION_KEY(data, '%s', false, %d) %s, id %s",
+ tableName, beginIndex, endIndex, localeString, strength, sortOrder, sortOrder);
+ queryWithExpectedOrder(query, expectedIndexOrder);
+ }
+
+ private void queryWithExpectedOrder(String query, Integer[] expectedIndexOrder) throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ PreparedStatement ps = conn.prepareStatement(query);
+ ResultSet rs = ps.executeQuery();
+ int i = 0;
+ while (rs.next()) {
+ int expectedId = expectedIndexOrder[i];
+ assertEquals("For row " + i + ": The ID did not match the expected index", expectedId, rs.getInt(1));
+ assertEquals("For row " + i + ": The data did not match the expected entry from the data array",
+ dataArray[expectedId], rs.getString(2));
+ i++;
+ }
+ assertEquals("The result set returned a different number of rows from the data array", expectedIndexOrder.length, i);
+ }
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/81019c64/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
index 4f26e87..9a53eb1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
@@ -42,6 +42,7 @@ import org.apache.phoenix.expression.function.CeilTimestampExpression;
import org.apache.phoenix.expression.function.CeilWeekExpression;
import org.apache.phoenix.expression.function.CeilYearExpression;
import org.apache.phoenix.expression.function.CoalesceFunction;
+import org.apache.phoenix.expression.function.CollationKeyFunction;
import org.apache.phoenix.expression.function.ConvertTimezoneFunction;
import org.apache.phoenix.expression.function.CountAggregateFunction;
import org.apache.phoenix.expression.function.DayOfMonthFunction;
@@ -294,7 +295,8 @@ public enum ExpressionType {
ArrayColumnExpression(SingleCellColumnExpression.class),
FirstValuesFunction(FirstValuesFunction.class),
LastValuesFunction(LastValuesFunction.class),
- DistinctCountHyperLogLogAggregateFunction(DistinctCountHyperLogLogAggregateFunction.class);
+ DistinctCountHyperLogLogAggregateFunction(DistinctCountHyperLogLogAggregateFunction.class),
+ CollationKeyFunction(CollationKeyFunction.class);
ExpressionType(Class<? extends Expression> clazz) {
this.clazz = clazz;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/81019c64/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
new file mode 100644
index 0000000..827f70a
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import java.io.DataInput;
+import java.io.IOException;
+import java.sql.SQLException;
+import java.text.Collator;
+import java.util.List;
+import java.util.Locale;
+
+import org.apache.commons.lang.BooleanUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.LiteralExpression;
+import org.apache.phoenix.parse.FunctionParseNode;
+import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.schema.types.PBoolean;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PVarbinary;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.util.VarBinaryFormatter;
+
+import com.force.db.i18n.LinguisticSort;
+import com.force.i18n.LocaleUtils;
+
+/**
+ * A Phoenix Function that calculates a collation key for an input string based
+ * on a caller-provided locale and collator strength and decomposition settings.
+ *
+ * The locale should be specified as xx_yy_variant where xx is the ISO 639-1
+ * 2-letter language code and yy is the ISO 3166 2-letter country code. Both
+ * the country code and the variant are optional. For example, zh_TW_STROKE, zh_TW and zh
+ * are all valid locale representations. Note the language code, country code
+ * and variant are used as arguments to the constructor of java.util.Locale.
+ *
+ * This function uses the open-source i18n-util package to obtain the collators
+ * it needs from the provided locale.
+ *
+ * The LinguisticSort implementation in i18n-util encapsulates sort-related
+ * functionality for a substantive list of locales. For each locale, it provides
+ * a collator and an Oracle-specific database function that can be used to sort
+ * strings according to the natural language rules of that locale.
+ *
+ * This function uses the collator returned by LinguisticSort.getCollator to
+ * produce a collation key for its input string. A user can expect that the
+ * sorting semantics of this function for a given locale is equivalent to the
+ * sorting behaviour of an Oracle query that is constructed using the Oracle
+ * functions returned by LinguisticSort for that locale.
+ *
+ * The optional third argument to the function is a boolean that specifies
+ * whether to use the upper-case collator (case-insensitive) returned by
+ * LinguisticSort.getUpperCaseCollator.
+ *
+ * The optional fourth and fifth arguments are used to set respectively the
+ * strength and composition of the collator returned by LinguisticSort using the
+ * setStrength and setDecomposition methods of java.text.Collator.
+ *
+ */
+@FunctionParseNode.BuiltInFunction(name = CollationKeyFunction.NAME, args = {
+ // input string
+ @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }),
+ // ISO Code for Locale
+ @FunctionParseNode.Argument(allowedTypes = { PVarchar.class }, isConstant = true),
+ // whether to use special upper case collator
+ @FunctionParseNode.Argument(allowedTypes = { PBoolean.class }, defaultValue = "false", isConstant = true),
+ // collator strength
+ @FunctionParseNode.Argument(allowedTypes = { PInteger.class }, defaultValue = "null", isConstant = true),
+ // collator decomposition
+ @FunctionParseNode.Argument(allowedTypes = { PInteger.class }, defaultValue = "null", isConstant = true) })
+public class CollationKeyFunction extends ScalarFunction {
+
+ private static final Log LOG = LogFactory.getLog(CollationKeyFunction.class);
+
+ public static final String NAME = "COLLATION_KEY";
+
+ private Collator collator;
+
+ public CollationKeyFunction() {
+ }
+
+ public CollationKeyFunction(List<Expression> children) throws SQLException {
+ super(children);
+ initialize();
+ }
+
+ @Override
+ public void readFields(DataInput input) throws IOException {
+ super.readFields(input);
+ initialize();
+ }
+
+ @Override
+ public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
+ Expression expression = getChildren().get(0);
+ if (!expression.evaluate(tuple, ptr)) {
+ return false;
+ }
+ String inputString = (String) PVarchar.INSTANCE.toObject(ptr, expression.getSortOrder());
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("CollationKey inputString: " + inputString);
+ }
+ byte[] collationKeyByteArray = collator.getCollationKey(inputString).toByteArray();
+
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("CollationKey bytes: " + VarBinaryFormatter.INSTANCE.format(collationKeyByteArray));
+ }
+
+ ptr.set(collationKeyByteArray);
+ return true;
+ }
+
+ private void initialize() {
+ String localeISOCode = getLiteralValue(1, String.class);
+ Boolean useSpecialUpperCaseCollator = getLiteralValue(2, Boolean.class);
+ Integer collatorStrength = getLiteralValue(3, Integer.class);
+ Integer collatorDecomposition = getLiteralValue(4, Integer.class);
+
+ if (LOG.isTraceEnabled()) {
+ StringBuilder logInputsMessage = new StringBuilder();
+ logInputsMessage.append("Input (literal) arguments: ").append("localeISOCode: " + localeISOCode)
+ .append(", useSpecialUpperCaseCollator: " + useSpecialUpperCaseCollator)
+ .append(", collatorStrength: " + collatorStrength)
+ .append(", collatorDecomposition: " + collatorDecomposition);
+ LOG.trace(logInputsMessage);
+ }
+
+ Locale locale = LocaleUtils.get().getLocaleByIsoCode(localeISOCode);
+
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(String.format("Locale: " + locale.toLanguageTag()));
+ }
+
+ LinguisticSort linguisticSort = LinguisticSort.get(locale);
+
+ collator = BooleanUtils.isTrue(useSpecialUpperCaseCollator) ? linguisticSort.getUpperCaseCollator(false)
+ : linguisticSort.getCollator();
+
+ if (collatorStrength != null) {
+ collator.setStrength(collatorStrength);
+ }
+
+ if (collatorDecomposition != null) {
+ collator.setDecomposition(collatorDecomposition);
+ }
+
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(String.format("Collator: [strength: %d, decomposition: %d], Special-Upper-Case: %s",
+ collator.getStrength(), collator.getDecomposition(),
+ BooleanUtils.isTrue(useSpecialUpperCaseCollator)));
+ }
+ }
+
+ @Override
+ public PDataType getDataType() {
+ return PVarbinary.INSTANCE;
+ }
+
+ @Override
+ public String getName() {
+ return NAME;
+ }
+
+ @Override
+ public boolean isThreadSafe() {
+ // ICU4J Collators are not thread-safe unless they are frozen.
+ // TODO: Look into calling freeze() on them to be able to return true here.
+ return false;
+ }
+
+ private <T> T getLiteralValue(int childIndex, Class<T> type) {
+ Expression expression = getChildren().get(childIndex);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("child: " + childIndex + ", expression: " + expression);
+ }
+ // It's safe to assume expression is a LiteralExpression since
+ // only arguments marked as isConstant = true should be handled through
+ // this method.
+ return type.cast(((LiteralExpression) expression).getValue());
+ }
+}
\ No newline at end of file
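For illustration only (not part of the patch): a minimal JDBC sketch of invoking the COLLATION_KEY function defined above. The connection URL and table name are hypothetical; the two-argument form shown relies on the defaults declared for the optional arguments.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class CollationKeySketch {
        public static void main(String[] args) throws Exception {
            // Hypothetical URL and table; COLLATION_KEY(name, 'zh_TW') orders rows
            // by the Traditional Chinese collation of the name column.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                    Statement stmt = conn.createStatement();
                    ResultSet rs = stmt.executeQuery(
                            "SELECT name FROM my_table ORDER BY COLLATION_KEY(name, 'zh_TW')")) {
                while (rs.next()) {
                    System.out.println(rs.getString(1));
                }
            }
        }
    }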
http://git-wip-us.apache.org/repos/asf/phoenix/blob/81019c64/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index 730f754..4555190 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -105,6 +105,7 @@ import org.apache.phoenix.schema.types.PTimestamp;
import org.apache.phoenix.schema.types.PUnsignedDate;
import org.apache.phoenix.schema.types.PUnsignedTime;
import org.apache.phoenix.schema.types.PUnsignedTimestamp;
+import org.apache.phoenix.schema.types.PVarbinary;
import org.apache.phoenix.trace.util.Tracing;
import org.apache.phoenix.transaction.PhoenixTransactionContext;
import org.apache.phoenix.util.DateUtil;
@@ -116,6 +117,7 @@ import org.apache.phoenix.util.ReadOnlyProps;
import org.apache.phoenix.util.SQLCloseable;
import org.apache.phoenix.util.SQLCloseables;
import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.VarBinaryFormatter;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Objects;
@@ -336,6 +338,7 @@ public class PhoenixConnection implements Connection, MetaDataMutated, SQLClosea
formatters.put(PUnsignedTimestamp.INSTANCE, timestampFormat);
formatters.put(PDecimal.INSTANCE,
FunctionArgumentType.NUMERIC.getFormatter(numberPattern));
+ formatters.put(PVarbinary.INSTANCE, VarBinaryFormatter.INSTANCE);
// We do not limit the metaData on a connection less than the global
// one,
// as there's not much that will be cached here.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/81019c64/phoenix-core/src/main/java/org/apache/phoenix/util/VarBinaryFormatter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/VarBinaryFormatter.java b/phoenix-core/src/main/java/org/apache/phoenix/util/VarBinaryFormatter.java
new file mode 100644
index 0000000..7f0d030
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/VarBinaryFormatter.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.util;
+
+import java.text.FieldPosition;
+import java.text.Format;
+import java.text.ParsePosition;
+
+import org.apache.commons.codec.binary.Hex;
+
+/**
+ * A formatter that formats a byte array to a hexadecimal string
+ * (with each byte converted to a 2-digit hex sequence)
+ *
+ * @author snakhoda-sfdc
+ */
+public class VarBinaryFormatter extends Format {
+
+ private static final long serialVersionUID = -7940880118392024750L;
+
+ public static final VarBinaryFormatter INSTANCE = new VarBinaryFormatter();
+
+ @Override
+ public StringBuffer format(Object obj, StringBuffer toAppendTo, FieldPosition pos) {
+ if (!(obj instanceof byte[])) {
+ throw new IllegalArgumentException("VarBinaryFormatter can only format byte arrays");
+ }
+ String hexString = Hex.encodeHexString((byte[]) obj);
+ toAppendTo.append(hexString);
+ return toAppendTo;
+ }
+
+ @Override
+ public Object parseObject(String source, ParsePosition pos) {
+ // Parsing a hex string back into a byte array is not supported.
+ throw new UnsupportedOperationException();
+ }
+}
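A quick behavioral sketch of the formatter above (not part of the patch); the byte values are arbitrary.

    import org.apache.phoenix.util.VarBinaryFormatter;

    public class VarBinaryFormatterSketch {
        public static void main(String[] args) {
            byte[] bytes = new byte[] { 0x0a, (byte) 0xff, 0x10 };
            // Each byte becomes a 2-digit lowercase hex sequence: prints "0aff10".
            System.out.println(VarBinaryFormatter.INSTANCE.format(bytes));
        }
    }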
http://git-wip-us.apache.org/repos/asf/phoenix/blob/81019c64/phoenix-core/src/test/java/org/apache/phoenix/expression/function/CollationKeyFunctionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/function/CollationKeyFunctionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/function/CollationKeyFunctionTest.java
new file mode 100644
index 0000000..f57a937
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/function/CollationKeyFunctionTest.java
@@ -0,0 +1,243 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.expression.function;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.fail;
+
+import java.text.Collator;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+
+import org.apache.commons.codec.binary.Hex;
+import org.apache.commons.lang.builder.ToStringBuilder;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.expression.Expression;
+import org.apache.phoenix.expression.LiteralExpression;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PBoolean;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+import com.google.common.primitives.UnsignedBytes;
+
+/**
+ * "Unit" tests for CollationKeyFunction
+ *
+ */
+public class CollationKeyFunctionTest {
+
+ private static String[] chineseChars = new String[] { "\u963f", "\u55c4", "\u963e", "\u554a", "\u4ec8", "\u3d9a",
+ "\u9f51" };
+
+ private static Comparator<byte[]> collationKeyComparator = UnsignedBytes.lexicographicalComparator();
+
+ private static Comparator<ByteArrayAndInteger> collationKeyAndIndexComparator = new Comparator<ByteArrayAndInteger>() {
+ @Override
+ public int compare(ByteArrayAndInteger o1, ByteArrayAndInteger o2) {
+ int compareResult = collationKeyComparator.compare(o1.byteArray, o2.byteArray);
+ if (compareResult == 0) {
+ compareResult = o1.integer.compareTo(o2.integer);
+ }
+ return compareResult;
+ }
+ };
+
+ private static class ByteArrayAndInteger {
+
+ private ByteArrayAndInteger(byte[] byteArray, Integer integer) {
+ super();
+ this.byteArray = byteArray;
+ this.integer = integer;
+ }
+
+ byte[] byteArray;
+ Integer integer;
+
+ public String toString() {
+ return ToStringBuilder.reflectionToString(this);
+ }
+
+ public static ByteArrayAndInteger findFirstIntegerMatch(List<ByteArrayAndInteger> list,
+ Integer matchingInteger) {
+ for (ByteArrayAndInteger entry : list) {
+ if (entry.integer.equals(matchingInteger)) {
+ return entry;
+ }
+ }
+ return null;
+ }
+ }
+
+ @Test
+ public void testZhSort() throws Exception {
+ testSortOrderNoEquals(chineseChars, "zh", Boolean.FALSE, null, null, new Integer[] { 3, 0, 1, 6, 5, 4, 2 });
+ }
+
+ @Test
+ public void testZhTwSort() throws Exception {
+ testSortOrderNoEquals(chineseChars, "zh_TW", Boolean.FALSE, null, null, new Integer[] { 0, 3, 4, 1, 5, 2, 6 });
+ }
+
+ @Test
+ public void testZhTwStrokeSort() throws Exception {
+ testSortOrderNoEquals(chineseChars, "zh_TW_STROKE", Boolean.FALSE, null, null,
+ new Integer[] { 4, 2, 0, 3, 1, 6, 5 });
+ }
+
+ @Test
+ public void testZhStrokeSort() throws Exception {
+ testSortOrderNoEquals(chineseChars, "zh__STROKE", Boolean.FALSE, null, null,
+ new Integer[] { 0, 1, 3, 4, 6, 2, 5 });
+ }
+
+ @Test
+ public void testZhPinyinSort() throws Exception {
+ testSortOrderNoEquals(chineseChars, "zh__PINYIN", Boolean.FALSE, null, null,
+ new Integer[] { 0, 1, 3, 4, 6, 2, 5 });
+ }
+
+ @Test
+ public void testUpperCaseCollationKeyBytes() throws Exception {
+ testCollationKeysEqual(new String[] { "abcdef", "ABCDEF", "aBcDeF" }, "en", Boolean.TRUE, null, null);
+ }
+
+ @Test
+ public void testEqualCollationKeysForPrimaryStrength() throws Exception {
+ // "a", "A", "ä" are considered equivalent
+ testCollationKeysEqual(new String[] { "a", "A", "ä" }, "en", Boolean.FALSE, Collator.PRIMARY, null);
+ testSortOrderNoEquals(new String[] { "b", "a" }, "en", Boolean.FALSE, Collator.PRIMARY, null,
+ new Integer[] { 1, 0 });
+
+ }
+
+ @Test
+ public void testCollationKeyBytesForSecondaryStrength() throws Exception {
+ // "a" and "A" are considered equivalent but not "ä"
+ testCollationKeysEqual(new String[] { "a", "A" }, "en", Boolean.FALSE, Collator.SECONDARY, null);
+ testSortOrderNoEquals(new String[] { "b", "a", "ä" }, "en", Boolean.FALSE, Collator.SECONDARY, null,
+ new Integer[] { 1, 2, 0 });
+ }
+
+ @Test
+ public void testCollationKeyBytesForTertiaryStrength() throws Exception {
+ // none of these are considered equivalent
+ testSortOrderNoEquals(new String[] { "b", "a", "ä", "A" }, "en", Boolean.FALSE, Collator.TERTIARY, null,
+ new Integer[] { 1, 3, 2, 0 });
+ }
+
+ /**
+ * Just test that changing the decomposition mode works for basic sorting.
+ * TODO: Actually test for the accented characters and languages where this
+ * actually matters.
+ */
+ @Test
+ public void testCollationKeyBytesForFullDecomposition() throws Exception {
+ testCollationKeysEqual(new String[] { "a", "A" }, "en", Boolean.FALSE, null, Collator.FULL_DECOMPOSITION);
+ }
+
+ /** HELPER METHODS **/
+ private void testSortOrderNoEquals(String[] inputStrings, String locale, Boolean uppercaseCollator,
+ Integer strength, Integer decomposition, Integer[] expectedOrder) throws Exception {
+ List<ByteArrayAndInteger> sortedCollationKeysAndIndexes = calculateCollationKeys(inputStrings, locale,
+ uppercaseCollator, strength, decomposition);
+ Collections.sort(sortedCollationKeysAndIndexes, collationKeyAndIndexComparator);
+ testCollationKeysNotEqual(inputStrings, sortedCollationKeysAndIndexes);
+
+ Integer[] sortedIndexes = new Integer[sortedCollationKeysAndIndexes.size()];
+ for (int i = 0; i < sortedIndexes.length; i++) {
+ sortedIndexes[i] = sortedCollationKeysAndIndexes.get(i).integer;
+ }
+ assertArrayEquals(expectedOrder, sortedIndexes);
+ }
+
+ private List<ByteArrayAndInteger> calculateCollationKeys(String[] inputStrings, String locale,
+ Boolean upperCaseCollator, Integer strength, Integer decomposition) throws Exception {
+ List<ByteArrayAndInteger> collationKeysAndIndexes = Lists.newArrayList();
+ for (int i = 0; i < inputStrings.length; i++) {
+ byte[] thisCollationKeyBytes = callFunction(inputStrings[i], locale, upperCaseCollator, strength,
+ decomposition, SortOrder.ASC);
+ collationKeysAndIndexes.add(new ByteArrayAndInteger(thisCollationKeyBytes, i));
+ }
+ return collationKeysAndIndexes;
+ }
+
+ private void testCollationKeysEqual(String[] inputStrings, String locale, Boolean upperCaseCollator,
+ Integer strength, Integer decomposition) throws Exception {
+ List<ByteArrayAndInteger> collationKeysAndIndexes = calculateCollationKeys(inputStrings, locale,
+ upperCaseCollator, strength, decomposition);
+
+ for (int i = 0, j = 1; i < inputStrings.length && j < inputStrings.length; i++, j++) {
+ byte[] iByteArray = ByteArrayAndInteger.findFirstIntegerMatch(collationKeysAndIndexes, i).byteArray;
+ byte[] jByteArray = ByteArrayAndInteger.findFirstIntegerMatch(collationKeysAndIndexes, j).byteArray;
+ boolean isPairEqual = collationKeyComparator.compare(iByteArray, jByteArray) == 0;
+ if (!isPairEqual) {
+ fail(String.format("Collation keys for inputStrings [%s] and [%s] ([%s], [%s]) were not equal",
+ inputStrings[i], inputStrings[j], Hex.encodeHexString(iByteArray),
+ Hex.encodeHexString(jByteArray)));
+ }
+ }
+ }
+
+ private void testCollationKeysNotEqual(String[] inputStrings, List<ByteArrayAndInteger> collationKeysAndIndexes)
+ throws Exception {
+ for (int i = 0; i < inputStrings.length; i++) {
+ for (int j = i + 1; j < inputStrings.length; j++) {
+ byte[] iByteArray = ByteArrayAndInteger.findFirstIntegerMatch(collationKeysAndIndexes, i).byteArray;
+ byte[] jByteArray = ByteArrayAndInteger.findFirstIntegerMatch(collationKeysAndIndexes, j).byteArray;
+ boolean isPairEqual = collationKeyComparator.compare(iByteArray, jByteArray) == 0;
+ if (isPairEqual) {
+ fail(String.format("Collation keys for inputStrings [%s] and [%s] ([%s], [%s]) were equal",
+ inputStrings[i], inputStrings[j], Hex.encodeHexString(iByteArray),
+ Hex.encodeHexString(jByteArray)));
+ }
+ }
+ }
+ }
+
+ private static byte[] callFunction(String inputStr, String localeIsoCode, Boolean upperCaseCollator,
+ Integer strength, Integer decomposition, SortOrder sortOrder) throws Exception {
+ LiteralExpression inputStrLiteral, localeIsoCodeLiteral, upperCaseBooleanLiteral, strengthLiteral,
+ decompositionLiteral;
+ inputStrLiteral = LiteralExpression.newConstant(inputStr, PVarchar.INSTANCE, sortOrder);
+ localeIsoCodeLiteral = LiteralExpression.newConstant(localeIsoCode, PVarchar.INSTANCE, sortOrder);
+ upperCaseBooleanLiteral = LiteralExpression.newConstant(upperCaseCollator, PBoolean.INSTANCE, sortOrder);
+ strengthLiteral = LiteralExpression.newConstant(strength, PInteger.INSTANCE, sortOrder);
+ decompositionLiteral = LiteralExpression.newConstant(decomposition, PInteger.INSTANCE, sortOrder);
+ return callFunction(inputStrLiteral, localeIsoCodeLiteral, upperCaseBooleanLiteral, strengthLiteral,
+ decompositionLiteral);
+
+ }
+
+ private static byte[] callFunction(LiteralExpression inputStrLiteral, LiteralExpression localeIsoCodeLiteral,
+ LiteralExpression upperCaseBooleanLiteral, LiteralExpression strengthLiteral,
+ LiteralExpression decompositionLiteral) throws Exception {
+ List<Expression> expressions = Lists.newArrayList((Expression) inputStrLiteral,
+ (Expression) localeIsoCodeLiteral, (Expression) upperCaseBooleanLiteral, (Expression) strengthLiteral,
+ (Expression) decompositionLiteral);
+ Expression collationKeyFunction = new CollationKeyFunction(expressions);
+ ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+ boolean ret = collationKeyFunction.evaluate(null, ptr);
+ byte[] result = ret
+ ? (byte[]) collationKeyFunction.getDataType().toObject(ptr, collationKeyFunction.getSortOrder()) : null;
+ return result;
+ }
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/81019c64/phoenix-server/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index 3576425..67832ad 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -134,6 +134,7 @@
<include>io.dropwizard.metrics:metrics-core</include>
<include>org.apache.thrift:libthrift</include>
<include>com.clearspring.analytics:stream</include>
+ <include>com.salesforce.i18n:i18n-util</include>
</includes>
<excludes>
<exclude>org.apache.phoenix:phoenix-server</exclude>
[06/37] phoenix git commit: PHOENIX-4289 UPDATE STATISTICS command
does not collect stats for local indexes
Posted by ja...@apache.org.
PHOENIX-4289 UPDATE STATISTICS command does not collect stats for local indexes
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8b360e23
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8b360e23
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8b360e23
Branch: refs/heads/4.x-HBase-1.1
Commit: 8b360e23e910f28d4bb7bf2e0470e120ffc9ca85
Parents: 6c527c1
Author: Samarth Jain <sa...@apache.org>
Authored: Sun Oct 29 22:59:03 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:41:23 2017 -0800
----------------------------------------------------------------------
.../end2end/ExplainPlanWithStatsEnabledIT.java | 13 ++++
.../phoenix/end2end/index/BaseLocalIndexIT.java | 3 +
.../phoenix/end2end/index/LocalIndexIT.java | 46 ++++++++++-
.../phoenix/iterate/BaseResultIterators.java | 4 +-
.../apache/phoenix/schema/MetaDataClient.java | 80 +++++++++++++-------
5 files changed, 115 insertions(+), 31 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b360e23/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
index cd4555c..62538af 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
@@ -32,6 +32,7 @@ import java.util.List;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.schema.PTableKey;
import org.apache.phoenix.schema.TableNotFoundException;
import org.apache.phoenix.util.EnvironmentEdge;
@@ -306,6 +307,18 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
final Long estimatedRows;
final Long estimateInfoTs;
+ public Long getEstimatedBytes() {
+ return estimatedBytes;
+ }
+
+ public Long getEstimatedRows() {
+ return estimatedRows;
+ }
+
+ public Long getEstimateInfoTs() {
+ return estimateInfoTs;
+ }
+
Estimate(Long rows, Long bytes, Long ts) {
this.estimatedBytes = bytes;
this.estimatedRows = rows;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b360e23/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseLocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseLocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseLocalIndexIT.java
index 30baec4..1659d73 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseLocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseLocalIndexIT.java
@@ -59,6 +59,9 @@ public abstract class BaseLocalIndexIT extends BaseUniqueNamesOwnClusterIT {
serverProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(1);
clientProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
+ // Set the minimum stats update frequency to a large value to verify that
+ // stats are still generated for local indexes despite the throttle
+ clientProps.put(QueryServices.MIN_STATS_UPDATE_FREQ_MS_ATTRIB, "120000");
setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet().iterator()));
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b360e23/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 48221ab..0dcf1d5 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -17,6 +17,7 @@
*/
package org.apache.phoenix.end2end.index;
+import static org.apache.phoenix.end2end.ExplainPlanWithStatsEnabledIT.getByteRowEstimates;
import static org.apache.phoenix.util.MetaDataUtil.getViewIndexSequenceName;
import static org.apache.phoenix.util.MetaDataUtil.getViewIndexSequenceSchemaName;
import static org.junit.Assert.assertArrayEquals;
@@ -55,8 +56,10 @@ import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
+import org.apache.phoenix.end2end.ExplainPlanWithStatsEnabledIT.Estimate;
import org.apache.phoenix.hbase.index.IndexRegionSplitPolicy;
import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixResultSet;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.schema.PNameFactory;
@@ -67,9 +70,10 @@ import org.apache.phoenix.schema.TableNotFoundException;
import org.apache.phoenix.util.QueryUtil;
import org.apache.phoenix.util.SchemaUtil;
import org.apache.phoenix.util.TestUtil;
-import org.junit.Ignore;
import org.junit.Test;
+import com.google.common.collect.Lists;
+
public class LocalIndexIT extends BaseLocalIndexIT {
public LocalIndexIT(boolean isNamespaceMapped) {
super(isNamespaceMapped);
@@ -714,4 +718,44 @@ public class LocalIndexIT extends BaseLocalIndexIT {
}
}
+ @Test // See https://issues.apache.org/jira/browse/PHOENIX-4289
+ public void testEstimatesWithLocalIndexes() throws Exception {
+ String tableName = generateUniqueName();
+ String indexName = "IDX_" + generateUniqueName();
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ int guidePostWidth = 20;
+ conn.createStatement()
+ .execute("CREATE TABLE " + tableName
+ + " (k INTEGER PRIMARY KEY, a bigint, b bigint)"
+ + " GUIDE_POSTS_WIDTH=" + guidePostWidth);
+ conn.createStatement().execute("upsert into " + tableName + " values (100,1,3)");
+ conn.createStatement().execute("upsert into " + tableName + " values (101,2,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (102,2,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (103,2,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (104,2,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (105,2,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (106,2,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (107,2,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (108,2,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (109,2,4)");
+ conn.commit();
+ conn.createStatement().execute(
+ "CREATE LOCAL INDEX " + indexName + " ON " + tableName + " (a) INCLUDE (b) ");
+ String ddl = "ALTER TABLE " + tableName + " SET USE_STATS_FOR_PARALLELIZATION = false";
+ conn.createStatement().execute(ddl);
+ conn.createStatement().execute("UPDATE STATISTICS " + tableName + "");
+ }
+ List<Object> binds = Lists.newArrayList();
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ String sql =
+ "SELECT COUNT(*) " + " FROM " + tableName;
+ ResultSet rs = conn.createStatement().executeQuery(sql);
+ assertTrue("Index " + indexName + " should have been used",
+ rs.unwrap(PhoenixResultSet.class).getStatement().getQueryPlan().getTableRef()
+ .getTable().getName().getString().equals(indexName));
+ Estimate info = getByteRowEstimates(conn, sql, binds);
+ assertEquals((Long) 10L, info.getEstimatedRows());
+ assertTrue(info.getEstimateInfoTs() > 0);
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b360e23/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index f037a20..250cb48 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -491,9 +491,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
scanId = new UUID(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong()).toString();
initializeScan(plan, perScanLimit, offset, scan);
- this.useStatsForParallelization =
- context.getConnection().getQueryServices().getConfiguration().getBoolean(
- USE_STATS_FOR_PARALLELIZATION, DEFAULT_USE_STATS_FOR_PARALLELIZATION);
+ this.useStatsForParallelization = table.useStatsForParallelization();
this.scans = getParallelScans();
List<KeyRange> splitRanges = Lists.newArrayListWithExpectedSize(scans.size() * ESTIMATED_GUIDEPOSTS_PER_REGION);
for (List<Scan> scanList : scans) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b360e23/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 0ce4246..701633b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -236,7 +236,6 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Strings;
-import com.google.common.collect.Iterators;
import com.google.common.collect.ListMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
@@ -1088,7 +1087,7 @@ public class MetaDataClient {
PTable table = resolver.getTables().get(0).getTable();
long rowCount = 0;
if (updateStatisticsStmt.updateColumns()) {
- rowCount += updateStatisticsInternal(table.getPhysicalName(), table, updateStatisticsStmt.getProps());
+ rowCount += updateStatisticsInternal(table.getPhysicalName(), table, updateStatisticsStmt.getProps(), true);
}
if (updateStatisticsStmt.updateIndex()) {
// TODO: If our table is a VIEW with multiple indexes or a TABLE with local indexes,
@@ -1096,25 +1095,50 @@ public class MetaDataClient {
// across all indexes in that case so that we don't re-calculate the same stats
// multiple times.
for (PTable index : table.getIndexes()) {
- rowCount += updateStatisticsInternal(index.getPhysicalName(), index, updateStatisticsStmt.getProps());
+ // If the table is a view, then we will end up calling update stats
+ // here for all the view indexes on it. We take care of local indexes later.
+ if (index.getIndexType() != IndexType.LOCAL) {
+ rowCount += updateStatisticsInternal(table.getPhysicalName(), index, updateStatisticsStmt.getProps(), true);
+ }
+ }
+ /*
+ * Update stats for local indexes. This takes care of local indexes on the table
+ * as well as local indexes on any views on it.
+ */
+ PName physicalName = table.getPhysicalName();
+ List<byte[]> localCFs = MetaDataUtil.getLocalIndexColumnFamilies(connection, physicalName.getBytes());
+ if (!localCFs.isEmpty()) {
+ /*
+ * We need to pass checkLastStatsUpdateTime as false here. Local indexes are on the
+ * same table as the physical table. So when the user has requested to update stats
+ * for both the table and the indexes on it, we need to make sure that we don't re-check
+ * LAST_STATS_UPDATE_TIME. If we re-checked it, we would end up *not* collecting
+ * stats for local indexes at all.
+ *
+ * Note that this also means we don't have a way of controlling how often update
+ * stats can run for local indexes. Consider the case where the user calls UPDATE STATS TABLE
+ * followed by UPDATE STATS TABLE INDEX: the second statement skips the check
+ * and may run stats collection more frequently than intended.
+ */
+ rowCount += updateStatisticsInternal(physicalName, table, updateStatisticsStmt.getProps(), localCFs, false);
}
// If analyzing the indexes of a multi-tenant table or a table with view indexes
// then analyze all of those indexes too.
if (table.getType() != PTableType.VIEW) {
if (table.isMultiTenant() || MetaDataUtil.hasViewIndexTable(connection, table.getPhysicalName())) {
- final PName physicalName = PNameFactory.newName(MetaDataUtil.getViewIndexPhysicalName(table.getPhysicalName().getBytes()));
+ final PName viewIndexPhysicalTableName = PNameFactory.newName(MetaDataUtil.getViewIndexPhysicalName(table.getPhysicalName().getBytes()));
PTable indexLogicalTable = new DelegateTable(table) {
@Override
public PName getPhysicalName() {
- return physicalName;
+ return viewIndexPhysicalTableName;
}
};
- rowCount += updateStatisticsInternal(physicalName, indexLogicalTable, updateStatisticsStmt.getProps());
- }
- PName physicalName = table.getPhysicalName();
- List<byte[]> localCFs = MetaDataUtil.getLocalIndexColumnFamilies(connection, physicalName.getBytes());
- if (!localCFs.isEmpty()) {
- rowCount += updateStatisticsInternal(physicalName, table, updateStatisticsStmt.getProps(), localCFs);
+ /*
+ * Note for future maintainers: local indexes, whether on a table or on a view,
+ * reside on the same physical table as the base table, not on the view index
+ * table. So the call below collects stats only for non-local view indexes.
+ */
+ rowCount += updateStatisticsInternal(viewIndexPhysicalTableName, indexLogicalTable, updateStatisticsStmt.getProps(), true);
}
}
}
@@ -1127,27 +1151,29 @@ public class MetaDataClient {
};
}
- private long updateStatisticsInternal(PName physicalName, PTable logicalTable, Map<String, Object> statsProps) throws SQLException {
- return updateStatisticsInternal(physicalName, logicalTable, statsProps, null);
+ private long updateStatisticsInternal(PName physicalName, PTable logicalTable, Map<String, Object> statsProps, boolean checkLastStatsUpdateTime) throws SQLException {
+ return updateStatisticsInternal(physicalName, logicalTable, statsProps, null, checkLastStatsUpdateTime);
}
- private long updateStatisticsInternal(PName physicalName, PTable logicalTable, Map<String, Object> statsProps, List<byte[]> cfs) throws SQLException {
+ private long updateStatisticsInternal(PName physicalName, PTable logicalTable, Map<String, Object> statsProps, List<byte[]> cfs, boolean checkLastStatsUpdateTime) throws SQLException {
ReadOnlyProps props = connection.getQueryServices().getProps();
final long msMinBetweenUpdates = props
.getLong(QueryServices.MIN_STATS_UPDATE_FREQ_MS_ATTRIB,
props.getLong(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB,
QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS) / 2);
- byte[] tenantIdBytes = ByteUtil.EMPTY_BYTE_ARRAY;
Long scn = connection.getSCN();
// Always invalidate the cache
long clientTimeStamp = connection.getSCN() == null ? HConstants.LATEST_TIMESTAMP : scn;
- String query = "SELECT CURRENT_DATE()," + LAST_STATS_UPDATE_TIME + " FROM " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME
- + " WHERE " + PHYSICAL_NAME + "='" + physicalName.getString() + "' AND " + COLUMN_FAMILY
- + " IS NULL AND " + LAST_STATS_UPDATE_TIME + " IS NOT NULL";
- ResultSet rs = connection.createStatement().executeQuery(query);
long msSinceLastUpdate = Long.MAX_VALUE;
- if (rs.next()) {
- msSinceLastUpdate = rs.getLong(1) - rs.getLong(2);
+ if (checkLastStatsUpdateTime) {
+ String query = "SELECT CURRENT_DATE()," + LAST_STATS_UPDATE_TIME + " FROM " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME
+ + " WHERE " + PHYSICAL_NAME + "='" + physicalName.getString() + "' AND " + COLUMN_FAMILY
+ + " IS NULL AND " + LAST_STATS_UPDATE_TIME + " IS NOT NULL";
+ ResultSet rs = connection.createStatement().executeQuery(query);
+
+ if (rs.next()) {
+ msSinceLastUpdate = rs.getLong(1) - rs.getLong(2);
+ }
}
long rowCount = 0;
if (msSinceLastUpdate >= msMinBetweenUpdates) {
@@ -1976,14 +2002,14 @@ public class MetaDataClient {
}
}
- boolean useStatsForParallelization = true;
- Boolean useStatsForParallelizationProp = (Boolean) TableProperty.USE_STATS_FOR_PARALLELIZATION.getValue(tableProps);
+ boolean useStatsForParallelization =
+ connection.getQueryServices().getProps().getBoolean(
+ QueryServices.USE_STATS_FOR_PARALLELIZATION,
+ QueryServicesOptions.DEFAULT_USE_STATS_FOR_PARALLELIZATION);
+ Boolean useStatsForParallelizationProp =
+ (Boolean) TableProperty.USE_STATS_FOR_PARALLELIZATION.getValue(tableProps);
if (useStatsForParallelizationProp != null) {
useStatsForParallelization = useStatsForParallelizationProp;
- } else {
- useStatsForParallelization = connection.getQueryServices().getProps().getBoolean(
- QueryServices.USE_STATS_FOR_PARALLELIZATION,
- QueryServicesOptions.DEFAULT_USE_STATS_FOR_PARALLELIZATION);
}
boolean sharedTable = statement.getTableType() == PTableType.VIEW || allocateIndexId;
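The table-level override established by the hunk above is user-settable via DDL, as the LocalIndexIT test earlier in this commit does. A hedged JDBC sketch (hypothetical URL and table name):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class UseStatsForParallelizationSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                    Statement stmt = conn.createStatement()) {
                // The per-table setting, once present, takes precedence over the
                // QueryServices.USE_STATS_FOR_PARALLELIZATION config default.
                stmt.execute("ALTER TABLE my_table SET USE_STATS_FOR_PARALLELIZATION = false");
            }
        }
    }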
[02/37] phoenix git commit: PHOENIX-4294 Allow scalar function to
declare that it's not thread safe
Posted by ja...@apache.org.
PHOENIX-4294 Allow scalar function to declare that it's not thread safe
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e2351ef4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e2351ef4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e2351ef4
Branch: refs/heads/4.x-HBase-1.1
Commit: e2351ef4a23ef63747fede4b80859d7b2f7f34f4
Parents: 7d2c1ed
Author: James Taylor <jt...@salesforce.com>
Authored: Wed Oct 18 09:28:31 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:39:19 2017 -0800
----------------------------------------------------------------------
.../apache/phoenix/expression/function/ScalarFunction.java | 9 +++++++++
.../phoenix/expression/visitor/CloneExpressionVisitor.java | 2 +-
2 files changed, 10 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e2351ef4/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ScalarFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ScalarFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ScalarFunction.java
index 4f44cde..2a5fe44 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ScalarFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ScalarFunction.java
@@ -87,4 +87,13 @@ public abstract class ScalarFunction extends FunctionExpression {
public KeyPart newKeyPart(KeyPart childPart) {
return null;
}
+
+ /**
+ * Used to determine whether the same ScalarFunction instance may be
+ * used by multiple threads.
+ * @return true if the function is thread safe, false otherwise.
+ */
+ public boolean isThreadSafe() {
+ return true;
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e2351ef4/phoenix-core/src/main/java/org/apache/phoenix/expression/visitor/CloneExpressionVisitor.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/visitor/CloneExpressionVisitor.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/visitor/CloneExpressionVisitor.java
index e47fb64..c6d7c9e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/visitor/CloneExpressionVisitor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/visitor/CloneExpressionVisitor.java
@@ -110,7 +110,7 @@ public abstract class CloneExpressionVisitor extends TraverseAllExpressionVisito
@Override
public Expression visitLeave(ScalarFunction node, List<Expression> l) {
- return isCloneNode(node, l) ? node.clone(l) : node;
+ return isCloneNode(node, l) || !node.isThreadSafe() ? node.clone(l) : node;
}
public Expression visitLeave(UDFExpression node, List<Expression> l) {
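The visitor change above means that any ScalarFunction reporting isThreadSafe() == false is cloned per expression tree rather than shared across threads. A hedged sketch of a function opting out of sharing; the class is hypothetical, but CollationKeyFunction earlier in this batch does the same because its java.text.Collator instances are mutable.

    import java.util.List;

    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.phoenix.expression.Expression;
    import org.apache.phoenix.expression.function.ScalarFunction;
    import org.apache.phoenix.schema.tuple.Tuple;
    import org.apache.phoenix.schema.types.PDataType;
    import org.apache.phoenix.schema.types.PVarchar;

    // Hypothetical function that holds mutable per-instance state.
    public class MyStatefulFunction extends ScalarFunction {
        public MyStatefulFunction(List<Expression> children) {
            super(children);
        }

        @Override
        public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
            // ... evaluation that relies on non-thread-safe helper objects ...
            return false;
        }

        @Override
        public PDataType getDataType() {
            return PVarchar.INSTANCE;
        }

        @Override
        public String getName() {
            return "MY_STATEFUL";
        }

        @Override
        public boolean isThreadSafe() {
            // Causes CloneExpressionVisitor.visitLeave to clone this node.
            return false;
        }
    }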
[27/37] phoenix git commit: PHOENIX-4332 Indexes should inherit guide
post width of the base data table
Posted by ja...@apache.org.
PHOENIX-4332 Indexes should inherit guide post width of the base data table
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/730f9588
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/730f9588
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/730f9588
Branch: refs/heads/4.x-HBase-1.1
Commit: 730f958846475bd3b8996cdb6d31bc5342e0e2eb
Parents: 21606e5
Author: Samarth Jain <sa...@apache.org>
Authored: Wed Nov 1 23:24:52 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:40 2017 -0800
----------------------------------------------------------------------
...mnEncodedImmutableNonTxStatsCollectorIT.java | 1 +
...olumnEncodedImmutableTxStatsCollectorIT.java | 1 +
...lumnEncodedMutableNonTxStatsCollectorIT.java | 1 +
.../ColumnEncodedMutableTxStatsCollectorIT.java | 1 +
...mnEncodedImmutableNonTxStatsCollectorIT.java | 1 +
...olumnEncodedImmutableTxStatsCollectorIT.java | 1 +
.../phoenix/end2end/StatsCollectorIT.java | 734 ----------------
...SysTableNamespaceMappedStatsCollectorIT.java | 1 +
.../phoenix/schema/stats/StatsCollectorIT.java | 832 +++++++++++++++++++
.../stats/DefaultStatisticsCollector.java | 58 +-
10 files changed, 895 insertions(+), 736 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/730f9588/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedImmutableNonTxStatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedImmutableNonTxStatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedImmutableNonTxStatsCollectorIT.java
index d5d8442..eb01e89 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedImmutableNonTxStatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedImmutableNonTxStatsCollectorIT.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.end2end;
import java.util.Arrays;
import java.util.Collection;
+import org.apache.phoenix.schema.stats.StatsCollectorIT;
import org.junit.runners.Parameterized.Parameters;
public class ColumnEncodedImmutableNonTxStatsCollectorIT extends StatsCollectorIT {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/730f9588/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedImmutableTxStatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedImmutableTxStatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedImmutableTxStatsCollectorIT.java
index 23b1654..4e90d70 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedImmutableTxStatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedImmutableTxStatsCollectorIT.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.end2end;
import java.util.Arrays;
import java.util.Collection;
+import org.apache.phoenix.schema.stats.StatsCollectorIT;
import org.junit.runners.Parameterized.Parameters;
public class ColumnEncodedImmutableTxStatsCollectorIT extends StatsCollectorIT {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/730f9588/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedMutableNonTxStatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedMutableNonTxStatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedMutableNonTxStatsCollectorIT.java
index 24869a2..2a560db 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedMutableNonTxStatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedMutableNonTxStatsCollectorIT.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.end2end;
import java.util.Arrays;
import java.util.Collection;
+import org.apache.phoenix.schema.stats.StatsCollectorIT;
import org.junit.runners.Parameterized.Parameters;
public class ColumnEncodedMutableNonTxStatsCollectorIT extends StatsCollectorIT {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/730f9588/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedMutableTxStatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedMutableTxStatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedMutableTxStatsCollectorIT.java
index eea591d..01fa2b5 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedMutableTxStatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnEncodedMutableTxStatsCollectorIT.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.end2end;
import java.util.Arrays;
import java.util.Collection;
+import org.apache.phoenix.schema.stats.StatsCollectorIT;
import org.junit.runners.Parameterized.Parameters;
public class ColumnEncodedMutableTxStatsCollectorIT extends StatsCollectorIT {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/730f9588/phoenix-core/src/it/java/org/apache/phoenix/end2end/NonColumnEncodedImmutableNonTxStatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NonColumnEncodedImmutableNonTxStatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NonColumnEncodedImmutableNonTxStatsCollectorIT.java
index fe70030..27c6dc2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NonColumnEncodedImmutableNonTxStatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NonColumnEncodedImmutableNonTxStatsCollectorIT.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.end2end;
import java.util.Arrays;
import java.util.Collection;
+import org.apache.phoenix.schema.stats.StatsCollectorIT;
import org.junit.runners.Parameterized.Parameters;
public class NonColumnEncodedImmutableNonTxStatsCollectorIT extends StatsCollectorIT {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/730f9588/phoenix-core/src/it/java/org/apache/phoenix/end2end/NonColumnEncodedImmutableTxStatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NonColumnEncodedImmutableTxStatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NonColumnEncodedImmutableTxStatsCollectorIT.java
index 10a846a..0cec31a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NonColumnEncodedImmutableTxStatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NonColumnEncodedImmutableTxStatsCollectorIT.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.end2end;
import java.util.Arrays;
import java.util.Collection;
+import org.apache.phoenix.schema.stats.StatsCollectorIT;
import org.junit.runners.Parameterized.Parameters;
public class NonColumnEncodedImmutableTxStatsCollectorIT extends StatsCollectorIT {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/730f9588/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
deleted file mode 100644
index da8e78d..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
+++ /dev/null
@@ -1,734 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE;
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-import static org.apache.phoenix.util.TestUtil.getAllSplits;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.sql.Array;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Random;
-
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
-import org.apache.phoenix.query.ConnectionQueryServices;
-import org.apache.phoenix.query.KeyRange;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.PTableImpl;
-import org.apache.phoenix.schema.PTableKey;
-import org.apache.phoenix.schema.stats.GuidePostsInfo;
-import org.apache.phoenix.schema.stats.GuidePostsKey;
-import org.apache.phoenix.schema.stats.StatisticsUtil;
-import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.apache.phoenix.util.SchemaUtil;
-import org.apache.phoenix.util.TestUtil;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import com.google.common.collect.Maps;
-
-@RunWith(Parameterized.class)
-public abstract class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
- private final String tableDDLOptions;
- private final boolean columnEncoded;
- private String tableName;
- private String schemaName;
- private String fullTableName;
- private String physicalTableName;
- private final boolean userTableNamespaceMapped;
- private final boolean mutable;
-
- protected StatsCollectorIT(boolean mutable, boolean transactional, boolean userTableNamespaceMapped, boolean columnEncoded) {
- StringBuilder sb = new StringBuilder();
- if (transactional) {
- sb.append("TRANSACTIONAL=true");
- }
- if (!columnEncoded) {
- if (sb.length()>0) {
- sb.append(",");
- }
- sb.append("COLUMN_ENCODED_BYTES=0");
- } else {
- if (sb.length()>0) {
- sb.append(",");
- }
- sb.append("COLUMN_ENCODED_BYTES=4");
- }
- if (!mutable) {
- if (sb.length()>0) {
- sb.append(",");
- }
- sb.append("IMMUTABLE_ROWS=true");
- if (!columnEncoded) {
- sb.append(",IMMUTABLE_STORAGE_SCHEME="+PTableImpl.ImmutableStorageScheme.ONE_CELL_PER_COLUMN);
- }
- }
- this.tableDDLOptions = sb.toString();
- this.userTableNamespaceMapped = userTableNamespaceMapped;
- this.columnEncoded = columnEncoded;
- this.mutable = mutable;
- }
-
- @BeforeClass
- public static void doSetup() throws Exception {
- // enable namespace mapping at the global level on both client and server side
- Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(7);
- serverProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
- serverProps.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
- Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(2);
- clientProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
- clientProps.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
- setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet().iterator()));
- }
-
- @Before
- public void generateTableNames() throws SQLException {
- schemaName = generateUniqueName();
- if (userTableNamespaceMapped) {
- try (Connection conn = getConnection()) {
- conn.createStatement().execute("CREATE SCHEMA " + schemaName);
- }
- }
- tableName = "T_" + generateUniqueName();
- fullTableName = SchemaUtil.getTableName(schemaName, tableName);
- physicalTableName = SchemaUtil.getPhysicalHBaseTableName(schemaName, tableName, userTableNamespaceMapped).getString();
- }
-
- private Connection getConnection() throws SQLException {
- return getConnection(Integer.MAX_VALUE);
- }
-
- private Connection getConnection(Integer statsUpdateFreq) throws SQLException {
- Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- props.setProperty(QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB, Boolean.TRUE.toString());
- props.setProperty(QueryServices.EXPLAIN_ROW_COUNT_ATTRIB, Boolean.TRUE.toString());
- props.setProperty(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, Integer.toString(statsUpdateFreq));
- // enable/disable namespace mapping at connection level
- props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(userTableNamespaceMapped));
- return DriverManager.getConnection(getUrl(), props);
- }
-
- @Test
- public void testUpdateEmptyStats() throws Exception {
- Connection conn = getConnection();
- conn.setAutoCommit(true);
- conn.createStatement().execute(
- "CREATE TABLE " + fullTableName +" ( k CHAR(1) PRIMARY KEY )" + tableDDLOptions);
- conn.createStatement().execute("UPDATE STATISTICS " + fullTableName);
- ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + fullTableName);
- String explainPlan = QueryUtil.getExplainPlan(rs);
- assertEquals(
- "CLIENT 1-CHUNK 0 ROWS 20 BYTES PARALLEL 1-WAY FULL SCAN OVER " + physicalTableName + "\n" +
- " SERVER FILTER BY FIRST KEY ONLY",
- explainPlan);
- conn.close();
- }
-
- @Test
- public void testSomeUpdateEmptyStats() throws Exception {
- Connection conn = getConnection();
- conn.setAutoCommit(true);
- conn.createStatement().execute(
- "CREATE TABLE " + fullTableName +" ( k VARCHAR PRIMARY KEY, a.v1 VARCHAR, b.v2 VARCHAR ) " + tableDDLOptions + (tableDDLOptions.isEmpty() ? "" : ",") + "SALT_BUCKETS = 3");
- conn.createStatement().execute("UPSERT INTO " + fullTableName + "(k,v1) VALUES('a','123456789')");
- conn.createStatement().execute("UPDATE STATISTICS " + fullTableName);
-
- ResultSet rs;
- String explainPlan;
- rs = conn.createStatement().executeQuery("EXPLAIN SELECT v2 FROM " + fullTableName + " WHERE v2='foo'");
- explainPlan = QueryUtil.getExplainPlan(rs);
- // if we are using the ONE_CELL_PER_COLUMN_FAMILY storage scheme, we will have a single kv even though there are no values for col family v2
- String stats = columnEncoded && !mutable ? "4-CHUNK 1 ROWS 38 BYTES" : "3-CHUNK 0 ROWS 20 BYTES";
- assertEquals(
- "CLIENT " + stats + " PARALLEL 3-WAY FULL SCAN OVER " + physicalTableName + "\n" +
- " SERVER FILTER BY B.V2 = 'foo'\n" +
- "CLIENT MERGE SORT",
- explainPlan);
- rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + fullTableName);
- explainPlan = QueryUtil.getExplainPlan(rs);
- assertEquals(
- "CLIENT 4-CHUNK 1 ROWS " + (columnEncoded ? "28" : "34") + " BYTES PARALLEL 3-WAY FULL SCAN OVER " + physicalTableName + "\n" +
- "CLIENT MERGE SORT",
- explainPlan);
- rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + fullTableName + " WHERE k = 'a'");
- explainPlan = QueryUtil.getExplainPlan(rs);
- assertEquals(
- "CLIENT 1-CHUNK 1 ROWS " + (columnEncoded ? "204" : "202") + " BYTES PARALLEL 1-WAY POINT LOOKUP ON 1 KEY OVER " + physicalTableName + "\n" +
- "CLIENT MERGE SORT",
- explainPlan);
-
- conn.close();
- }
-
- @Test
- public void testUpdateStats() throws SQLException, IOException,
- InterruptedException {
- Connection conn;
- PreparedStatement stmt;
- ResultSet rs;
- Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- conn = getConnection();
- conn.createStatement().execute(
- "CREATE TABLE " + fullTableName +" ( k VARCHAR, a_string_array VARCHAR(100) ARRAY[4], b_string_array VARCHAR(100) ARRAY[4] \n"
- + " CONSTRAINT pk PRIMARY KEY (k, b_string_array DESC))"
- + tableDDLOptions );
- String[] s;
- Array array;
- conn = upsertValues(props, fullTableName);
- // Call the update statistics query here. If major compaction has already run, this will not get executed.
- stmt = conn.prepareStatement("UPDATE STATISTICS " + fullTableName);
- stmt.execute();
- stmt = upsertStmt(conn, fullTableName);
- stmt.setString(1, "z");
- s = new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" };
- array = conn.createArrayOf("VARCHAR", s);
- stmt.setArray(2, array);
- s = new String[] { "zya", "def", "ghi", "jkll", null, null, null, "xxx" };
- array = conn.createArrayOf("VARCHAR", s);
- stmt.setArray(3, array);
- stmt.execute();
- stmt = conn.prepareStatement("UPDATE STATISTICS " + fullTableName);
- stmt.execute();
- rs = conn.createStatement().executeQuery("SELECT k FROM " + fullTableName);
- assertTrue(rs.next());
- conn.close();
- }
-
- private void testNoDuplicatesAfterUpdateStats(String splitKey) throws Throwable {
- Connection conn = getConnection();
- PreparedStatement stmt;
- ResultSet rs;
- conn.createStatement()
- .execute("CREATE TABLE " + fullTableName
- + " ( k VARCHAR, c1.a bigint,c2.b bigint CONSTRAINT pk PRIMARY KEY (k))"+ tableDDLOptions
- + (splitKey != null ? " split on (" + splitKey + ")" : "") );
- conn.createStatement().execute("upsert into " + fullTableName + " values ('abc',1,3)");
- conn.createStatement().execute("upsert into " + fullTableName + " values ('def',2,4)");
- conn.commit();
- conn.createStatement().execute("UPDATE STATISTICS " + fullTableName);
- rs = conn.createStatement().executeQuery("SELECT k FROM " + fullTableName + " order by k desc");
- assertTrue(rs.next());
- assertEquals("def", rs.getString(1));
- assertTrue(rs.next());
- assertEquals("abc", rs.getString(1));
- assertTrue(!rs.next());
- conn.close();
- }
-
- @Test
- public void testNoDuplicatesAfterUpdateStatsWithSplits() throws Throwable {
- testNoDuplicatesAfterUpdateStats("'abc','def'");
- }
-
- @Test
- public void testNoDuplicatesAfterUpdateStatsWithDesc() throws Throwable {
- testNoDuplicatesAfterUpdateStats(null);
- }
-
- @Test
- public void testUpdateStatsWithMultipleTables() throws Throwable {
- String fullTableName2 = SchemaUtil.getTableName(schemaName, "T_" + generateUniqueName());
- Connection conn;
- PreparedStatement stmt;
- ResultSet rs;
- Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- conn = getConnection();
- conn.createStatement().execute(
- "CREATE TABLE " + fullTableName +" ( k VARCHAR, a_string_array VARCHAR(100) ARRAY[4], b_string_array VARCHAR(100) ARRAY[4] \n"
- + " CONSTRAINT pk PRIMARY KEY (k, b_string_array DESC))" + tableDDLOptions );
- conn.createStatement().execute(
- "CREATE TABLE " + fullTableName2 +" ( k VARCHAR, a_string_array VARCHAR(100) ARRAY[4], b_string_array VARCHAR(100) ARRAY[4] \n"
- + " CONSTRAINT pk PRIMARY KEY (k, b_string_array DESC))" + tableDDLOptions );
- String[] s;
- Array array;
- conn = upsertValues(props, fullTableName);
- conn = upsertValues(props, fullTableName2);
- // Call the update statistics query here
- stmt = conn.prepareStatement("UPDATE STATISTICS "+fullTableName);
- stmt.execute();
- stmt = conn.prepareStatement("UPDATE STATISTICS "+fullTableName2);
- stmt.execute();
- stmt = upsertStmt(conn, fullTableName);
- stmt.setString(1, "z");
- s = new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" };
- array = conn.createArrayOf("VARCHAR", s);
- stmt.setArray(2, array);
- s = new String[] { "zya", "def", "ghi", "jkll", null, null, null, "xxx" };
- array = conn.createArrayOf("VARCHAR", s);
- stmt.setArray(3, array);
- stmt.execute();
- stmt = upsertStmt(conn, fullTableName2);
- stmt.setString(1, "z");
- s = new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" };
- array = conn.createArrayOf("VARCHAR", s);
- stmt.setArray(2, array);
- s = new String[] { "zya", "def", "ghi", "jkll", null, null, null, "xxx" };
- array = conn.createArrayOf("VARCHAR", s);
- stmt.setArray(3, array);
- stmt.execute();
- conn.close();
- conn = getConnection();
- // This analyze would not work
- stmt = conn.prepareStatement("UPDATE STATISTICS "+fullTableName2);
- stmt.execute();
- rs = conn.createStatement().executeQuery("SELECT k FROM "+fullTableName2);
- assertTrue(rs.next());
- conn.close();
- }
-
- private Connection upsertValues(Properties props, String tableName) throws SQLException, IOException,
- InterruptedException {
- Connection conn;
- PreparedStatement stmt;
- conn = getConnection();
- stmt = upsertStmt(conn, tableName);
- stmt.setString(1, "a");
- String[] s = new String[] { "abc", "def", "ghi", "jkll", null, null, "xxx" };
- Array array = conn.createArrayOf("VARCHAR", s);
- stmt.setArray(2, array);
- s = new String[] { "abc", "def", "ghi", "jkll", null, null, null, "xxx" };
- array = conn.createArrayOf("VARCHAR", s);
- stmt.setArray(3, array);
- stmt.execute();
- conn.commit();
- stmt = upsertStmt(conn, tableName);
- stmt.setString(1, "b");
- s = new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" };
- array = conn.createArrayOf("VARCHAR", s);
- stmt.setArray(2, array);
- s = new String[] { "zya", "def", "ghi", "jkll", null, null, null, "xxx" };
- array = conn.createArrayOf("VARCHAR", s);
- stmt.setArray(3, array);
- stmt.execute();
- conn.commit();
- stmt = upsertStmt(conn, tableName);
- stmt.setString(1, "c");
- s = new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" };
- array = conn.createArrayOf("VARCHAR", s);
- stmt.setArray(2, array);
- s = new String[] { "zya", "def", "ghi", "jkll", null, null, null, "xxx" };
- array = conn.createArrayOf("VARCHAR", s);
- stmt.setArray(3, array);
- stmt.execute();
- conn.commit();
- stmt = upsertStmt(conn, tableName);
- stmt.setString(1, "d");
- s = new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" };
- array = conn.createArrayOf("VARCHAR", s);
- stmt.setArray(2, array);
- s = new String[] { "zya", "def", "ghi", "jkll", null, null, null, "xxx" };
- array = conn.createArrayOf("VARCHAR", s);
- stmt.setArray(3, array);
- stmt.execute();
- conn.commit();
- stmt = upsertStmt(conn, tableName);
- stmt.setString(1, "b");
- s = new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" };
- array = conn.createArrayOf("VARCHAR", s);
- stmt.setArray(2, array);
- s = new String[] { "zya", "def", "ghi", "jkll", null, null, null, "xxx" };
- array = conn.createArrayOf("VARCHAR", s);
- stmt.setArray(3, array);
- stmt.execute();
- conn.commit();
- stmt = upsertStmt(conn, tableName);
- stmt.setString(1, "e");
- s = new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" };
- array = conn.createArrayOf("VARCHAR", s);
- stmt.setArray(2, array);
- s = new String[] { "zya", "def", "ghi", "jkll", null, null, null, "xxx" };
- array = conn.createArrayOf("VARCHAR", s);
- stmt.setArray(3, array);
- stmt.execute();
- conn.commit();
- return conn;
- }
-
- private PreparedStatement upsertStmt(Connection conn, String tableName) throws SQLException {
- PreparedStatement stmt;
- stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?,?)");
- return stmt;
- }
-
- private void compactTable(Connection conn, String tableName) throws Exception {
- TestUtil.doMajorCompaction(conn, tableName);
- }
-
- @Test
- @Ignore //TODO remove this once https://issues.apache.org/jira/browse/TEPHRA-208 is fixed
- public void testCompactUpdatesStats() throws Exception {
- testCompactUpdatesStats(0, fullTableName);
- }
-
- @Test
- @Ignore //TODO remove this once https://issues.apache.org/jira/browse/TEPHRA-208 is fixed
- public void testCompactUpdatesStatsWithMinStatsUpdateFreq() throws Exception {
- testCompactUpdatesStats(QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS, fullTableName);
- }
-
- private static void invalidateStats(Connection conn, String tableName) throws SQLException {
- PTable ptable = conn.unwrap(PhoenixConnection.class)
- .getMetaDataCache().getTableRef(new PTableKey(null, tableName))
- .getTable();
- byte[] name = ptable.getPhysicalName().getBytes();
- conn.unwrap(PhoenixConnection.class).getQueryServices().invalidateStats(new GuidePostsKey(name, SchemaUtil.getEmptyColumnFamily(ptable)));
- }
-
- private void testCompactUpdatesStats(Integer statsUpdateFreq, String tableName) throws Exception {
- int nRows = 10;
- Connection conn = getConnection(statsUpdateFreq);
- PreparedStatement stmt;
- conn.createStatement().execute("CREATE TABLE " + tableName + "(k CHAR(1) PRIMARY KEY, v INTEGER, w INTEGER) "
- + (!tableDDLOptions.isEmpty() ? tableDDLOptions + "," : "")
- + HColumnDescriptor.KEEP_DELETED_CELLS + "=" + Boolean.FALSE);
- stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?,?)");
- for (int i = 0; i < nRows; i++) {
- stmt.setString(1, Character.toString((char) ('a' + i)));
- stmt.setInt(2, i);
- stmt.setInt(3, i);
- stmt.executeUpdate();
- }
- conn.commit();
-
- compactTable(conn, physicalTableName);
-
- if (statsUpdateFreq != 0) {
- invalidateStats(conn, tableName);
- } else {
- // Confirm that when we have a non-zero STATS_UPDATE_FREQ_MS_ATTRIB, after we run
- // UPDATE STATISTICS, the new statistics are faulted in as expected.
- List<KeyRange>keyRanges = getAllSplits(conn, tableName);
- assertNotEquals(nRows+1, keyRanges.size());
- // If we've set MIN_STATS_UPDATE_FREQ_MS_ATTRIB, an UPDATE STATISTICS will invalidate the cache
- // and force the new stats to be pulled over.
- int rowCount = conn.createStatement().executeUpdate("UPDATE STATISTICS " + tableName);
- assertEquals(10, rowCount);
- }
- List<KeyRange>keyRanges = getAllSplits(conn, tableName);
- assertEquals(nRows+1, keyRanges.size());
-
- int nDeletedRows = conn.createStatement().executeUpdate("DELETE FROM " + tableName + " WHERE V < " + nRows / 2);
- conn.commit();
- assertEquals(5, nDeletedRows);
-
- Scan scan = new Scan();
- scan.setRaw(true);
- PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
- try (HTableInterface htable = phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
- ResultScanner scanner = htable.getScanner(scan);
- Result result;
- while ((result = scanner.next())!=null) {
- System.out.println(result);
- }
- }
-
- compactTable(conn, physicalTableName);
-
- scan = new Scan();
- scan.setRaw(true);
- phxConn = conn.unwrap(PhoenixConnection.class);
- try (HTableInterface htable = phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
- ResultScanner scanner = htable.getScanner(scan);
- Result result;
- while ((result = scanner.next())!=null) {
- System.out.println(result);
- }
- }
-
- if (statsUpdateFreq != 0) {
- invalidateStats(conn, tableName);
- } else {
- assertEquals(nRows+1, keyRanges.size());
- // If we've set STATS_UPDATE_FREQ_MS_ATTRIB, an UPDATE STATISTICS will invalidate the cache
- // and force us to pull over the new stats
- int rowCount = conn.createStatement().executeUpdate("UPDATE STATISTICS " + tableName);
- assertEquals(5, rowCount);
- }
- keyRanges = getAllSplits(conn, tableName);
- assertEquals(nRows/2+1, keyRanges.size());
- ResultSet rs = conn.createStatement().executeQuery("SELECT SUM(GUIDE_POSTS_ROW_COUNT) FROM "
- + "\""+ SYSTEM_CATALOG_SCHEMA + "\".\"" + SYSTEM_STATS_TABLE + "\"" + " WHERE PHYSICAL_NAME='" + physicalTableName + "'");
- rs.next();
- assertEquals(nRows - nDeletedRows, rs.getLong(1));
- }
-
- @Test
- public void testWithMultiCF() throws Exception {
- int nRows = 20;
- Connection conn = getConnection(0);
- PreparedStatement stmt;
- conn.createStatement().execute(
- "CREATE TABLE " + fullTableName
- + "(k VARCHAR PRIMARY KEY, a.v INTEGER, b.v INTEGER, c.v INTEGER NULL, d.v INTEGER NULL) "
- + tableDDLOptions );
- stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + " VALUES(?,?, ?, ?, ?)");
- byte[] val = new byte[250];
- for (int i = 0; i < nRows; i++) {
- stmt.setString(1, Character.toString((char)('a' + i)) + Bytes.toString(val));
- stmt.setInt(2, i);
- stmt.setInt(3, i);
- stmt.setInt(4, i);
- stmt.setInt(5, i);
- stmt.executeUpdate();
- }
- conn.commit();
- stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + "(k, c.v, d.v) VALUES(?,?,?)");
- for (int i = 0; i < 5; i++) {
- stmt.setString(1, Character.toString((char)('a' + 'z' + i)) + Bytes.toString(val));
- stmt.setInt(2, i);
- stmt.setInt(3, i);
- stmt.executeUpdate();
- }
- conn.commit();
-
- ResultSet rs;
- TestUtil.analyzeTable(conn, fullTableName);
- List<KeyRange> keyRanges = getAllSplits(conn, fullTableName);
- assertEquals(26, keyRanges.size());
- rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + fullTableName);
- assertEquals("CLIENT 26-CHUNK 25 ROWS " + (columnEncoded ? ( mutable ? "12530" : "13902" ) : "12420") + " BYTES PARALLEL 1-WAY FULL SCAN OVER " + physicalTableName,
- QueryUtil.getExplainPlan(rs));
-
- ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
- List<HRegionLocation> regions = services.getAllTableRegions(Bytes.toBytes(physicalTableName));
- assertEquals(1, regions.size());
-
- TestUtil.analyzeTable(conn, fullTableName);
- String query = "UPDATE STATISTICS " + fullTableName + " SET \""
- + QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB + "\"=" + Long.toString(1000);
- conn.createStatement().execute(query);
- keyRanges = getAllSplits(conn, fullTableName);
- boolean oneCellPerColFamliyStorageScheme = !mutable && columnEncoded;
- assertEquals(oneCellPerColFamliyStorageScheme ? 13 : 12, keyRanges.size());
-
- rs = conn
- .createStatement()
- .executeQuery(
- "SELECT COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH),COUNT(*) from \"SYSTEM\".STATS where PHYSICAL_NAME = '"
- + physicalTableName + "' GROUP BY COLUMN_FAMILY ORDER BY COLUMN_FAMILY");
-
- assertTrue(rs.next());
- assertEquals("A", rs.getString(1));
- assertEquals(24, rs.getInt(2));
- assertEquals(columnEncoded ? ( mutable ? 12252 : 13624 ) : 12144, rs.getInt(3));
- assertEquals(oneCellPerColFamliyStorageScheme ? 12 : 11, rs.getInt(4));
-
- assertTrue(rs.next());
- assertEquals("B", rs.getString(1));
- assertEquals(oneCellPerColFamliyStorageScheme ? 24 : 20, rs.getInt(2));
- assertEquals(columnEncoded ? ( mutable ? 5600 : 6972 ) : 5540, rs.getInt(3));
- assertEquals(oneCellPerColFamliyStorageScheme ? 6 : 5, rs.getInt(4));
-
- assertTrue(rs.next());
- assertEquals("C", rs.getString(1));
- assertEquals(24, rs.getInt(2));
- assertEquals(columnEncoded ? ( mutable ? 6724 : 6988 ) : 6652, rs.getInt(3));
- assertEquals(6, rs.getInt(4));
-
- assertTrue(rs.next());
- assertEquals("D", rs.getString(1));
- assertEquals(24, rs.getInt(2));
- assertEquals(columnEncoded ? ( mutable ? 6724 : 6988 ) : 6652, rs.getInt(3));
- assertEquals(6, rs.getInt(4));
-
- assertFalse(rs.next());
-
- // Disable stats
- conn.createStatement().execute("ALTER TABLE " + fullTableName +
- " SET " + PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH + "=0");
- TestUtil.analyzeTable(conn, fullTableName);
- // Assert that there are no more guideposts
- rs = conn.createStatement().executeQuery("SELECT count(1) FROM " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME +
- " WHERE " + PhoenixDatabaseMetaData.PHYSICAL_NAME + "='" + physicalTableName + "' AND " + PhoenixDatabaseMetaData.COLUMN_FAMILY + " IS NOT NULL");
- assertTrue(rs.next());
- assertEquals(0, rs.getLong(1));
- assertFalse(rs.next());
- rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + fullTableName);
- assertEquals("CLIENT 1-CHUNK PARALLEL 1-WAY FULL SCAN OVER " + physicalTableName,
- QueryUtil.getExplainPlan(rs));
- }
-
- @Test
- public void testRowCountAndByteCounts() throws SQLException {
- Connection conn = getConnection();
- String ddl = "CREATE TABLE " + fullTableName + " (t_id VARCHAR NOT NULL,\n" + "k1 INTEGER NOT NULL,\n"
- + "k2 INTEGER NOT NULL,\n" + "C3.k3 INTEGER,\n" + "C2.v1 VARCHAR,\n"
- + "CONSTRAINT pk PRIMARY KEY (t_id, k1, k2)) " + tableDDLOptions + " split on ('e','j','o')";
- conn.createStatement().execute(ddl);
- String[] strings = { "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r",
- "s", "t", "u", "v", "w", "x", "y", "z" };
- for (int i = 0; i < 26; i++) {
- conn.createStatement().execute(
- "UPSERT INTO " + fullTableName + " values('" + strings[i] + "'," + i + "," + (i + 1) + ","
- + (i + 2) + ",'" + strings[25 - i] + "')");
- }
- conn.commit();
- ResultSet rs;
- String query = "UPDATE STATISTICS " + fullTableName + " SET \""
- + QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB + "\"=" + Long.toString(20);
- conn.createStatement().execute(query);
- Random r = new Random();
- int count = 0;
- while (count < 4) {
- int startIndex = r.nextInt(strings.length);
- int endIndex = r.nextInt(strings.length - startIndex) + startIndex;
- long rows = endIndex - startIndex;
- long c2Bytes = rows * (columnEncoded ? ( mutable ? 37 : 48 ) : 35);
- String physicalTableName = SchemaUtil.getPhysicalTableName(Bytes.toBytes(fullTableName), userTableNamespaceMapped).toString();
- rs = conn.createStatement().executeQuery(
- "SELECT COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH) from \"SYSTEM\".STATS where PHYSICAL_NAME = '"
- + physicalTableName + "' AND GUIDE_POST_KEY>= cast('" + strings[startIndex]
- + "' as varbinary) AND GUIDE_POST_KEY<cast('" + strings[endIndex]
- + "' as varbinary) and COLUMN_FAMILY='C2' group by COLUMN_FAMILY");
- if (startIndex < endIndex) {
- assertTrue(rs.next());
- assertEquals("C2", rs.getString(1));
- assertEquals(rows, rs.getLong(2));
- assertEquals(c2Bytes, rs.getLong(3));
- count++;
- }
- }
- }
-
- @Test
- public void testRowCountWhenNumKVsExceedCompactionScannerThreshold() throws Exception {
- String tableName = generateUniqueName();
- StringBuilder sb = new StringBuilder(200);
- sb.append("CREATE TABLE " + tableName + "(PK1 VARCHAR NOT NULL, ");
- int numRows = 10;
- try (Connection conn = DriverManager.getConnection(getUrl())) {
- int compactionScannerKVThreshold =
- conn.unwrap(PhoenixConnection.class).getQueryServices().getConfiguration()
- .getInt(HConstants.COMPACTION_KV_MAX,
- HConstants.COMPACTION_KV_MAX_DEFAULT);
- int numKvColumns = compactionScannerKVThreshold * 2;
- for (int i = 1; i <= numKvColumns; i++) {
- sb.append("KV" + i + " VARCHAR");
- if (i < numKvColumns) {
- sb.append(", ");
- }
- }
- sb.append(" CONSTRAINT PK PRIMARY KEY (PK1))");
- String ddl = sb.toString();
- conn.createStatement().execute(ddl);
- sb = new StringBuilder(200);
- sb.append("UPSERT INTO " + tableName + " VALUES (");
- for (int i = 1; i <= numKvColumns + 1; i++) {
- sb.append("?");
- if (i < numKvColumns + 1) {
- sb.append(", ");
- }
- }
- sb.append(")");
- String dml = sb.toString();
- PreparedStatement stmt = conn.prepareStatement(dml);
- String keyValue = "KVVVVVV";
- for (int j = 1; j <= numRows; j++) {
- for (int i = 1; i <= numKvColumns + 1; i++) {
- if (i == 1) {
- stmt.setString(1, "" + j);
- } else {
- stmt.setString(i, keyValue);
- }
- }
- stmt.executeUpdate();
- }
- conn.commit();
- conn.createStatement().execute("UPDATE STATISTICS " + tableName);
- String q = "SELECT SUM(GUIDE_POSTS_ROW_COUNT) FROM SYSTEM.STATS WHERE PHYSICAL_NAME = '" + tableName + "'";
- ResultSet rs = conn.createStatement().executeQuery(q);
- rs.next();
- assertEquals("Number of expected rows in stats table after update stats didn't match!", numRows, rs.getInt(1));
- conn.createStatement().executeUpdate("DELETE FROM SYSTEM.STATS WHERE PHYSICAL_NAME = '" + tableName + "'");
- conn.commit();
- TestUtil.doMajorCompaction(conn, tableName);
- q = "SELECT SUM(GUIDE_POSTS_ROW_COUNT) FROM SYSTEM.STATS WHERE PHYSICAL_NAME = '" + tableName + "'";
- rs = conn.createStatement().executeQuery(q);
- rs.next();
- assertEquals("Number of expected rows in stats table after major compaction didn't match", numRows, rs.getInt(1));
- }
- }
-
- @Test
- public void testEmptyGuidePostGeneratedWhenDataSizeLessThanGPWidth() throws Exception {
- String tableName = generateUniqueName();
- try (Connection conn = DriverManager.getConnection(getUrl())) {
- long guidePostWidth = 20000000;
- conn.createStatement()
- .execute("CREATE TABLE " + tableName
- + " ( k INTEGER, c1.a bigint,c2.b bigint CONSTRAINT pk PRIMARY KEY (k)) GUIDE_POSTS_WIDTH="
- + guidePostWidth + ", SALT_BUCKETS = 4");
- conn.createStatement().execute("upsert into " + tableName + " values (100,1,3)");
- conn.createStatement().execute("upsert into " + tableName + " values (101,2,4)");
- conn.commit();
- conn.createStatement().execute("UPDATE STATISTICS " + tableName);
- ConnectionQueryServices queryServices =
- conn.unwrap(PhoenixConnection.class).getQueryServices();
- try (HTableInterface statsHTable =
- queryServices.getTable(
- SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES,
- queryServices.getProps()).getName())) {
- GuidePostsInfo gps =
- StatisticsUtil.readStatistics(statsHTable,
- new GuidePostsKey(Bytes.toBytes(tableName), Bytes.toBytes("C1")),
- HConstants.LATEST_TIMESTAMP);
- assertTrue(gps.isEmptyGuidePost());
- assertEquals(guidePostWidth, gps.getByteCounts()[0]);
- assertTrue(gps.getGuidePostTimestamps()[0] > 0);
- gps =
- StatisticsUtil.readStatistics(statsHTable,
- new GuidePostsKey(Bytes.toBytes(tableName), Bytes.toBytes("C2")),
- HConstants.LATEST_TIMESTAMP);
- assertTrue(gps.isEmptyGuidePost());
- assertEquals(guidePostWidth, gps.getByteCounts()[0]);
- assertTrue(gps.getGuidePostTimestamps()[0] > 0);
- }
- }
- }
-
-}
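For context, the UPDATE STATISTICS / SYSTEM.STATS round trip that the relocated tests exercise reduces to the minimal sketch below. It assumes a reachable Phoenix cluster; the JDBC URL and table name are placeholders for illustration, not values taken from this commit.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    // Minimal sketch of the stats workflow exercised above, under the stated assumptions.
    public class StatsWorkflowSketch {
        public static void main(String[] args) throws Exception {
            String url = "jdbc:phoenix:localhost"; // placeholder cluster URL
            String table = "MY_TABLE";             // placeholder table name
            try (Connection conn = DriverManager.getConnection(url)) {
                conn.createStatement().execute("CREATE TABLE IF NOT EXISTS " + table
                        + " (k VARCHAR PRIMARY KEY, v INTEGER)");
                conn.createStatement().execute("UPSERT INTO " + table + " VALUES ('a', 1)");
                conn.commit();
                // Collect guideposts; the same statement is used throughout the tests.
                conn.createStatement().execute("UPDATE STATISTICS " + table);
                // Read back the collected row counts from SYSTEM.STATS, as the tests do.
                ResultSet rs = conn.createStatement().executeQuery(
                        "SELECT SUM(GUIDE_POSTS_ROW_COUNT) FROM SYSTEM.STATS"
                                + " WHERE PHYSICAL_NAME = '" + table + "'");
                while (rs.next()) {
                    System.out.println("guidepost row count: " + rs.getLong(1));
                }
            }
        }
    }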
http://git-wip-us.apache.org/repos/asf/phoenix/blob/730f9588/phoenix-core/src/it/java/org/apache/phoenix/end2end/SysTableNamespaceMappedStatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SysTableNamespaceMappedStatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SysTableNamespaceMappedStatsCollectorIT.java
index ea5f32f..4830189 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SysTableNamespaceMappedStatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SysTableNamespaceMappedStatsCollectorIT.java
@@ -22,6 +22,7 @@ import java.util.Collection;
import java.util.Map;
import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.stats.StatsCollectorIT;
import org.apache.phoenix.util.ReadOnlyProps;
import org.junit.BeforeClass;
import org.junit.runners.Parameterized.Parameters;
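The added import reflects the move of StatsCollectorIT from org.apache.phoenix.end2end to org.apache.phoenix.schema.stats. As a hedged sketch under that assumption, a concrete suite extends the abstract, Parameterized base roughly as follows; the class name and parameter combinations here are illustrative only, not copied from SysTableNamespaceMappedStatsCollectorIT.

    package org.apache.phoenix.end2end;

    import java.util.Arrays;
    import java.util.Collection;

    import org.apache.phoenix.schema.stats.StatsCollectorIT; // the import added by this hunk
    import org.junit.runners.Parameterized.Parameters;

    // Illustrative subclass; @RunWith(Parameterized.class) is inherited from the base.
    public class ExampleStatsCollectorIT extends StatsCollectorIT {

        public ExampleStatsCollectorIT(boolean mutable, boolean transactional,
                boolean userTableNamespaceMapped, boolean columnEncoded) {
            super(mutable, transactional, userTableNamespaceMapped, columnEncoded);
        }

        @Parameters(name = "mutable={0},transactional={1},namespaceMapped={2},columnEncoded={3}")
        public static Collection<Object[]> data() {
            // Hypothetical combinations; real suites enumerate their own.
            return Arrays.asList(new Object[][] {
                    { true, false, true, true },
                    { false, false, true, false },
            });
        }
    }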
http://git-wip-us.apache.org/repos/asf/phoenix/blob/730f9588/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
new file mode 100644
index 0000000..c424f45
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
@@ -0,0 +1,832 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema.stats;
+
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE;
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.apache.phoenix.util.TestUtil.getAllSplits;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.sql.Array;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Random;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.coprocessor.MetaDataRegionObserver;
+import org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver;
+import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.query.KeyRange;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableImpl;
+import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import com.google.common.collect.Maps;
+
+@RunWith(Parameterized.class)
+public abstract class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
+ private final String tableDDLOptions;
+ private final boolean columnEncoded;
+ private String tableName;
+ private String schemaName;
+ private String fullTableName;
+ private String physicalTableName;
+ private final boolean userTableNamespaceMapped;
+ private final boolean mutable;
+ private static final int defaultGuidePostWidth = 20;
+
+ protected StatsCollectorIT(boolean mutable, boolean transactional, boolean userTableNamespaceMapped, boolean columnEncoded) {
+ StringBuilder sb = new StringBuilder();
+ if (transactional) {
+ sb.append("TRANSACTIONAL=true");
+ }
+ if (!columnEncoded) {
+ if (sb.length()>0) {
+ sb.append(",");
+ }
+ sb.append("COLUMN_ENCODED_BYTES=0");
+ } else {
+ if (sb.length()>0) {
+ sb.append(",");
+ }
+ sb.append("COLUMN_ENCODED_BYTES=4");
+ }
+ if (!mutable) {
+ if (sb.length()>0) {
+ sb.append(",");
+ }
+ sb.append("IMMUTABLE_ROWS=true");
+ if (!columnEncoded) {
+ sb.append(",IMMUTABLE_STORAGE_SCHEME="+PTableImpl.ImmutableStorageScheme.ONE_CELL_PER_COLUMN);
+ }
+ }
+ this.tableDDLOptions = sb.toString();
+ this.userTableNamespaceMapped = userTableNamespaceMapped;
+ this.columnEncoded = columnEncoded;
+ this.mutable = mutable;
+ }
+
+ @BeforeClass
+ public static void doSetup() throws Exception {
+ // enable namespace mapping at the global level on both client and server side
+ Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(7);
+ serverProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
+ serverProps.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(defaultGuidePostWidth));
+ Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(2);
+ clientProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
+ clientProps.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(defaultGuidePostWidth));
+ setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet().iterator()));
+ }
+
+ @Before
+ public void generateTableNames() throws SQLException {
+ schemaName = generateUniqueName();
+ if (userTableNamespaceMapped) {
+ try (Connection conn = getConnection()) {
+ conn.createStatement().execute("CREATE SCHEMA " + schemaName);
+ }
+ }
+ tableName = "T_" + generateUniqueName();
+ fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+ physicalTableName = SchemaUtil.getPhysicalHBaseTableName(schemaName, tableName, userTableNamespaceMapped).getString();
+ }
+
+ private Connection getConnection() throws SQLException {
+ return getConnection(Integer.MAX_VALUE);
+ }
+
+ private Connection getConnection(Integer statsUpdateFreq) throws SQLException {
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ props.setProperty(QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB, Boolean.TRUE.toString());
+ props.setProperty(QueryServices.EXPLAIN_ROW_COUNT_ATTRIB, Boolean.TRUE.toString());
+ props.setProperty(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, Integer.toString(statsUpdateFreq));
+ // enable/disable namespace mapping at connection level
+ props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(userTableNamespaceMapped));
+ return DriverManager.getConnection(getUrl(), props);
+ }
+
+ @Test
+ public void testUpdateEmptyStats() throws Exception {
+ Connection conn = getConnection();
+ conn.setAutoCommit(true);
+ conn.createStatement().execute(
+ "CREATE TABLE " + fullTableName +" ( k CHAR(1) PRIMARY KEY )" + tableDDLOptions);
+ conn.createStatement().execute("UPDATE STATISTICS " + fullTableName);
+ ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + fullTableName);
+ String explainPlan = QueryUtil.getExplainPlan(rs);
+ assertEquals(
+ "CLIENT 1-CHUNK 0 ROWS 20 BYTES PARALLEL 1-WAY FULL SCAN OVER " + physicalTableName + "\n" +
+ " SERVER FILTER BY FIRST KEY ONLY",
+ explainPlan);
+ conn.close();
+ }
+
+ @Test
+ public void testSomeUpdateEmptyStats() throws Exception {
+ Connection conn = getConnection();
+ conn.setAutoCommit(true);
+ conn.createStatement().execute(
+ "CREATE TABLE " + fullTableName +" ( k VARCHAR PRIMARY KEY, a.v1 VARCHAR, b.v2 VARCHAR ) " + tableDDLOptions + (tableDDLOptions.isEmpty() ? "" : ",") + "SALT_BUCKETS = 3");
+ conn.createStatement().execute("UPSERT INTO " + fullTableName + "(k,v1) VALUES('a','123456789')");
+ conn.createStatement().execute("UPDATE STATISTICS " + fullTableName);
+
+ ResultSet rs;
+ String explainPlan;
+ rs = conn.createStatement().executeQuery("EXPLAIN SELECT v2 FROM " + fullTableName + " WHERE v2='foo'");
+ explainPlan = QueryUtil.getExplainPlan(rs);
+ // if we are using the ONE_CELL_PER_COLUMN_FAMILY storage scheme, we will have a single kv even though there are no values for col family v2
+ String stats = columnEncoded && !mutable ? "4-CHUNK 1 ROWS 38 BYTES" : "3-CHUNK 0 ROWS 20 BYTES";
+ assertEquals(
+ "CLIENT " + stats + " PARALLEL 3-WAY FULL SCAN OVER " + physicalTableName + "\n" +
+ " SERVER FILTER BY B.V2 = 'foo'\n" +
+ "CLIENT MERGE SORT",
+ explainPlan);
+ rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + fullTableName);
+ explainPlan = QueryUtil.getExplainPlan(rs);
+ assertEquals(
+ "CLIENT 4-CHUNK 1 ROWS " + (columnEncoded ? "28" : "34") + " BYTES PARALLEL 3-WAY FULL SCAN OVER " + physicalTableName + "\n" +
+ "CLIENT MERGE SORT",
+ explainPlan);
+ rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + fullTableName + " WHERE k = 'a'");
+ explainPlan = QueryUtil.getExplainPlan(rs);
+ assertEquals(
+ "CLIENT 1-CHUNK 1 ROWS " + (columnEncoded ? "204" : "202") + " BYTES PARALLEL 1-WAY POINT LOOKUP ON 1 KEY OVER " + physicalTableName + "\n" +
+ "CLIENT MERGE SORT",
+ explainPlan);
+
+ conn.close();
+ }
+
+ @Test
+ public void testUpdateStats() throws SQLException, IOException,
+ InterruptedException {
+ Connection conn;
+ PreparedStatement stmt;
+ ResultSet rs;
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ conn = getConnection();
+ conn.createStatement().execute(
+ "CREATE TABLE " + fullTableName +" ( k VARCHAR, a_string_array VARCHAR(100) ARRAY[4], b_string_array VARCHAR(100) ARRAY[4] \n"
+ + " CONSTRAINT pk PRIMARY KEY (k, b_string_array DESC))"
+ + tableDDLOptions );
+ String[] s;
+ Array array;
+ conn = upsertValues(props, fullTableName);
+ // Call the update statistics query here. If major compaction has already run, this will not get executed.
+ stmt = conn.prepareStatement("UPDATE STATISTICS " + fullTableName);
+ stmt.execute();
+ stmt = upsertStmt(conn, fullTableName);
+ stmt.setString(1, "z");
+ s = new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" };
+ array = conn.createArrayOf("VARCHAR", s);
+ stmt.setArray(2, array);
+ s = new String[] { "zya", "def", "ghi", "jkll", null, null, null, "xxx" };
+ array = conn.createArrayOf("VARCHAR", s);
+ stmt.setArray(3, array);
+ stmt.execute();
+ stmt = conn.prepareStatement("UPDATE STATISTICS " + fullTableName);
+ stmt.execute();
+ rs = conn.createStatement().executeQuery("SELECT k FROM " + fullTableName);
+ assertTrue(rs.next());
+ conn.close();
+ }
+
+ private void testNoDuplicatesAfterUpdateStats(String splitKey) throws Throwable {
+ Connection conn = getConnection();
+ PreparedStatement stmt;
+ ResultSet rs;
+ conn.createStatement()
+ .execute("CREATE TABLE " + fullTableName
+ + " ( k VARCHAR, c1.a bigint,c2.b bigint CONSTRAINT pk PRIMARY KEY (k))"+ tableDDLOptions
+ + (splitKey != null ? " split on (" + splitKey + ")" : "") );
+ conn.createStatement().execute("upsert into " + fullTableName + " values ('abc',1,3)");
+ conn.createStatement().execute("upsert into " + fullTableName + " values ('def',2,4)");
+ conn.commit();
+ conn.createStatement().execute("UPDATE STATISTICS " + fullTableName);
+ rs = conn.createStatement().executeQuery("SELECT k FROM " + fullTableName + " order by k desc");
+ assertTrue(rs.next());
+ assertEquals("def", rs.getString(1));
+ assertTrue(rs.next());
+ assertEquals("abc", rs.getString(1));
+ assertTrue(!rs.next());
+ conn.close();
+ }
+
+ @Test
+ public void testNoDuplicatesAfterUpdateStatsWithSplits() throws Throwable {
+ testNoDuplicatesAfterUpdateStats("'abc','def'");
+ }
+
+ @Test
+ public void testNoDuplicatesAfterUpdateStatsWithDesc() throws Throwable {
+ testNoDuplicatesAfterUpdateStats(null);
+ }
+
+ @Test
+ public void testUpdateStatsWithMultipleTables() throws Throwable {
+ String fullTableName2 = SchemaUtil.getTableName(schemaName, "T_" + generateUniqueName());
+ Connection conn;
+ PreparedStatement stmt;
+ ResultSet rs;
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ conn = getConnection();
+ conn.createStatement().execute(
+ "CREATE TABLE " + fullTableName +" ( k VARCHAR, a_string_array VARCHAR(100) ARRAY[4], b_string_array VARCHAR(100) ARRAY[4] \n"
+ + " CONSTRAINT pk PRIMARY KEY (k, b_string_array DESC))" + tableDDLOptions );
+ conn.createStatement().execute(
+ "CREATE TABLE " + fullTableName2 +" ( k VARCHAR, a_string_array VARCHAR(100) ARRAY[4], b_string_array VARCHAR(100) ARRAY[4] \n"
+ + " CONSTRAINT pk PRIMARY KEY (k, b_string_array DESC))" + tableDDLOptions );
+ String[] s;
+ Array array;
+ conn = upsertValues(props, fullTableName);
+ conn = upsertValues(props, fullTableName2);
+ // Call the update statistics query here
+ stmt = conn.prepareStatement("UPDATE STATISTICS "+fullTableName);
+ stmt.execute();
+ stmt = conn.prepareStatement("UPDATE STATISTICS "+fullTableName2);
+ stmt.execute();
+ stmt = upsertStmt(conn, fullTableName);
+ stmt.setString(1, "z");
+ s = new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" };
+ array = conn.createArrayOf("VARCHAR", s);
+ stmt.setArray(2, array);
+ s = new String[] { "zya", "def", "ghi", "jkll", null, null, null, "xxx" };
+ array = conn.createArrayOf("VARCHAR", s);
+ stmt.setArray(3, array);
+ stmt.execute();
+ stmt = upsertStmt(conn, fullTableName2);
+ stmt.setString(1, "z");
+ s = new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" };
+ array = conn.createArrayOf("VARCHAR", s);
+ stmt.setArray(2, array);
+ s = new String[] { "zya", "def", "ghi", "jkll", null, null, null, "xxx" };
+ array = conn.createArrayOf("VARCHAR", s);
+ stmt.setArray(3, array);
+ stmt.execute();
+ conn.close();
+ conn = getConnection();
+ // This analyze would not work
+ stmt = conn.prepareStatement("UPDATE STATISTICS "+fullTableName2);
+ stmt.execute();
+ rs = conn.createStatement().executeQuery("SELECT k FROM "+fullTableName2);
+ assertTrue(rs.next());
+ conn.close();
+ }
+
+ private Connection upsertValues(Properties props, String tableName) throws SQLException, IOException,
+ InterruptedException {
+ Connection conn;
+ PreparedStatement stmt;
+ conn = getConnection();
+ stmt = upsertStmt(conn, tableName);
+ stmt.setString(1, "a");
+ String[] s = new String[] { "abc", "def", "ghi", "jkll", null, null, "xxx" };
+ Array array = conn.createArrayOf("VARCHAR", s);
+ stmt.setArray(2, array);
+ s = new String[] { "abc", "def", "ghi", "jkll", null, null, null, "xxx" };
+ array = conn.createArrayOf("VARCHAR", s);
+ stmt.setArray(3, array);
+ stmt.execute();
+ conn.commit();
+ stmt = upsertStmt(conn, tableName);
+ stmt.setString(1, "b");
+ s = new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" };
+ array = conn.createArrayOf("VARCHAR", s);
+ stmt.setArray(2, array);
+ s = new String[] { "zya", "def", "ghi", "jkll", null, null, null, "xxx" };
+ array = conn.createArrayOf("VARCHAR", s);
+ stmt.setArray(3, array);
+ stmt.execute();
+ conn.commit();
+ stmt = upsertStmt(conn, tableName);
+ stmt.setString(1, "c");
+ s = new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" };
+ array = conn.createArrayOf("VARCHAR", s);
+ stmt.setArray(2, array);
+ s = new String[] { "zya", "def", "ghi", "jkll", null, null, null, "xxx" };
+ array = conn.createArrayOf("VARCHAR", s);
+ stmt.setArray(3, array);
+ stmt.execute();
+ conn.commit();
+ stmt = upsertStmt(conn, tableName);
+ stmt.setString(1, "d");
+ s = new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" };
+ array = conn.createArrayOf("VARCHAR", s);
+ stmt.setArray(2, array);
+ s = new String[] { "zya", "def", "ghi", "jkll", null, null, null, "xxx" };
+ array = conn.createArrayOf("VARCHAR", s);
+ stmt.setArray(3, array);
+ stmt.execute();
+ conn.commit();
+ stmt = upsertStmt(conn, tableName);
+ stmt.setString(1, "b");
+ s = new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" };
+ array = conn.createArrayOf("VARCHAR", s);
+ stmt.setArray(2, array);
+ s = new String[] { "zya", "def", "ghi", "jkll", null, null, null, "xxx" };
+ array = conn.createArrayOf("VARCHAR", s);
+ stmt.setArray(3, array);
+ stmt.execute();
+ conn.commit();
+ stmt = upsertStmt(conn, tableName);
+ stmt.setString(1, "e");
+ s = new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" };
+ array = conn.createArrayOf("VARCHAR", s);
+ stmt.setArray(2, array);
+ s = new String[] { "zya", "def", "ghi", "jkll", null, null, null, "xxx" };
+ array = conn.createArrayOf("VARCHAR", s);
+ stmt.setArray(3, array);
+ stmt.execute();
+ conn.commit();
+ return conn;
+ }
+
+ private PreparedStatement upsertStmt(Connection conn, String tableName) throws SQLException {
+ PreparedStatement stmt;
+ stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?,?)");
+ return stmt;
+ }
+
+ private void compactTable(Connection conn, String tableName) throws Exception {
+ TestUtil.doMajorCompaction(conn, tableName);
+ }
+
+ @Test
+ @Ignore //TODO remove this once https://issues.apache.org/jira/browse/TEPHRA-208 is fixed
+ public void testCompactUpdatesStats() throws Exception {
+ testCompactUpdatesStats(0, fullTableName);
+ }
+
+ @Test
+ @Ignore //TODO remove this once https://issues.apache.org/jira/browse/TEPHRA-208 is fixed
+ public void testCompactUpdatesStatsWithMinStatsUpdateFreq() throws Exception {
+ testCompactUpdatesStats(QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS, fullTableName);
+ }
+
+ private static void invalidateStats(Connection conn, String tableName) throws SQLException {
+ PTable ptable = conn.unwrap(PhoenixConnection.class)
+ .getMetaDataCache().getTableRef(new PTableKey(null, tableName))
+ .getTable();
+ byte[] name = ptable.getPhysicalName().getBytes();
+ conn.unwrap(PhoenixConnection.class).getQueryServices().invalidateStats(new GuidePostsKey(name, SchemaUtil.getEmptyColumnFamily(ptable)));
+ }
+
+ private void testCompactUpdatesStats(Integer statsUpdateFreq, String tableName) throws Exception {
+ int nRows = 10;
+ Connection conn = getConnection(statsUpdateFreq);
+ PreparedStatement stmt;
+ conn.createStatement().execute("CREATE TABLE " + tableName + "(k CHAR(1) PRIMARY KEY, v INTEGER, w INTEGER) "
+ + (!tableDDLOptions.isEmpty() ? tableDDLOptions + "," : "")
+ + HColumnDescriptor.KEEP_DELETED_CELLS + "=" + Boolean.FALSE);
+ stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?,?)");
+ for (int i = 0; i < nRows; i++) {
+ stmt.setString(1, Character.toString((char) ('a' + i)));
+ stmt.setInt(2, i);
+ stmt.setInt(3, i);
+ stmt.executeUpdate();
+ }
+ conn.commit();
+
+ compactTable(conn, physicalTableName);
+
+ if (statsUpdateFreq != 0) {
+ invalidateStats(conn, tableName);
+ } else {
+ // Confirm that when we have a non-zero STATS_UPDATE_FREQ_MS_ATTRIB, after we run
+ // UPDATE STATISTICS, the new statistics are faulted in as expected.
+ List<KeyRange>keyRanges = getAllSplits(conn, tableName);
+ assertNotEquals(nRows+1, keyRanges.size());
+ // If we've set MIN_STATS_UPDATE_FREQ_MS_ATTRIB, an UPDATE STATISTICS will invalidate the cache
+ // and force the new stats to be pulled over.
+ int rowCount = conn.createStatement().executeUpdate("UPDATE STATISTICS " + tableName);
+ assertEquals(10, rowCount);
+ }
+ List<KeyRange>keyRanges = getAllSplits(conn, tableName);
+ assertEquals(nRows+1, keyRanges.size());
+
+ int nDeletedRows = conn.createStatement().executeUpdate("DELETE FROM " + tableName + " WHERE V < " + nRows / 2);
+ conn.commit();
+ assertEquals(5, nDeletedRows);
+
+ Scan scan = new Scan();
+ scan.setRaw(true);
+ PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
+ try (HTableInterface htable = phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
+ ResultScanner scanner = htable.getScanner(scan);
+ Result result;
+ while ((result = scanner.next())!=null) {
+ System.out.println(result);
+ }
+ }
+
+ compactTable(conn, physicalTableName);
+
+ scan = new Scan();
+ scan.setRaw(true);
+ phxConn = conn.unwrap(PhoenixConnection.class);
+ try (HTableInterface htable = phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
+ ResultScanner scanner = htable.getScanner(scan);
+ Result result;
+ while ((result = scanner.next())!=null) {
+ System.out.println(result);
+ }
+ }
+
+ if (statsUpdateFreq != 0) {
+ invalidateStats(conn, tableName);
+ } else {
+ assertEquals(nRows+1, keyRanges.size());
+ // If we've set STATS_UPDATE_FREQ_MS_ATTRIB, an UPDATE STATISTICS will invalidate the cache
+ // and force us to pull over the new stats
+ int rowCount = conn.createStatement().executeUpdate("UPDATE STATISTICS " + tableName);
+ assertEquals(5, rowCount);
+ }
+ keyRanges = getAllSplits(conn, tableName);
+ assertEquals(nRows/2+1, keyRanges.size());
+ ResultSet rs = conn.createStatement().executeQuery("SELECT SUM(GUIDE_POSTS_ROW_COUNT) FROM "
+ + "\""+ SYSTEM_CATALOG_SCHEMA + "\".\"" + SYSTEM_STATS_TABLE + "\"" + " WHERE PHYSICAL_NAME='" + physicalTableName + "'");
+ rs.next();
+ assertEquals(nRows - nDeletedRows, rs.getLong(1));
+ }
+
+ @Test
+ public void testWithMultiCF() throws Exception {
+ int nRows = 20;
+ Connection conn = getConnection(0);
+ PreparedStatement stmt;
+ conn.createStatement().execute(
+ "CREATE TABLE " + fullTableName
+ + "(k VARCHAR PRIMARY KEY, a.v INTEGER, b.v INTEGER, c.v INTEGER NULL, d.v INTEGER NULL) "
+ + tableDDLOptions );
+ stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + " VALUES(?,?, ?, ?, ?)");
+ byte[] val = new byte[250];
+ for (int i = 0; i < nRows; i++) {
+ stmt.setString(1, Character.toString((char)('a' + i)) + Bytes.toString(val));
+ stmt.setInt(2, i);
+ stmt.setInt(3, i);
+ stmt.setInt(4, i);
+ stmt.setInt(5, i);
+ stmt.executeUpdate();
+ }
+ conn.commit();
+ stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + "(k, c.v, d.v) VALUES(?,?,?)");
+ for (int i = 0; i < 5; i++) {
+ stmt.setString(1, Character.toString((char)('a' + 'z' + i)) + Bytes.toString(val));
+ stmt.setInt(2, i);
+ stmt.setInt(3, i);
+ stmt.executeUpdate();
+ }
+ conn.commit();
+
+ ResultSet rs;
+ TestUtil.analyzeTable(conn, fullTableName);
+ List<KeyRange> keyRanges = getAllSplits(conn, fullTableName);
+ assertEquals(26, keyRanges.size());
+ rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + fullTableName);
+ assertEquals("CLIENT 26-CHUNK 25 ROWS " + (columnEncoded ? ( mutable ? "12530" : "13902" ) : "12420") + " BYTES PARALLEL 1-WAY FULL SCAN OVER " + physicalTableName,
+ QueryUtil.getExplainPlan(rs));
+
+ ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
+ List<HRegionLocation> regions = services.getAllTableRegions(Bytes.toBytes(physicalTableName));
+ assertEquals(1, regions.size());
+
+ TestUtil.analyzeTable(conn, fullTableName);
+ String query = "UPDATE STATISTICS " + fullTableName + " SET \""
+ + QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB + "\"=" + Long.toString(1000);
+ conn.createStatement().execute(query);
+ keyRanges = getAllSplits(conn, fullTableName);
+ boolean oneCellPerColFamliyStorageScheme = !mutable && columnEncoded;
+ assertEquals(oneCellPerColFamliyStorageScheme ? 13 : 12, keyRanges.size());
+
+ rs = conn
+ .createStatement()
+ .executeQuery(
+ "SELECT COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH),COUNT(*) from \"SYSTEM\".STATS where PHYSICAL_NAME = '"
+ + physicalTableName + "' GROUP BY COLUMN_FAMILY ORDER BY COLUMN_FAMILY");
+
+ assertTrue(rs.next());
+ assertEquals("A", rs.getString(1));
+ assertEquals(24, rs.getInt(2));
+ assertEquals(columnEncoded ? ( mutable ? 12252 : 13624 ) : 12144, rs.getInt(3));
+ assertEquals(oneCellPerColFamliyStorageScheme ? 12 : 11, rs.getInt(4));
+
+ assertTrue(rs.next());
+ assertEquals("B", rs.getString(1));
+ assertEquals(oneCellPerColFamliyStorageScheme ? 24 : 20, rs.getInt(2));
+ assertEquals(columnEncoded ? ( mutable ? 5600 : 6972 ) : 5540, rs.getInt(3));
+ assertEquals(oneCellPerColFamliyStorageScheme ? 6 : 5, rs.getInt(4));
+
+ assertTrue(rs.next());
+ assertEquals("C", rs.getString(1));
+ assertEquals(24, rs.getInt(2));
+ assertEquals(columnEncoded ? ( mutable ? 6724 : 6988 ) : 6652, rs.getInt(3));
+ assertEquals(6, rs.getInt(4));
+
+ assertTrue(rs.next());
+ assertEquals("D", rs.getString(1));
+ assertEquals(24, rs.getInt(2));
+ assertEquals(columnEncoded ? ( mutable ? 6724 : 6988 ) : 6652, rs.getInt(3));
+ assertEquals(6, rs.getInt(4));
+
+ assertFalse(rs.next());
+
+ // Disable stats
+ conn.createStatement().execute("ALTER TABLE " + fullTableName +
+ " SET " + PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH + "=0");
+ TestUtil.analyzeTable(conn, fullTableName);
+ // Assert that there are no more guideposts
+ rs = conn.createStatement().executeQuery("SELECT count(1) FROM " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME +
+ " WHERE " + PhoenixDatabaseMetaData.PHYSICAL_NAME + "='" + physicalTableName + "' AND " + PhoenixDatabaseMetaData.COLUMN_FAMILY + " IS NOT NULL");
+ assertTrue(rs.next());
+ assertEquals(0, rs.getLong(1));
+ assertFalse(rs.next());
+ rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + fullTableName);
+ assertEquals("CLIENT 1-CHUNK PARALLEL 1-WAY FULL SCAN OVER " + physicalTableName,
+ QueryUtil.getExplainPlan(rs));
+ }
+
+ @Test
+ public void testRowCountAndByteCounts() throws SQLException {
+ Connection conn = getConnection();
+ String ddl = "CREATE TABLE " + fullTableName + " (t_id VARCHAR NOT NULL,\n" + "k1 INTEGER NOT NULL,\n"
+ + "k2 INTEGER NOT NULL,\n" + "C3.k3 INTEGER,\n" + "C2.v1 VARCHAR,\n"
+ + "CONSTRAINT pk PRIMARY KEY (t_id, k1, k2)) " + tableDDLOptions + " split on ('e','j','o')";
+ conn.createStatement().execute(ddl);
+ String[] strings = { "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r",
+ "s", "t", "u", "v", "w", "x", "y", "z" };
+ for (int i = 0; i < 26; i++) {
+ conn.createStatement().execute(
+ "UPSERT INTO " + fullTableName + " values('" + strings[i] + "'," + i + "," + (i + 1) + ","
+ + (i + 2) + ",'" + strings[25 - i] + "')");
+ }
+ conn.commit();
+ ResultSet rs;
+ String query = "UPDATE STATISTICS " + fullTableName + " SET \""
+ + QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB + "\"=" + Long.toString(20);
+ conn.createStatement().execute(query);
+ Random r = new Random();
+ int count = 0;
+ while (count < 4) {
+ int startIndex = r.nextInt(strings.length);
+ int endIndex = r.nextInt(strings.length - startIndex) + startIndex;
+ long rows = endIndex - startIndex;
+ long c2Bytes = rows * (columnEncoded ? ( mutable ? 37 : 48 ) : 35);
+ String physicalTableName = SchemaUtil.getPhysicalTableName(Bytes.toBytes(fullTableName), userTableNamespaceMapped).toString();
+ rs = conn.createStatement().executeQuery(
+ "SELECT COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH) from \"SYSTEM\".STATS where PHYSICAL_NAME = '"
+ + physicalTableName + "' AND GUIDE_POST_KEY>= cast('" + strings[startIndex]
+ + "' as varbinary) AND GUIDE_POST_KEY<cast('" + strings[endIndex]
+ + "' as varbinary) and COLUMN_FAMILY='C2' group by COLUMN_FAMILY");
+ if (startIndex < endIndex) {
+ assertTrue(rs.next());
+ assertEquals("C2", rs.getString(1));
+ assertEquals(rows, rs.getLong(2));
+ assertEquals(c2Bytes, rs.getLong(3));
+ count++;
+ }
+ }
+ }
+
+ @Test
+ public void testRowCountWhenNumKVsExceedCompactionScannerThreshold() throws Exception {
+ String tableName = generateUniqueName();
+ StringBuilder sb = new StringBuilder(200);
+ sb.append("CREATE TABLE " + tableName + "(PK1 VARCHAR NOT NULL, ");
+ int numRows = 10;
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ int compactionScannerKVThreshold =
+ conn.unwrap(PhoenixConnection.class).getQueryServices().getConfiguration()
+ .getInt(HConstants.COMPACTION_KV_MAX,
+ HConstants.COMPACTION_KV_MAX_DEFAULT);
+ int numKvColumns = compactionScannerKVThreshold * 2;
+ for (int i = 1; i <= numKvColumns; i++) {
+ sb.append("KV" + i + " VARCHAR");
+ if (i < numKvColumns) {
+ sb.append(", ");
+ }
+ }
+ sb.append(" CONSTRAINT PK PRIMARY KEY (PK1))");
+ String ddl = sb.toString();
+ conn.createStatement().execute(ddl);
+ sb = new StringBuilder(200);
+ sb.append("UPSERT INTO " + tableName + " VALUES (");
+ for (int i = 1; i <= numKvColumns + 1; i++) {
+ sb.append("?");
+ if (i < numKvColumns + 1) {
+ sb.append(", ");
+ }
+ }
+ sb.append(")");
+ String dml = sb.toString();
+ PreparedStatement stmt = conn.prepareStatement(dml);
+ String keyValue = "KVVVVVV";
+ for (int j = 1; j <= numRows; j++) {
+ for (int i = 1; i <= numKvColumns + 1; i++) {
+ if (i == 1) {
+ stmt.setString(1, "" + j);
+ } else {
+ stmt.setString(i, keyValue);
+ }
+ }
+ stmt.executeUpdate();
+ }
+ conn.commit();
+ conn.createStatement().execute("UPDATE STATISTICS " + tableName);
+ String q = "SELECT SUM(GUIDE_POSTS_ROW_COUNT) FROM SYSTEM.STATS WHERE PHYSICAL_NAME = '" + tableName + "'";
+ ResultSet rs = conn.createStatement().executeQuery(q);
+ rs.next();
+ assertEquals("Number of expected rows in stats table after update stats didn't match!", numRows, rs.getInt(1));
+ conn.createStatement().executeUpdate("DELETE FROM SYSTEM.STATS WHERE PHYSICAL_NAME = '" + tableName + "'");
+ conn.commit();
+ TestUtil.doMajorCompaction(conn, tableName);
+ q = "SELECT SUM(GUIDE_POSTS_ROW_COUNT) FROM SYSTEM.STATS WHERE PHYSICAL_NAME = '" + tableName + "'";
+ rs = conn.createStatement().executeQuery(q);
+ rs.next();
+ assertEquals("Number of expected rows in stats table after major compaction didn't match", numRows, rs.getInt(1));
+ }
+ }
+
+ @Test
+ public void testEmptyGuidePostGeneratedWhenDataSizeLessThanGPWidth() throws Exception {
+ String tableName = generateUniqueName();
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ long guidePostWidth = 20000000;
+ conn.createStatement()
+ .execute("CREATE TABLE " + tableName
+ + " ( k INTEGER, c1.a bigint,c2.b bigint CONSTRAINT pk PRIMARY KEY (k)) GUIDE_POSTS_WIDTH="
+ + guidePostWidth + ", SALT_BUCKETS = 4");
+ conn.createStatement().execute("upsert into " + tableName + " values (100,1,3)");
+ conn.createStatement().execute("upsert into " + tableName + " values (101,2,4)");
+ conn.commit();
+ conn.createStatement().execute("UPDATE STATISTICS " + tableName);
+ ConnectionQueryServices queryServices =
+ conn.unwrap(PhoenixConnection.class).getQueryServices();
+ try (HTableInterface statsHTable =
+ queryServices.getTable(
+ SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES,
+ queryServices.getProps()).getName())) {
+ GuidePostsInfo gps =
+ StatisticsUtil.readStatistics(statsHTable,
+ new GuidePostsKey(Bytes.toBytes(tableName), Bytes.toBytes("C1")),
+ HConstants.LATEST_TIMESTAMP);
+ assertTrue(gps.isEmptyGuidePost());
+ assertEquals(guidePostWidth, gps.getByteCounts()[0]);
+ assertTrue(gps.getGuidePostTimestamps()[0] > 0);
+ gps =
+ StatisticsUtil.readStatistics(statsHTable,
+ new GuidePostsKey(Bytes.toBytes(tableName), Bytes.toBytes("C2")),
+ HConstants.LATEST_TIMESTAMP);
+ assertTrue(gps.isEmptyGuidePost());
+ assertEquals(guidePostWidth, gps.getByteCounts()[0]);
+ assertTrue(gps.getGuidePostTimestamps()[0] > 0);
+ }
+ }
+ }
+
+ @Test
+ public void testGuidePostWidthUsedInDefaultStatsCollector() throws Exception {
+ String baseTable = generateUniqueName();
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ String ddl =
+ "CREATE TABLE " + baseTable
+ + " (k INTEGER PRIMARY KEY, a bigint, b bigint, c bigint) "
+ + tableDDLOptions;
+ BaseTest.createTestTable(getUrl(), ddl, null, null);
+ conn.createStatement().execute("upsert into " + baseTable + " values (100,1,1,1)");
+ conn.createStatement().execute("upsert into " + baseTable + " values (101,2,2,2)");
+ conn.createStatement().execute("upsert into " + baseTable + " values (102,3,3,3)");
+ conn.createStatement().execute("upsert into " + baseTable + " values (103,4,4,4)");
+ conn.createStatement().execute("upsert into " + baseTable + " values (104,5,5,5)");
+ conn.createStatement().execute("upsert into " + baseTable + " values (105,6,6,6)");
+ conn.createStatement().execute("upsert into " + baseTable + " values (106,7,7,7)");
+ conn.createStatement().execute("upsert into " + baseTable + " values (107,8,8,8)");
+ conn.createStatement().execute("upsert into " + baseTable + " values (108,9,9,9)");
+ conn.createStatement().execute("upsert into " + baseTable + " values (109,10,10,10)");
+ conn.commit();
+ DefaultStatisticsCollector statsCollector = getDefaultStatsCollectorForTable(baseTable);
+ statsCollector.init();
+ assertEquals(defaultGuidePostWidth, statsCollector.getGuidePostDepth());
+
+ // ok let's create a global index now and see what guide post width is used for it
+ String globalIndex = "GI_" + generateUniqueName();
+ ddl = "CREATE INDEX " + globalIndex + " ON " + baseTable + " (a) INCLUDE (b) ";
+ conn.createStatement().execute(ddl);
+ statsCollector = getDefaultStatsCollectorForTable(globalIndex);
+ statsCollector.init();
+ assertEquals(defaultGuidePostWidth, statsCollector.getGuidePostDepth());
+
+ // let's check out local index too
+ String localIndex = "LI_" + generateUniqueName();
+ ddl = "CREATE LOCAL INDEX " + localIndex + " ON " + baseTable + " (b) INCLUDE (c) ";
+ conn.createStatement().execute(ddl);
+ // local indexes reside on the same table as base data table
+ statsCollector = getDefaultStatsCollectorForTable(baseTable);
+ statsCollector.init();
+ assertEquals(defaultGuidePostWidth, statsCollector.getGuidePostDepth());
+
+ // now let's create a view and an index on it and see what guide post width is used for
+ // it
+ String view = "V_" + generateUniqueName();
+ ddl = "CREATE VIEW " + view + " AS SELECT * FROM " + baseTable;
+ conn.createStatement().execute(ddl);
+ String viewIndex = "VI_" + generateUniqueName();
+ ddl = "CREATE INDEX " + viewIndex + " ON " + view + " (b)";
+ conn.createStatement().execute(ddl);
+ String viewIndexTableName = MetaDataUtil.getViewIndexTableName(baseTable);
+ statsCollector = getDefaultStatsCollectorForTable(viewIndexTableName);
+ statsCollector.init();
+ assertEquals(defaultGuidePostWidth, statsCollector.getGuidePostDepth());
+ /*
+ * Fantastic! Now let's change the guide post width of the base table. This should
+ * change the guide post width we are using in DefaultStatisticsCollector for all
+ * indexes too.
+ */
+ long newGpWidth = 500;
+ conn.createStatement()
+ .execute("ALTER TABLE " + baseTable + " SET GUIDE_POSTS_WIDTH=" + newGpWidth);
+
+ // base table
+ statsCollector = getDefaultStatsCollectorForTable(baseTable);
+ statsCollector.init();
+ assertEquals(newGpWidth, statsCollector.getGuidePostDepth());
+
+ // global index table
+ statsCollector = getDefaultStatsCollectorForTable(globalIndex);
+ statsCollector.init();
+ assertEquals(newGpWidth, statsCollector.getGuidePostDepth());
+
+ // view index table
+ statsCollector = getDefaultStatsCollectorForTable(viewIndexTableName);
+ statsCollector.init();
+ assertEquals(newGpWidth, statsCollector.getGuidePostDepth());
+ }
+ }
+
+ private DefaultStatisticsCollector getDefaultStatsCollectorForTable(String tableName)
+ throws Exception {
+ RegionCoprocessorEnvironment env = getRegionEnvironment(tableName);
+ return (DefaultStatisticsCollector) StatisticsCollectorFactory
+ .createStatisticsCollector(env, tableName, System.currentTimeMillis(), null, null);
+ }
+
+ private RegionCoprocessorEnvironment getRegionEnvironment(String tableName)
+ throws IOException, InterruptedException {
+ return (RegionCoprocessorEnvironment) getUtility()
+ .getRSForFirstRegionInTable(TableName.valueOf(tableName))
+ .getOnlineRegions(TableName.valueOf(tableName)).get(0).getCoprocessorHost()
+ .findCoprocessorEnvironment(UngroupedAggregateRegionObserver.class.getName());
+ }
+}
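For readers who want to reproduce what the tests above assert outside the
test harness, here is a minimal standalone sketch: collect stats with an
explicit guidepost width, then read the guide post rows back from
SYSTEM.STATS. The JDBC URL and table name are illustrative assumptions, not
part of the patch; "phoenix.stats.guidepost.width" is the same attribute the
test sets via QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class GuidePostStatsSketch {
        public static void main(String[] args) throws Exception {
            // Illustrative URL; point this at a real Phoenix cluster.
            try (Connection conn =
                    DriverManager.getConnection("jdbc:phoenix:localhost")) {
                conn.createStatement().execute(
                    "CREATE TABLE IF NOT EXISTS T1 (K VARCHAR PRIMARY KEY, C2.V VARCHAR)");
                conn.createStatement().execute("UPSERT INTO T1 VALUES ('a', 'x')");
                conn.commit();
                // A tiny width forces guide posts even on a tiny table.
                conn.createStatement().execute(
                    "UPDATE STATISTICS T1 SET \"phoenix.stats.guidepost.width\"=20");
                // Guide posts are stored per column family in SYSTEM.STATS.
                ResultSet rs = conn.createStatement().executeQuery(
                    "SELECT COLUMN_FAMILY, SUM(GUIDE_POSTS_ROW_COUNT), SUM(GUIDE_POSTS_WIDTH)"
                        + " FROM \"SYSTEM\".STATS WHERE PHYSICAL_NAME = 'T1'"
                        + " GROUP BY COLUMN_FAMILY");
                while (rs.next()) {
                    System.out.println(rs.getString(1) + ": rows=" + rs.getLong(2)
                        + ", bytes=" + rs.getLong(3));
                }
            }
        }
    }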
[37/37] phoenix git commit: PHOENIX-4351 Add i18n-util to bin LICENSE file and to dependencyManagement
Posted by ja...@apache.org.
PHOENIX-4351 Add i18n-util to bin LICENSE file and to dependencyManagement
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d200b516
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d200b516
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d200b516
Branch: refs/heads/4.x-HBase-1.1
Commit: d200b51658b028950b0768df85521f79aeb6a951
Parents: b115f9b
Author: Josh Elser <el...@apache.org>
Authored: Mon Nov 6 15:21:35 2017 -0500
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:42 2017 -0800
----------------------------------------------------------------------
dev/release_files/LICENSE | 2 ++
phoenix-core/pom.xml | 3 +--
pom.xml | 5 +++++
3 files changed, 8 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/d200b516/dev/release_files/LICENSE
----------------------------------------------------------------------
diff --git a/dev/release_files/LICENSE b/dev/release_files/LICENSE
index a72ce86..0fd0255 100644
--- a/dev/release_files/LICENSE
+++ b/dev/release_files/LICENSE
@@ -254,6 +254,8 @@ Janino Compiler (https://github.com/janino-compiler/janino)
Hamcrest-core 1.3 (http://www.hamcrest.org) Copyright (c) 2000-2006, www.hamcrest.org
+i18n-util 1.0.1 (https://github.com/salesforce/i18n-util) Copyright (c) 2017, Salesforce.com, Inc. All rights reserved.
+
---
This product bundles the following products which are licensed with
http://git-wip-us.apache.org/repos/asf/phoenix/blob/d200b516/phoenix-core/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 0bdcc07..93fc70b 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -471,10 +471,9 @@
<artifactId>stream</artifactId>
<version>${stream.version}</version>
</dependency>
- <dependency>
+ <dependency>
<groupId>com.salesforce.i18n</groupId>
<artifactId>i18n-util</artifactId>
- <version>1.0.1</version>
</dependency>
</dependencies>
</project>
http://git-wip-us.apache.org/repos/asf/phoenix/blob/d200b516/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 933e710..48bfa16 100644
--- a/pom.xml
+++ b/pom.xml
@@ -927,6 +927,11 @@
<artifactId>stream</artifactId>
<version>${stream.version}</version>
</dependency>
+ <dependency>
+ <groupId>com.salesforce.i18n</groupId>
+ <artifactId>i18n-util</artifactId>
+ <version>1.0.1</version>
+ </dependency>
</dependencies>
</dependencyManagement>
[26/37] phoenix git commit: PHOENIX-4332 Indexes should inherit guide post width of the base data table
Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/730f9588/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
index daf7c70..788e2dd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
@@ -18,6 +18,8 @@
package org.apache.phoenix.schema.stats;
import java.io.IOException;
+import java.sql.Connection;
+import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@@ -45,15 +47,22 @@ import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.IndexType;
+import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.types.PInteger;
import org.apache.phoenix.schema.types.PLong;
import org.apache.phoenix.util.EnvironmentEdgeManager;
import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.QueryUtil;
import org.apache.phoenix.util.SchemaUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Maps;
/**
@@ -75,6 +84,7 @@ class DefaultStatisticsCollector implements StatisticsCollector {
private ImmutableBytesWritable currentRow;
private final long clientTimeStamp;
private final String tableName;
+ private final boolean isViewIndexTable;
DefaultStatisticsCollector(RegionCoprocessorEnvironment env, String tableName, long clientTimeStamp, byte[] family,
byte[] gp_width_bytes, byte[] gp_per_region_bytes) throws IOException {
@@ -95,6 +105,9 @@ class DefaultStatisticsCollector implements StatisticsCollector {
// since there's no row representing those in SYSTEM.CATALOG.
if (MetaDataUtil.isViewIndex(tableName)) {
pName = MetaDataUtil.getViewIndexUserTableName(tableName);
+ isViewIndexTable = true;
+ } else {
+ isViewIndexTable = false;
}
ptableKey = SchemaUtil.getTableKeyFromFullName(pName);
this.clientTimeStamp = clientTimeStamp;
@@ -109,7 +122,7 @@ class DefaultStatisticsCollector implements StatisticsCollector {
}
}
- private void initGuidepostDepth() throws IOException {
+ private void initGuidepostDepth() throws IOException, ClassNotFoundException, SQLException {
// First check is if guidepost info set on statement itself
if (guidePostPerRegionBytes != null || guidePostWidthBytes != null) {
int guidepostPerRegion = 0;
@@ -135,6 +148,38 @@ class DefaultStatisticsCollector implements StatisticsCollector {
if (!result.isEmpty()) {
Cell cell = result.listCells().get(0);
guidepostWidth = PLong.INSTANCE.getCodec().decodeLong(cell.getValueArray(), cell.getValueOffset(), SortOrder.getDefault());
+ } else if (!isViewIndexTable) {
+ /*
+ * The table we are collecting stats for is potentially a base table, a local
+ * index, or a global index. For view indexes, we rely on the guide post
+ * width column in the parent data table's metadata, which we already tried
+ * retrieving above.
+ */
+ try (Connection conn =
+ QueryUtil.getConnectionOnServer(env.getConfiguration())) {
+ PTable table = PhoenixRuntime.getTable(conn, tableName);
+ if (table.getType() == PTableType.INDEX
+ && table.getIndexType() == IndexType.GLOBAL) {
+ /*
+ * For global indexes, we need to get the parentName first and then
+ * fetch guide post width configured for the parent table.
+ */
+ PName parentName = table.getParentName();
+ byte[] parentKey =
+ SchemaUtil.getTableKeyFromFullName(parentName.getString());
+ get = new Get(parentKey);
+ get.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES);
+ result = htable.get(get);
+ if (!result.isEmpty()) {
+ Cell cell = result.listCells().get(0);
+ guidepostWidth =
+ PLong.INSTANCE.getCodec().decodeLong(cell.getValueArray(),
+ cell.getValueOffset(), SortOrder.getDefault());
+ }
+ }
+ }
+
}
} finally {
if (htable != null) {
@@ -318,7 +363,11 @@ class DefaultStatisticsCollector implements StatisticsCollector {
@Override
public void init() throws IOException {
- initGuidepostDepth();
+ try {
+ initGuidepostDepth();
+ } catch (ClassNotFoundException | SQLException e) {
+ throw new IOException("Unable to initialize the guide post depth", e);
+ }
this.statsWriter = StatisticsWriter.newWriter(env, tableName, clientTimeStamp, guidePostDepth);
}
@@ -331,4 +380,9 @@ class DefaultStatisticsCollector implements StatisticsCollector {
return null;
}
+ @VisibleForTesting // Don't call this method anywhere else
+ public long getGuidePostDepth() {
+ return guidePostDepth;
+ }
+
}
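In client-visible terms, the change above means that a GUIDE_POSTS_WIDTH set
on the data table now also governs stats collection for its global, local,
and view indexes, instead of those falling back to the cluster-wide default.
A hedged sketch of the flow — the URL, table, and index names are
illustrative, and UPDATE STATISTICS ... ALL is the standard Phoenix scope
covering a table together with its indexes:

    import java.sql.Connection;
    import java.sql.DriverManager;

    public class GuidePostWidthInheritanceSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn =
                    DriverManager.getConnection("jdbc:phoenix:localhost")) {
                conn.createStatement().execute(
                    "CREATE TABLE IF NOT EXISTS BASE_T"
                        + " (K INTEGER PRIMARY KEY, A BIGINT, B BIGINT)");
                conn.createStatement().execute(
                    "CREATE INDEX IF NOT EXISTS GI_T ON BASE_T (A) INCLUDE (B)");
                // Narrow the guide posts on the base table only ...
                conn.createStatement().execute(
                    "ALTER TABLE BASE_T SET GUIDE_POSTS_WIDTH=500");
                // ... and with this patch the next stats collection for GI_T
                // resolves its parent (BASE_T) and uses the 500-byte width as
                // its guidepost depth too, rather than the global default.
                conn.createStatement().execute("UPDATE STATISTICS BASE_T ALL");
            }
        }
    }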
[25/37] phoenix git commit: PHOENIX-3460 Namespace separator : should not be allowed in table or schema name
Posted by ja...@apache.org.
PHOENIX-3460 Namespace separator : should not be allowed in table or schema name
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/474bc186
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/474bc186
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/474bc186
Branch: refs/heads/4.x-HBase-1.1
Commit: 474bc1866985e85c5647d8ce95a439b452a31301
Parents: 730f958
Author: Thomas D'Silva <td...@apache.org>
Authored: Thu Nov 2 12:08:07 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:40 2017 -0800
----------------------------------------------------------------------
phoenix-core/src/main/antlr3/PhoenixSQL.g | 15 ++++++++++----
.../apache/phoenix/parse/QueryParserTest.java | 21 ++++++++++++++++++++
2 files changed, 32 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/474bc186/phoenix-core/src/main/antlr3/PhoenixSQL.g
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g b/phoenix-core/src/main/antlr3/PhoenixSQL.g
index 721b514..93e0ede 100644
--- a/phoenix-core/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core/src/main/antlr3/PhoenixSQL.g
@@ -1071,14 +1071,21 @@ cursor_name returns [CursorName ret]
// TODO: figure out how not to repeat this twice
table_name returns [TableName ret]
- : t=identifier {$ret = factory.table(null, t); }
- | s=identifier DOT t=identifier {$ret = factory.table(s, t); }
+ : t=table_identifier {$ret = factory.table(null, t); }
+ | s=table_identifier DOT t=table_identifier {$ret = factory.table(s, t); }
;
// TODO: figure out how not to repeat this twice
from_table_name returns [TableName ret]
- : t=identifier {$ret = factory.table(null, t); }
- | s=identifier DOT t=identifier {$ret = factory.table(s, t); }
+ : t=table_identifier {$ret = factory.table(null, t); }
+ | s=table_identifier DOT t=table_identifier {$ret = factory.table(s, t); }
+ ;
+
+table_identifier returns [String ret]
+ : c=identifier {
+ if (c.contains(QueryConstants.NAMESPACE_SEPARATOR)) { throw new RuntimeException("Table or schema name cannot contain a colon"); }
+ $ret = c;
+ }
;
// The lowest level function, which includes literals, binds, but also parenthesized expressions, functions, and case statements.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/474bc186/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java b/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
index e7127b7..431f60b 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/parse/QueryParserTest.java
@@ -29,6 +29,7 @@ import java.sql.SQLFeatureNotSupportedException;
import java.util.List;
import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.exception.PhoenixParserException;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.PhoenixStatement.Operation;
import org.apache.phoenix.schema.SortOrder;
@@ -56,6 +57,15 @@ public class QueryParserTest {
assertEquals("Expected equality:\n" + sql + "\n" + newSQL, stmt, newStmt);
}
+ private void parseQueryThatShouldFail(String sql) throws Exception {
+ try {
+ parseQuery(sql);
+ fail("Query should throw a PhoenixParserException \n " + sql);
+ }
+ catch (PhoenixParserException e) {
+ // expected
+ }
+ }
+
@Test
public void testParsePreQuery0() throws Exception {
String sql = ((
@@ -782,4 +792,15 @@ public class QueryParserTest {
String sql = Joiner.on(unicodeEnSpace).join(new String[] {"SELECT", "*", "FROM", "T"});
parseQuery(sql);
}
+
+ @Test
+ public void testInvalidTableOrSchemaName() throws Exception {
+ // namespace separator (:) cannot be used
+ parseQueryThatShouldFail("create table a:b (id varchar not null primary key)");
+ parseQueryThatShouldFail("create table \"a:b\" (id varchar not null primary key)");
+ // name separator (.) cannot be used without double quotes
+ parseQueryThatShouldFail("create table a.b.c.d (id varchar not null primary key)");
+ parseQuery("create table \"a.b\".\"c.d\" (id varchar not null primary key)");
+ parseQuery("create table \"a.b.c.d\" (id varchar not null primary key)");
+ }
}
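The client-facing effect of the grammar change: names containing the HBase
namespace separator now fail at parse time rather than silently producing a
surprising physical table. A minimal sketch, assuming a Phoenix JDBC
connection (URL illustrative); PhoenixParserException is a SQLException
subclass, so a plain SQLException catch suffices:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;

    public class NamespaceSeparatorSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn =
                    DriverManager.getConnection("jdbc:phoenix:localhost")) {
                try {
                    // Rejected: ':' is reserved as the HBase namespace separator.
                    conn.createStatement().execute(
                        "CREATE TABLE a:b (id VARCHAR NOT NULL PRIMARY KEY)");
                } catch (SQLException e) {
                    System.out.println("Rejected at parse time: " + e.getMessage());
                }
                // Still legal: a quoted '.' is part of the name, not a separator.
                conn.createStatement().execute(
                    "CREATE TABLE \"a.b\".\"c.d\" (id VARCHAR NOT NULL PRIMARY KEY)");
            }
        }
    }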
[28/37] phoenix git commit: PHOENIX-4287 Incorrect aggregate query results when stats are disabled for parallelization
Posted by ja...@apache.org.
PHOENIX-4287 Incorrect aggregate query results when stats are disabled for parallelization
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/cba2b571
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/cba2b571
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/cba2b571
Branch: refs/heads/4.x-HBase-1.1
Commit: cba2b5719cb39f244f12b79f732233bb9ef6fb4c
Parents: e0df4b2
Author: Samarth Jain <sa...@apache.org>
Authored: Tue Oct 31 10:12:22 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:40 2017 -0800
----------------------------------------------------------------------
.../end2end/ExplainPlanWithStatsEnabledIT.java | 209 ++++++++++++++++++-
.../phoenix/iterate/BaseResultIterators.java | 55 +++--
2 files changed, 246 insertions(+), 18 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/cba2b571/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
index 62538af..931c398 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.end2end;
import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_USE_STATS_FOR_PARALLELIZATION;
import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.sql.Connection;
@@ -387,11 +388,8 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
@Test
public void testBytesRowsForSelectOnTenantViews() throws Exception {
String tenant1View = generateUniqueName();
- ;
String tenant2View = generateUniqueName();
- ;
String tenant3View = generateUniqueName();
- ;
String multiTenantBaseTable = generateUniqueName();
String tenant1 = "tenant1";
String tenant2 = "tenant2";
@@ -504,6 +502,211 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
}
}
+ @Test // See https://issues.apache.org/jira/browse/PHOENIX-4287
+ public void testEstimatesForAggregateQueries() throws Exception {
+ String tableName = generateUniqueName();
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ int guidePostWidth = 20;
+ String ddl =
+ "CREATE TABLE " + tableName + " (k INTEGER PRIMARY KEY, a bigint, b bigint)"
+ + " GUIDE_POSTS_WIDTH=" + guidePostWidth
+ + ", USE_STATS_FOR_PARALLELIZATION=false";
+ byte[][] splits =
+ new byte[][] { Bytes.toBytes(102), Bytes.toBytes(105), Bytes.toBytes(108) };
+ BaseTest.createTestTable(getUrl(), ddl, splits, null);
+ conn.createStatement().execute("upsert into " + tableName + " values (100,1,3)");
+ conn.createStatement().execute("upsert into " + tableName + " values (101,2,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (102,2,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (103,2,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (104,2,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (105,2,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (106,2,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (107,2,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (108,2,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (109,2,4)");
+ conn.commit();
+ conn.createStatement().execute("UPDATE STATISTICS " + tableName + "");
+ }
+ List<Object> binds = Lists.newArrayList();
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ String sql = "SELECT COUNT(*) " + " FROM " + tableName;
+ ResultSet rs = conn.createStatement().executeQuery(sql);
+ assertTrue(rs.next());
+ assertEquals(10, rs.getInt(1));
+ Estimate info = getByteRowEstimates(conn, sql, binds);
+ assertEquals((Long) 10L, info.getEstimatedRows());
+ assertTrue(info.getEstimateInfoTs() > 0);
+
+ // Now let's make sure that when using stats for parallelization, our estimates
+ // and query results stay the same
+ conn.createStatement().execute(
+ "ALTER TABLE " + tableName + " SET USE_STATS_FOR_PARALLELIZATION=true");
+ rs = conn.createStatement().executeQuery(sql);
+ assertTrue(rs.next());
+ assertEquals(10, rs.getInt(1));
+ info = getByteRowEstimates(conn, sql, binds);
+ assertEquals((Long) 10L, info.getEstimatedRows());
+ assertTrue(info.getEstimateInfoTs() > 0);
+ }
+ }
+
+ @Test
+ public void testSelectQueriesWithStatsForParallelizationOff() throws Exception {
+ testSelectQueriesWithFilters(false);
+ }
+
+ @Test
+ public void testSelectQueriesWithStatsForParallelizationOn() throws Exception {
+ testSelectQueriesWithFilters(true);
+ }
+
+ private void testSelectQueriesWithFilters(boolean useStatsForParallelization) throws Exception {
+ String tableName = generateUniqueName();
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ int guidePostWidth = 20;
+ String ddl =
+ "CREATE TABLE " + tableName + " (k INTEGER PRIMARY KEY, a bigint, b bigint)"
+ + " GUIDE_POSTS_WIDTH=" + guidePostWidth
+ + ", USE_STATS_FOR_PARALLELIZATION=" + useStatsForParallelization;
+ byte[][] splits =
+ new byte[][] { Bytes.toBytes(102), Bytes.toBytes(105), Bytes.toBytes(108) };
+ BaseTest.createTestTable(getUrl(), ddl, splits, null);
+ conn.createStatement().execute("upsert into " + tableName + " values (100,100,3)");
+ conn.createStatement().execute("upsert into " + tableName + " values (101,101,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (102,102,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (103,103,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (104,104,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (105,105,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (106,106,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (107,107,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (108,108,4)");
+ conn.createStatement().execute("upsert into " + tableName + " values (109,109,4)");
+ conn.commit();
+ conn.createStatement().execute("UPDATE STATISTICS " + tableName + "");
+ }
+ List<Object> binds = Lists.newArrayList();
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ // query whose start key is before any data
+ String sql = "SELECT a FROM " + tableName + " WHERE K >= 99";
+ ResultSet rs = conn.createStatement().executeQuery(sql);
+ int i = 0;
+ int numRows = 10;
+ while (rs.next()) {
+ assertEquals(100 + i, rs.getInt(1));
+ i++;
+ }
+ assertEquals(numRows, i);
+ Estimate info = getByteRowEstimates(conn, sql, binds);
+ assertEquals((Long) 10L, info.getEstimatedRows());
+ assertEquals((Long) 930L, info.getEstimatedBytes());
+ assertTrue(info.getEstimateInfoTs() > 0);
+
+ // query whose start key is after all the data
+ sql = "SELECT a FROM " + tableName + " WHERE K >= 110";
+ rs = conn.createStatement().executeQuery(sql);
+ assertFalse(rs.next());
+ info = getByteRowEstimates(conn, sql, binds);
+ assertEquals((Long) 0L, info.getEstimatedRows());
+ assertEquals((Long) 0L, info.getEstimatedBytes());
+ assertTrue(info.getEstimateInfoTs() > 0);
+
+ // Query whose end key is before any data
+ sql = "SELECT a FROM " + tableName + " WHERE K <= 98";
+ rs = conn.createStatement().executeQuery(sql);
+ assertFalse(rs.next());
+ info = getByteRowEstimates(conn, sql, binds);
+ assertEquals((Long) 0L, info.getEstimatedRows());
+ assertEquals((Long) 0L, info.getEstimatedBytes());
+ assertTrue(info.getEstimateInfoTs() > 0);
+
+ // Query whose end key is after all the data. In this case, we return the
+ // estimate as scanning all the guide posts.
+ sql = "SELECT a FROM " + tableName + " WHERE K <= 110";
+ rs = conn.createStatement().executeQuery(sql);
+ i = 0;
+ numRows = 10;
+ while (rs.next()) {
+ assertEquals(100 + i, rs.getInt(1));
+ i++;
+ }
+ assertEquals(numRows, i);
+ info = getByteRowEstimates(conn, sql, binds);
+ assertEquals((Long) 10L, info.getEstimatedRows());
+ assertEquals((Long) 930L, info.getEstimatedBytes());
+ assertTrue(info.getEstimateInfoTs() > 0);
+
+ // Query whose start key and end key are both before any data. In this case,
+ // we return an estimate of scanning no guide posts.
+ sql = "SELECT a FROM " + tableName + " WHERE K <= 90 AND K >= 80";
+ rs = conn.createStatement().executeQuery(sql);
+ assertFalse(rs.next());
+ info = getByteRowEstimates(conn, sql, binds);
+ assertEquals((Long) 0L, info.getEstimatedRows());
+ assertEquals((Long) 0L, info.getEstimatedBytes());
+ assertTrue(info.getEstimateInfoTs() > 0);
+
+ // Query whose start key and end key are both after all the data. In this
+ // case, we return an estimate of scanning no guide posts.
+ sql = "SELECT a FROM " + tableName + " WHERE K <= 130 AND K >= 120";
+ rs = conn.createStatement().executeQuery(sql);
+ assertFalse(rs.next());
+ info = getByteRowEstimates(conn, sql, binds);
+ assertEquals((Long) 0L, info.getEstimatedRows());
+ assertEquals((Long) 0L, info.getEstimatedBytes());
+ assertTrue(info.getEstimateInfoTs() > 0);
+
+ // Query whose start key is before the data and whose end key falls within
+ // it. The estimate covers the guide posts the scan range intersects.
+ sql = "SELECT a FROM " + tableName + " WHERE K <= 102 AND K >= 90";
+ rs = conn.createStatement().executeQuery(sql);
+ i = 0;
+ numRows = 3;
+ while (rs.next()) {
+ assertEquals(100 + i, rs.getInt(1));
+ i++;
+ }
+ info = getByteRowEstimates(conn, sql, binds);
+ // Depending on where the guidepost boundaries fall, this estimate
+ // can be slightly off. It's called an estimate for a reason.
+ assertEquals((Long) 4L, info.getEstimatedRows());
+ assertEquals((Long) 330L, info.getEstimatedBytes());
+ assertTrue(info.getEstimateInfoTs() > 0);
+ // Query whose start key falls within the data and whose end key is after it.
+ sql = "SELECT a FROM " + tableName + " WHERE K <= 120 AND K >= 100";
+ rs = conn.createStatement().executeQuery(sql);
+ i = 0;
+ numRows = 10;
+ while (rs.next()) {
+ assertEquals(100 + i, rs.getInt(1));
+ i++;
+ }
+ info = getByteRowEstimates(conn, sql, binds);
+ // Depending on where the guidepost boundaries fall, this estimate
+ // can be slightly off. It's called an estimate for a reason.
+ assertEquals((Long) 9L, info.getEstimatedRows());
+ assertEquals((Long) 900L, info.getEstimatedBytes());
+ assertTrue(info.getEstimateInfoTs() > 0);
+ // Query whose start key and end key both fall within the data.
+ sql = "SELECT a FROM " + tableName + " WHERE K <= 109 AND K >= 100";
+ rs = conn.createStatement().executeQuery(sql);
+ i = 0;
+ numRows = 10;
+ while (rs.next()) {
+ assertEquals(100 + i, rs.getInt(1));
+ i++;
+ }
+ info = getByteRowEstimates(conn, sql, binds);
+ // Depending on where the guidepost boundaries fall, this estimate
+ // can be slightly off. It's called an estimate for a reason.
+ assertEquals((Long) 9L, info.getEstimatedRows());
+ assertEquals((Long) 900L, info.getEstimatedBytes());
+ assertTrue(info.getEstimateInfoTs() > 0);
+ }
+ }
+
private static void createMultitenantTableAndViews(String tenant1View, String tenant2View,
String tenant3View, String tenant1, String tenant2, String tenant3,
String multiTenantTable, MyClock clock) throws SQLException {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/cba2b571/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 250cb48..e9deec3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -35,6 +35,7 @@ import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.EOFException;
+import java.io.IOException;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.BitSet;
@@ -585,15 +586,29 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
return context.getConnection().getQueryServices().getTableStats(key);
}
- private List<Scan> addNewScan(List<List<Scan>> parallelScans, List<Scan> scans, Scan scan, byte[] startKey, boolean crossedRegionBoundary, HRegionLocation regionLocation) {
+ private List<Scan> addNewScan(List<List<Scan>> parallelScans, List<Scan> scans, Scan scan,
+ byte[] startKey, boolean crossedRegionBoundary, HRegionLocation regionLocation,
+ GuidePostEstimate estimate, Long gpsRows, Long gpsBytes) {
boolean startNewScan = scanGrouper.shouldStartNewScan(plan, scans, startKey, crossedRegionBoundary);
if (scan != null) {
if (regionLocation.getServerName() != null) {
scan.setAttribute(BaseScannerRegionObserver.SCAN_REGION_SERVER, regionLocation.getServerName().getVersionedBytes());
}
- scans.add(scan);
+ if (useStatsForParallelization || crossedRegionBoundary) {
+ scans.add(scan);
+ }
+ if (estimate != null && gpsRows != null) {
+ estimate.rowsEstimate += gpsRows;
+ }
+ if (estimate != null && gpsBytes != null) {
+ estimate.bytesEstimate += gpsBytes;
+ }
}
- if (startNewScan && !scans.isEmpty()) {
+ if (startNewScan && !scans.isEmpty() && useStatsForParallelization) {
+ /*
+ * Note that even if the region boundary was crossed, nothing gets added to
+ * the parallel scans when we are not using stats for parallelization.
+ */
parallelScans.add(scans);
scans = Lists.newArrayListWithExpectedSize(1);
}
@@ -653,7 +668,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
newScan.setStopRow(regionInfo.getEndKey());
}
}
- scans = addNewScan(parallelScans, scans, newScan, endKey, true, regionLocation);
+ scans = addNewScan(parallelScans, scans, newScan, endKey, true, regionLocation, null, null, null);
regionIndex++;
}
if (!scans.isEmpty()) { // Add any remaining scans
@@ -662,6 +677,11 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
return parallelScans;
}
+ private static class GuidePostEstimate {
+ private long bytesEstimate;
+ private long rowsEstimate;
+ }
+
/**
* Compute the list of parallel scans to run for a given query. The inner scans
* may be concatenated together directly, while the other ones may need to be
@@ -721,8 +741,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
DataInput input = null;
PrefixByteDecoder decoder = null;
int guideIndex = 0;
- long estimatedRows = 0;
- long estimatedSize = 0;
+ GuidePostEstimate estimates = new GuidePostEstimate();
long estimateTs = Long.MAX_VALUE;
long minGuidePostTimestamp = Long.MAX_VALUE;
try {
@@ -763,6 +782,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
endRegionKey = regionInfo.getEndKey();
keyOffset = ScanUtil.getRowKeyOffset(regionInfo.getStartKey(), endRegionKey);
}
+ byte[] initialKeyBytes = currentKeyBytes;
while (intersectWithGuidePosts && (endKey.length == 0 || currentGuidePost.compareTo(endKey) <= 0)) {
Scan newScan = scanRanges.intersectScan(scan, currentKeyBytes, currentGuidePostBytes, keyOffset,
false);
@@ -770,12 +790,11 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
ScanUtil.setLocalIndexAttributes(newScan, keyOffset,
regionInfo.getStartKey(), regionInfo.getEndKey(),
newScan.getStartRow(), newScan.getStopRow());
- estimatedRows += gps.getRowCounts()[guideIndex];
- estimatedSize += gps.getByteCounts()[guideIndex];
- }
- if (useStatsForParallelization) {
- scans = addNewScan(parallelScans, scans, newScan, currentGuidePostBytes, false, regionLocation);
}
+ scans =
+ addNewScan(parallelScans, scans, newScan, currentGuidePostBytes, false,
+ regionLocation, estimates, gps.getRowCounts()[guideIndex],
+ gps.getByteCounts()[guideIndex]);
currentKeyBytes = currentGuidePostBytes;
try {
currentGuidePost = PrefixByteCodec.decode(decoder, input);
@@ -794,12 +813,19 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
intersectWithGuidePosts = false;
}
}
+ if (!useStatsForParallelization) {
+ /*
+ * If we are not using stats for generating parallel scans, we need to reset the
+ * currentKey back to what it was at the beginning of the loop.
+ */
+ currentKeyBytes = initialKeyBytes;
+ }
Scan newScan = scanRanges.intersectScan(scan, currentKeyBytes, endKey, keyOffset, true);
if(newScan != null) {
ScanUtil.setLocalIndexAttributes(newScan, keyOffset, regionInfo.getStartKey(),
regionInfo.getEndKey(), newScan.getStartRow(), newScan.getStopRow());
}
- scans = addNewScan(parallelScans, scans, newScan, endKey, true, regionLocation);
+ scans = addNewScan(parallelScans, scans, newScan, endKey, true, regionLocation, null, null, null);
currentKeyBytes = endKey;
regionIndex++;
}
@@ -814,8 +840,8 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
this.estimatedSize = gps.getByteCounts()[0];
this.estimateInfoTimestamp = gps.getGuidePostTimestamps()[0];
} else if (hasGuidePosts) {
- this.estimatedRows = estimatedRows;
- this.estimatedSize = estimatedSize;
+ this.estimatedRows = estimates.rowsEstimate;
+ this.estimatedSize = estimates.bytesEstimate;
this.estimateInfoTimestamp = estimateTs;
} else {
this.estimatedRows = null;
@@ -828,7 +854,6 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
} finally {
if (stream != null) Closeables.closeQuietly(stream);
}
-
sampleScans(parallelScans,this.plan.getStatement().getTableSamplingRate());
return parallelScans;
}
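For context, the estimates the new tests assert through the
getByteRowEstimates helper are also exposed to ordinary clients: since the
4.12-era work, the EXPLAIN result set carries EST_BYTES_READ, EST_ROWS_READ,
and EST_INFO_TS columns alongside the plan. A hedged sketch — the URL and
table name are illustrative, and the column names assume those EXPLAIN
additions are present in your version:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class ExplainEstimatesSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn =
                    DriverManager.getConnection("jdbc:phoenix:localhost")) {
                // With this fix, flipping USE_STATS_FOR_PARALLELIZATION no longer
                // changes the estimates (or the results) of an aggregate query.
                conn.createStatement().execute(
                    "ALTER TABLE MY_TABLE SET USE_STATS_FOR_PARALLELIZATION=false");
                ResultSet rs = conn.createStatement().executeQuery(
                    "EXPLAIN SELECT COUNT(*) FROM MY_TABLE");
                while (rs.next()) {
                    System.out.println(rs.getString("PLAN")
                        + " estRows=" + rs.getLong("EST_ROWS_READ")
                        + " estBytes=" + rs.getLong("EST_BYTES_READ"));
                }
            }
        }
    }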
[35/37] phoenix git commit: Set version to 4.13.0-HBase-1.3 for release
Posted by ja...@apache.org.
Set version to 4.13.0-HBase-1.3 for release
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/47e7c60e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/47e7c60e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/47e7c60e
Branch: refs/heads/4.x-HBase-1.1
Commit: 47e7c60ebb158a231d614e7088e20a44da413e56
Parents: 8947624
Author: Mujtaba <mu...@apache.org>
Authored: Fri Nov 3 11:59:25 2017 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Wed Nov 15 10:46:42 2017 -0800
----------------------------------------------------------------------
phoenix-assembly/pom.xml | 2 +-
phoenix-client/pom.xml | 2 +-
phoenix-core/pom.xml | 2 +-
phoenix-flume/pom.xml | 2 +-
phoenix-hive/pom.xml | 2 +-
phoenix-kafka/pom.xml | 2 +-
phoenix-load-balancer/pom.xml | 2 +-
phoenix-pherf/pom.xml | 2 +-
phoenix-pig/pom.xml | 2 +-
phoenix-queryserver-client/pom.xml | 2 +-
phoenix-queryserver/pom.xml | 2 +-
phoenix-server/pom.xml | 2 +-
phoenix-spark/pom.xml | 2 +-
phoenix-tracing-webapp/pom.xml | 2 +-
pom.xml | 2 +-
15 files changed, 15 insertions(+), 15 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/47e7c60e/phoenix-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index ae28514..8ec4ebb 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -27,7 +27,7 @@
<parent>
<groupId>org.apache.phoenix</groupId>
<artifactId>phoenix</artifactId>
- <version>4.13.0-SNAPSHOT</version>
+ <version>4.13.0-HBase-1.3</version>
</parent>
<artifactId>phoenix-assembly</artifactId>
<name>Phoenix Assembly</name>
http://git-wip-us.apache.org/repos/asf/phoenix/blob/47e7c60e/phoenix-client/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index 648c452..77df4ca 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -27,7 +27,7 @@
<parent>
<groupId>org.apache.phoenix</groupId>
<artifactId>phoenix</artifactId>
- <version>4.13.0-SNAPSHOT</version>
+ <version>4.13.0-HBase-1.3</version>
</parent>
<artifactId>phoenix-client</artifactId>
<name>Phoenix Client</name>
http://git-wip-us.apache.org/repos/asf/phoenix/blob/47e7c60e/phoenix-core/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index f82cddc..0bdcc07 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
<parent>
<groupId>org.apache.phoenix</groupId>
<artifactId>phoenix</artifactId>
- <version>4.13.0-SNAPSHOT</version>
+ <version>4.13.0-HBase-1.3</version>
</parent>
<artifactId>phoenix-core</artifactId>
<name>Phoenix Core</name>
http://git-wip-us.apache.org/repos/asf/phoenix/blob/47e7c60e/phoenix-flume/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index 63df1af..bd1cd7e 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -26,7 +26,7 @@
<parent>
<groupId>org.apache.phoenix</groupId>
<artifactId>phoenix</artifactId>
- <version>4.13.0-SNAPSHOT</version>
+ <version>4.13.0-HBase-1.3</version>
</parent>
<artifactId>phoenix-flume</artifactId>
<name>Phoenix - Flume</name>
http://git-wip-us.apache.org/repos/asf/phoenix/blob/47e7c60e/phoenix-hive/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-hive/pom.xml b/phoenix-hive/pom.xml
index b0fd817..a2531e0 100644
--- a/phoenix-hive/pom.xml
+++ b/phoenix-hive/pom.xml
@@ -27,7 +27,7 @@
<parent>
<groupId>org.apache.phoenix</groupId>
<artifactId>phoenix</artifactId>
- <version>4.13.0-SNAPSHOT</version>
+ <version>4.13.0-HBase-1.3</version>
</parent>
<artifactId>phoenix-hive</artifactId>
<name>Phoenix - Hive</name>
http://git-wip-us.apache.org/repos/asf/phoenix/blob/47e7c60e/phoenix-kafka/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-kafka/pom.xml b/phoenix-kafka/pom.xml
index 47da23c..610bb01 100644
--- a/phoenix-kafka/pom.xml
+++ b/phoenix-kafka/pom.xml
@@ -26,7 +26,7 @@
<parent>
<groupId>org.apache.phoenix</groupId>
<artifactId>phoenix</artifactId>
- <version>4.13.0-SNAPSHOT</version>
+ <version>4.13.0-HBase-1.3</version>
</parent>
<artifactId>phoenix-kafka</artifactId>
<name>Phoenix - Kafka</name>
http://git-wip-us.apache.org/repos/asf/phoenix/blob/47e7c60e/phoenix-load-balancer/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-load-balancer/pom.xml b/phoenix-load-balancer/pom.xml
index b682140..7b819e1 100644
--- a/phoenix-load-balancer/pom.xml
+++ b/phoenix-load-balancer/pom.xml
@@ -27,7 +27,7 @@
<parent>
<groupId>org.apache.phoenix</groupId>
<artifactId>phoenix</artifactId>
- <version>4.13.0-SNAPSHOT</version>
+ <version>4.13.0-HBase-1.3</version>
</parent>
<artifactId>phoenix-load-balancer</artifactId>
<name>Phoenix Load Balancer</name>
http://git-wip-us.apache.org/repos/asf/phoenix/blob/47e7c60e/phoenix-pherf/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index 8368c45..edd9a27 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -15,7 +15,7 @@
<parent>
<groupId>org.apache.phoenix</groupId>
<artifactId>phoenix</artifactId>
- <version>4.13.0-SNAPSHOT</version>
+ <version>4.13.0-HBase-1.3</version>
</parent>
<artifactId>phoenix-pherf</artifactId>
http://git-wip-us.apache.org/repos/asf/phoenix/blob/47e7c60e/phoenix-pig/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-pig/pom.xml b/phoenix-pig/pom.xml
index 942f106..aa86fc9 100644
--- a/phoenix-pig/pom.xml
+++ b/phoenix-pig/pom.xml
@@ -26,7 +26,7 @@
<parent>
<groupId>org.apache.phoenix</groupId>
<artifactId>phoenix</artifactId>
- <version>4.13.0-SNAPSHOT</version>
+ <version>4.13.0-HBase-1.3</version>
</parent>
<artifactId>phoenix-pig</artifactId>
<name>Phoenix - Pig</name>
http://git-wip-us.apache.org/repos/asf/phoenix/blob/47e7c60e/phoenix-queryserver-client/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-queryserver-client/pom.xml b/phoenix-queryserver-client/pom.xml
index 492b815..1c23e25 100644
--- a/phoenix-queryserver-client/pom.xml
+++ b/phoenix-queryserver-client/pom.xml
@@ -27,7 +27,7 @@
<parent>
<groupId>org.apache.phoenix</groupId>
<artifactId>phoenix</artifactId>
- <version>4.13.0-SNAPSHOT</version>
+ <version>4.13.0-HBase-1.3</version>
</parent>
<artifactId>phoenix-queryserver-client</artifactId>
<name>Phoenix Query Server Client</name>
http://git-wip-us.apache.org/repos/asf/phoenix/blob/47e7c60e/phoenix-queryserver/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-queryserver/pom.xml b/phoenix-queryserver/pom.xml
index 79d71df..dd923df 100644
--- a/phoenix-queryserver/pom.xml
+++ b/phoenix-queryserver/pom.xml
@@ -26,7 +26,7 @@
<parent>
<groupId>org.apache.phoenix</groupId>
<artifactId>phoenix</artifactId>
- <version>4.13.0-SNAPSHOT</version>
+ <version>4.13.0-HBase-1.3</version>
</parent>
<artifactId>phoenix-queryserver</artifactId>
<name>Phoenix Query Server</name>
http://git-wip-us.apache.org/repos/asf/phoenix/blob/47e7c60e/phoenix-server/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index 67832ad..ba9524e 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -27,7 +27,7 @@
<parent>
<groupId>org.apache.phoenix</groupId>
<artifactId>phoenix</artifactId>
- <version>4.13.0-SNAPSHOT</version>
+ <version>4.13.0-HBase-1.3</version>
</parent>
<artifactId>phoenix-server</artifactId>
<name>Phoenix Server</name>
http://git-wip-us.apache.org/repos/asf/phoenix/blob/47e7c60e/phoenix-spark/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index 3990246..e4d14c4 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -28,7 +28,7 @@
<parent>
<groupId>org.apache.phoenix</groupId>
<artifactId>phoenix</artifactId>
- <version>4.13.0-SNAPSHOT</version>
+ <version>4.13.0-HBase-1.3</version>
</parent>
<artifactId>phoenix-spark</artifactId>
<name>Phoenix - Spark</name>
http://git-wip-us.apache.org/repos/asf/phoenix/blob/47e7c60e/phoenix-tracing-webapp/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-tracing-webapp/pom.xml b/phoenix-tracing-webapp/pom.xml
index 433f9e2..79f0a86 100755
--- a/phoenix-tracing-webapp/pom.xml
+++ b/phoenix-tracing-webapp/pom.xml
@@ -27,7 +27,7 @@
<parent>
<groupId>org.apache.phoenix</groupId>
<artifactId>phoenix</artifactId>
- <version>4.13.0-SNAPSHOT</version>
+ <version>4.13.0-HBase-1.3</version>
</parent>
<artifactId>phoenix-tracing-webapp</artifactId>
http://git-wip-us.apache.org/repos/asf/phoenix/blob/47e7c60e/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 0c40eed..933e710 100644
--- a/pom.xml
+++ b/pom.xml
@@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.phoenix</groupId>
<artifactId>phoenix</artifactId>
- <version>4.13.0-SNAPSHOT</version>
+ <version>4.13.0-HBase-1.3</version>
<packaging>pom</packaging>
<name>Apache Phoenix</name>
<description>A SQL layer over HBase</description>