You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@phoenix.apache.org by ja...@apache.org on 2018/04/26 16:52:22 UTC
[1/6] phoenix git commit: PHOENIX-4699 Stop scan after finding first
child of table during drop
Repository: phoenix
Updated Branches:
refs/heads/4.x-HBase-1.3 e578a869c -> 59ab3f15c
PHOENIX-4699 Stop scan after finding first child of table during drop
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f9369f8f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f9369f8f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f9369f8f
Branch: refs/heads/4.x-HBase-1.3
Commit: f9369f8f973456832bdcfcaf669f0e067bf73461
Parents: e578a86
Author: James Taylor <jt...@salesforce.com>
Authored: Thu Apr 19 14:39:27 2018 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Thu Apr 26 09:49:35 2018 -0700
----------------------------------------------------------------------
.../coprocessor/MetaDataEndpointImpl.java | 44 +++++++++++---------
1 file changed, 25 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/f9369f8f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 4c72c2d..c28ad3c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -122,8 +122,10 @@ import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
+import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.ipc.RpcServer.Call;
@@ -275,6 +277,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
public static final String ROW_KEY_ORDER_OPTIMIZABLE = "ROW_KEY_ORDER_OPTIMIZABLE";
public static final byte[] ROW_KEY_ORDER_OPTIMIZABLE_BYTES = Bytes.toBytes(ROW_KEY_ORDER_OPTIMIZABLE);
+ private static final byte[] CHILD_TABLE_BYTES = new byte[] {PTable.LinkType.CHILD_TABLE.getSerializedValue()};
+ private static final byte[] PHYSICAL_TABLE_BYTES =
+ new byte[] { PTable.LinkType.PHYSICAL_TABLE.getSerializedValue() };
+
// KeyValues for Table
private static final KeyValue TABLE_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES);
private static final KeyValue TABLE_SEQ_NUM_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
@@ -1527,7 +1533,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
for (PTable index : parentTable.getIndexes()) {
indexes.add(TableName.valueOf(index.getPhysicalName().getBytes()));
}
-
} else {
// Mapped View
cParentPhysicalName = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
@@ -1810,13 +1815,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
private boolean execeededIndexQuota(PTableType tableType, PTable parentTable) {
return PTableType.INDEX == tableType && parentTable.getIndexes().size() >= maxIndexesPerTable;
}
-
- private static final byte[] CHILD_TABLE_BYTES = new byte[] {PTable.LinkType.CHILD_TABLE.getSerializedValue()};
-
private void findAllChildViews(Region region, byte[] tenantId, PTable table,
TableViewFinder result, long clientTimeStamp, int clientVersion) throws IOException, SQLException {
- TableViewFinder currResult = findChildViews(region, tenantId, table, clientVersion);
+ TableViewFinder currResult = findChildViews(region, tenantId, table, clientVersion, false);
result.addResult(currResult);
for (ViewInfo viewInfo : currResult.getViewInfoList()) {
byte[] viewtenantId = viewInfo.getTenantId();
@@ -1829,9 +1831,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
}
- // TODO remove this in 4.13 release
- @Deprecated
- private TableViewFinder findChildViews_deprecated(Region region, byte[] tenantId, PTable table, byte[] linkTypeBytes) throws IOException {
+ // TODO use child link instead once splittable system catalog (PHOENIX-3534) is implemented
+ // and we have a separate table for links.
+ private TableViewFinder findChildViews_deprecated(Region region, byte[] tenantId, PTable table, byte[] linkTypeBytes, boolean stopAfterFirst) throws IOException {
byte[] schemaName = table.getSchemaName().getBytes();
byte[] tableName = table.getTableName().getBytes();
boolean isMultiTenant = table.isMultiTenant();
@@ -1854,7 +1856,11 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
.getPhysicalHBaseTableName(schemaName, tableName, table.isNamespaceMapped())
.getBytes());
SuffixFilter rowFilter = new SuffixFilter(suffix);
- FilterList filter = new FilterList(linkFilter,tableTypeFilter,rowFilter);
+ List<Filter> filters = Lists.<Filter>newArrayList(linkFilter,tableTypeFilter,rowFilter);
+ if (stopAfterFirst) {
+ filters.add(new PageFilter(1));
+ }
+ FilterList filter = new FilterList(filters);
scan.setFilter(filter);
scan.addColumn(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES);
scan.addColumn(TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES);
@@ -1897,15 +1903,19 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
}
- private TableViewFinder findChildViews_4_11(Region region, byte[] tenantId, byte[] schemaName, byte[] tableName) throws IOException {
+ private TableViewFinder findChildViews_4_11(Region region, byte[] tenantId, byte[] schemaName, byte[] tableName, boolean stopAfterFirst) throws IOException {
Scan scan = new Scan();
byte[] startRow = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
byte[] stopRow = ByteUtil.nextKey(startRow);
scan.setStartRow(startRow);
scan.setStopRow(stopRow);
SingleColumnValueFilter linkFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, CompareOp.EQUAL, CHILD_TABLE_BYTES);
+ Filter filter = linkFilter;
linkFilter.setFilterIfMissing(true);
- scan.setFilter(linkFilter);
+ if (stopAfterFirst) {
+ filter = new FilterList(linkFilter, new PageFilter(1));
+ }
+ scan.setFilter(filter);
scan.addColumn(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES);
scan.addColumn(TABLE_FAMILY_BYTES, PARENT_TENANT_ID_BYTES);
@@ -1945,11 +1955,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
}
}
-
- private static final byte[] PHYSICAL_TABLE_BYTES =
- new byte[] { PTable.LinkType.PHYSICAL_TABLE.getSerializedValue() };
- private TableViewFinder findChildViews(Region region, byte[] tenantId, PTable table, int clientVersion)
+ private TableViewFinder findChildViews(Region region, byte[] tenantId, PTable table, int clientVersion, boolean stopAfterFirst)
throws IOException, SQLException {
byte[] tableKey =
SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY,
@@ -1960,10 +1967,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
loadTable(env, tableKey, cacheKey, MIN_SYSTEM_TABLE_TIMESTAMP,
HConstants.LATEST_TIMESTAMP, clientVersion);
if (systemCatalog.getTimeStamp() < MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0) {
- return findChildViews_deprecated(region, tenantId, table, PHYSICAL_TABLE_BYTES);
+ return findChildViews_deprecated(region, tenantId, table, PHYSICAL_TABLE_BYTES, stopAfterFirst);
} else {
return findChildViews_4_11(region, tenantId, table.getSchemaName().getBytes(),
- table.getTableName().getBytes());
+ table.getTableName().getBytes(), stopAfterFirst);
}
}
@@ -2131,7 +2138,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
if (tableType == PTableType.TABLE || tableType == PTableType.SYSTEM) {
// Handle any child views that exist
- TableViewFinder tableViewFinderResult = findChildViews(region, tenantId, table, clientVersion);
+ TableViewFinder tableViewFinderResult = findChildViews(region, tenantId, table, clientVersion, !isCascade);
if (tableViewFinderResult.hasViews()) {
if (isCascade) {
if (tableViewFinderResult.allViewsInMultipleRegions()) {
@@ -2541,7 +2548,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
// lock the rows corresponding to views so that no other thread can modify the view meta-data
RowLock viewRowLock = acquireLock(region, viewKey, locks);
PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock, clientVersion);
-
ColumnOrdinalPositionUpdateList ordinalPositionList = new ColumnOrdinalPositionUpdateList();
List<PColumn> viewPkCols = new ArrayList<>(view.getPKColumns());
boolean addingExistingPkCol = false;
[2/6] phoenix git commit: PHOENIX-4708 Do not propagate
GUIDE_POSTS_WIDTH to children
Posted by ja...@apache.org.
PHOENIX-4708 Do not propagate GUIDE_POSTS_WIDTH to children
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/59ab3f15
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/59ab3f15
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/59ab3f15
Branch: refs/heads/4.x-HBase-1.3
Commit: 59ab3f15cb11ade9d85d6175d3f8697f329bd0b9
Parents: b173aaf
Author: James Taylor <jt...@salesforce.com>
Authored: Thu Apr 26 09:40:35 2018 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Thu Apr 26 09:51:01 2018 -0700
----------------------------------------------------------------------
.../phoenix/end2end/AlterTableWithViewsIT.java | 22 ++
.../coprocessor/MetaDataEndpointImpl.java | 115 +++++---
.../apache/phoenix/schema/TableProperty.java | 270 ++++++++++---------
3 files changed, 248 insertions(+), 159 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/59ab3f15/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
index 237a8d2..e1b1372 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
@@ -25,6 +25,7 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.sql.Connection;
+import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
@@ -39,12 +40,14 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.coprocessor.TephraTransactionalProcessor;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PNameFactory;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTableKey;
import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.StringUtil;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@@ -166,6 +169,25 @@ public class AlterTableWithViewsIT extends ParallelStatsDisabledIT {
assertTrue(viewTable2.isImmutableRows());
// update cache frequency is not propagated to the view since it was altered on the view
assertEquals(1, viewTable2.getUpdateCacheFrequency());
+
+ long gpw = 1000000;
+ conn.createStatement().execute("ALTER TABLE " + tableName + " SET GUIDE_POSTS_WIDTH=" + gpw);
+
+ ResultSet rs;
+ DatabaseMetaData md = conn.getMetaData();
+ rs = md.getTables("", "", StringUtil.escapeLike(tableName), null);
+ assertTrue(rs.next());
+ assertEquals(gpw, rs.getLong(PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH));
+
+ rs = md.getTables(null, "", StringUtil.escapeLike(viewOfTable1), null);
+ assertTrue(rs.next());
+ rs.getLong(PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH);
+ assertTrue(rs.wasNull());
+
+ rs = md.getTables(null, "", StringUtil.escapeLike(viewOfTable2), null);
+ assertTrue(rs.next());
+ rs.getLong(PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH);
+ assertTrue(rs.wasNull());
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/59ab3f15/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 29eee7e..b77f113 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -91,6 +91,7 @@ import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
@@ -3163,7 +3164,56 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
byte[] tableNameBytes = index.getTableName().getBytes();
return ByteUtil.concat(tenantIdBytes, SEPARATOR_BYTE_ARRAY, schemaNameBytes, SEPARATOR_BYTE_ARRAY, tableNameBytes);
}
+
+ /**
+ * Determines whether or not we have a change that needs to be propagated from a base table
+ * to its views. For example, a change to GUIDE_POSTS_WIDTH does not need to be propagated
+ * since it's only set on the physical table.
+ * @param table the table being altered
+ * @param rowKeyMetaData the filled in values for schemaName and tableName
+ * @param tableMetaData the metadata passed over from the client
+ * @return true if changes need to be propagated to the views and false otherwise.
+ */
+ private static boolean hasChangesToPropagate(PTable table, byte[][] rowKeyMetaData, List<Mutation> tableMetaData) {
+ boolean hasChangesToPropagate = true;
+ byte[] schemaName = rowKeyMetaData[SCHEMA_NAME_INDEX];
+ byte[] tableName = rowKeyMetaData[TABLE_NAME_INDEX];
+ for (Mutation m : tableMetaData) {
+ byte[] key = m.getRow();
+ int pkCount = getVarChars(key, rowKeyMetaData);
+ if (pkCount >= COLUMN_NAME_INDEX
+ && Bytes.compareTo(schemaName, rowKeyMetaData[SCHEMA_NAME_INDEX]) == 0
+ && Bytes.compareTo(tableName, rowKeyMetaData[TABLE_NAME_INDEX]) == 0) {
+ return true;
+ } else {
+ Collection<List<Cell>>cellLists = m.getFamilyCellMap().values();
+ for (List<Cell> cells : cellLists) {
+ if (cells != null) {
+ for (Cell cell : cells) {
+ byte[] qualifier = CellUtil.cloneQualifier(cell);
+ String columnName = Bytes.toString(qualifier);
+ try {
+ // Often Phoenix table properties aren't valid to be set on a view and thus
+ // do not need to be propagated. Here we check if the column name corresponds
+ // to a table property and whether that property is valid to set on a view.
+ TableProperty tableProp = TableProperty.valueOf(columnName);
+ if (tableProp.propagateToViews()) {
+ return true;
+ } else {
+ hasChangesToPropagate = false;
+ }
+ } catch (IllegalArgumentException e) {
+ }
+ }
+ }
+ }
+ }
+ }
+ return hasChangesToPropagate;
+ }
+
+
@Override
public void addColumn(RpcController controller, final AddColumnRequest request,
RpcCallback<MetaDataResponse> done) {
@@ -3188,37 +3238,40 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
// Size for worst case - all new columns are PK column
List<Mutation> mutationsForAddingColumnsToViews = Lists.newArrayListWithExpectedSize(tableMetaData.size() * ( 1 + table.getIndexes().size()));
if (type == PTableType.TABLE || type == PTableType.SYSTEM) {
- TableViewFinder childViewsResult = new TableViewFinder();
- findAllChildViews(region, tenantId, table, childViewsResult, clientTimeStamp, request.getClientVersion());
- if (childViewsResult.hasViews()) {
- /*
- * Dis-allow if:
- * 1) The meta-data for child view/s spans over
- * more than one region (since the changes cannot be made in a transactional fashion)
- *
- * 2) The base column count is 0 which means that the metadata hasn't been upgraded yet or
- * the upgrade is currently in progress.
- *
- * 3) If the request is from a client that is older than 4.5 version of phoenix.
- * Starting from 4.5, metadata requests have the client version included in them.
- * We don't want to allow clients before 4.5 to add a column to the base table if it has views.
- *
- * 4) Trying to switch tenancy of a table that has views
- */
- if (!childViewsResult.allViewsInSingleRegion()
- || table.getBaseColumnCount() == 0
- || !request.hasClientVersion()
- || switchAttribute(table, table.isMultiTenant(), tableMetaData, MULTI_TENANT_BYTES)) {
- return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION,
- EnvironmentEdgeManager.currentTimeMillis(), null);
- } else {
- mutationsForAddingColumnsToViews = new ArrayList<>(childViewsResult.getViewInfoList().size() * tableMetaData.size());
- MetaDataMutationResult mutationResult = addColumnsAndTablePropertiesToChildViews(table, tableMetaData, mutationsForAddingColumnsToViews, schemaName, tableName, invalidateList, clientTimeStamp,
- childViewsResult, region, locks, request.getClientVersion());
- // return if we were not able to add the column successfully
- if (mutationResult!=null)
- return mutationResult;
- }
+ // If change doesn't need to be propagated, don't bother finding children
+ if (hasChangesToPropagate(table, rowKeyMetaData, tableMetaData)) {
+ TableViewFinder childViewsResult = new TableViewFinder();
+ findAllChildViews(region, tenantId, table, childViewsResult, clientTimeStamp, request.getClientVersion());
+ if (childViewsResult.hasViews()) {
+ /*
+ * Dis-allow if:
+ * 1) The meta-data for child view/s spans over
+ * more than one region (since the changes cannot be made in a transactional fashion)
+ *
+ * 2) The base column count is 0 which means that the metadata hasn't been upgraded yet or
+ * the upgrade is currently in progress.
+ *
+ * 3) If the request is from a client that is older than 4.5 version of phoenix.
+ * Starting from 4.5, metadata requests have the client version included in them.
+ * We don't want to allow clients before 4.5 to add a column to the base table if it has views.
+ *
+ * 4) Trying to switch tenancy of a table that has views
+ */
+ if (!childViewsResult.allViewsInSingleRegion()
+ || table.getBaseColumnCount() == 0
+ || !request.hasClientVersion()
+ || switchAttribute(table, table.isMultiTenant(), tableMetaData, MULTI_TENANT_BYTES)) {
+ return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION,
+ EnvironmentEdgeManager.currentTimeMillis(), null);
+ } else {
+ mutationsForAddingColumnsToViews = new ArrayList<>(childViewsResult.getViewInfoList().size() * tableMetaData.size());
+ MetaDataMutationResult mutationResult = addColumnsAndTablePropertiesToChildViews(table, tableMetaData, mutationsForAddingColumnsToViews, schemaName, tableName, invalidateList, clientTimeStamp,
+ childViewsResult, region, locks, request.getClientVersion());
+ // return if we were not able to add the column successfully
+ if (mutationResult!=null)
+ return mutationResult;
+ }
+ }
}
} else if (type == PTableType.VIEW
&& EncodedColumnsUtil.usesEncodedColumnNames(table)) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/59ab3f15/phoenix-core/src/main/java/org/apache/phoenix/schema/TableProperty.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/TableProperty.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/TableProperty.java
index 78b9beb..3d473c4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/TableProperty.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/TableProperty.java
@@ -37,44 +37,44 @@ import org.apache.phoenix.transaction.TransactionFactory;
import org.apache.phoenix.util.SchemaUtil;
public enum TableProperty {
-
+
@Deprecated // use the IMMUTABLE keyword while creating the table
- IMMUTABLE_ROWS(PhoenixDatabaseMetaData.IMMUTABLE_ROWS, true, true, false) {
+ IMMUTABLE_ROWS(PhoenixDatabaseMetaData.IMMUTABLE_ROWS, true, true, false) {
@Override
public Object getPTableValue(PTable table) {
return table.isImmutableRows();
}
},
- MULTI_TENANT(PhoenixDatabaseMetaData.MULTI_TENANT, true, false, false) {
+ MULTI_TENANT(PhoenixDatabaseMetaData.MULTI_TENANT, true, false, false) {
@Override
public Object getPTableValue(PTable table) {
return table.isMultiTenant();
}
},
- DISABLE_WAL(PhoenixDatabaseMetaData.DISABLE_WAL, true, false, false) {
+ DISABLE_WAL(PhoenixDatabaseMetaData.DISABLE_WAL, true, false, false) {
@Override
public Object getPTableValue(PTable table) {
return table.isWALDisabled();
}
},
- SALT_BUCKETS(PhoenixDatabaseMetaData.SALT_BUCKETS, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, false, SALT_ONLY_ON_CREATE_TABLE, false, false) {
+ SALT_BUCKETS(PhoenixDatabaseMetaData.SALT_BUCKETS, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, false, SALT_ONLY_ON_CREATE_TABLE, false, false) {
@Override
public Object getPTableValue(PTable table) {
return table.getBucketNum();
}
},
- DEFAULT_COLUMN_FAMILY(DEFAULT_COLUMN_FAMILY_NAME, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, false, DEFAULT_COLUMN_FAMILY_ONLY_ON_CREATE_TABLE, false, false) {
+ DEFAULT_COLUMN_FAMILY(DEFAULT_COLUMN_FAMILY_NAME, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, false, DEFAULT_COLUMN_FAMILY_ONLY_ON_CREATE_TABLE, false, false) {
@Override
public Object getPTableValue(PTable table) {
return table.getDefaultFamilyName();
}
},
- TTL(HColumnDescriptor.TTL, COLUMN_FAMILY_NOT_ALLOWED_FOR_TTL, true, CANNOT_ALTER_PROPERTY, false, false) {
+ TTL(HColumnDescriptor.TTL, COLUMN_FAMILY_NOT_ALLOWED_FOR_TTL, true, CANNOT_ALTER_PROPERTY, false, false) {
@Override
public Object getPTableValue(PTable table) {
return null;
@@ -87,14 +87,14 @@ public enum TableProperty {
return table.getStoreNulls();
}
},
-
+
TRANSACTIONAL(PhoenixDatabaseMetaData.TRANSACTIONAL, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, false, false) {
@Override
public Object getPTableValue(PTable table) {
return table.isTransactional();
}
},
-
+
TRANSACTION_PROVIDER(PhoenixDatabaseMetaData.TRANSACTION_PROVIDER, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, false, false) {
@Override
public Object getPTableValue(PTable table) {
@@ -113,28 +113,28 @@ public enum TableProperty {
},
UPDATE_CACHE_FREQUENCY(PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY, true, true, true) {
- @Override
+ @Override
public Object getValue(Object value) {
- if (value instanceof String) {
- String strValue = (String) value;
- if ("ALWAYS".equalsIgnoreCase(strValue)) {
- return 0L;
- } else if ("NEVER".equalsIgnoreCase(strValue)) {
- return Long.MAX_VALUE;
- }
- } else {
- return value == null ? null : ((Number) value).longValue();
- }
- return value;
- }
+ if (value instanceof String) {
+ String strValue = (String) value;
+ if ("ALWAYS".equalsIgnoreCase(strValue)) {
+ return 0L;
+ } else if ("NEVER".equalsIgnoreCase(strValue)) {
+ return Long.MAX_VALUE;
+ }
+ } else {
+ return value == null ? null : ((Number) value).longValue();
+ }
+ return value;
+ }
@Override
public Object getPTableValue(PTable table) {
return table.getUpdateCacheFrequency();
- }
- },
-
- AUTO_PARTITION_SEQ(PhoenixDatabaseMetaData.AUTO_PARTITION_SEQ, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, false, false, false) {
+ }
+ },
+
+ AUTO_PARTITION_SEQ(PhoenixDatabaseMetaData.AUTO_PARTITION_SEQ, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, false, false, false) {
@Override
public Object getValue(Object value) {
return value == null ? null : SchemaUtil.normalizeIdentifier(value.toString());
@@ -144,15 +144,15 @@ public enum TableProperty {
public Object getPTableValue(PTable table) {
return table.getAutoPartitionSeqName();
}
- },
-
- APPEND_ONLY_SCHEMA(PhoenixDatabaseMetaData.APPEND_ONLY_SCHEMA, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, true, false) {
+ },
+
+ APPEND_ONLY_SCHEMA(PhoenixDatabaseMetaData.APPEND_ONLY_SCHEMA, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, true, false) {
@Override
public Object getPTableValue(PTable table) {
return table.isAppendOnlySchema();
}
},
- GUIDE_POSTS_WIDTH(PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH, true, false, false) {
+ GUIDE_POSTS_WIDTH(PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH, true, false, false, false) {
@Override
public Object getValue(Object value) {
return value == null ? null : ((Number) value).longValue();
@@ -162,30 +162,30 @@ public enum TableProperty {
public Object getPTableValue(PTable table) {
return null;
}
-
- },
-
- COLUMN_ENCODED_BYTES(PhoenixDatabaseMetaData.ENCODING_SCHEME, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, false, false, false) {
- @Override
+
+ },
+
+ COLUMN_ENCODED_BYTES(PhoenixDatabaseMetaData.ENCODING_SCHEME, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, false, false, false) {
+ @Override
public Object getValue(Object value) {
- if (value instanceof String) {
- String strValue = (String) value;
- if ("NONE".equalsIgnoreCase(strValue)) {
- return (byte)0;
- }
- } else {
- return value == null ? null : ((Number) value).byteValue();
- }
- return value;
- }
-
- @Override
- public Object getPTableValue(PTable table) {
- return table.getEncodingScheme();
- }
-
- },
-
+ if (value instanceof String) {
+ String strValue = (String) value;
+ if ("NONE".equalsIgnoreCase(strValue)) {
+ return (byte)0;
+ }
+ } else {
+ return value == null ? null : ((Number) value).byteValue();
+ }
+ return value;
+ }
+
+ @Override
+ public Object getPTableValue(PTable table) {
+ return table.getEncodingScheme();
+ }
+
+ },
+
IMMUTABLE_STORAGE_SCHEME(PhoenixDatabaseMetaData.IMMUTABLE_STORAGE_SCHEME, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, true, false, false) {
@Override
public ImmutableStorageScheme getValue(Object value) {
@@ -203,7 +203,7 @@ public enum TableProperty {
public Object getPTableValue(PTable table) {
return table.getImmutableStorageScheme();
}
-
+
},
USE_STATS_FOR_PARALLELIZATION(PhoenixDatabaseMetaData.USE_STATS_FOR_PARALLELIZATION, true, true, true) {
@@ -224,81 +224,91 @@ public enum TableProperty {
}
}
;
-
- private final String propertyName;
- private final SQLExceptionCode colFamSpecifiedException;
- private final boolean isMutable; // whether or not a property can be changed through statements like ALTER TABLE.
- private final SQLExceptionCode mutatingImmutablePropException;
- private final boolean isValidOnView;
- private final boolean isMutableOnView;
-
- private TableProperty(String propertyName, boolean isMutable, boolean isValidOnView, boolean isMutableOnView) {
- this(propertyName, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, isMutable, CANNOT_ALTER_PROPERTY, isValidOnView, isMutableOnView);
- }
-
- private TableProperty(String propertyName, SQLExceptionCode colFamilySpecifiedException, boolean isMutable, boolean isValidOnView, boolean isMutableOnView) {
- this(propertyName, colFamilySpecifiedException, isMutable, CANNOT_ALTER_PROPERTY, isValidOnView, isMutableOnView);
- }
-
- private TableProperty(String propertyName, boolean isMutable, boolean isValidOnView, boolean isMutableOnView, SQLExceptionCode isMutatingException) {
- this(propertyName, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, isMutable, isMutatingException, isValidOnView, isMutableOnView);
- }
-
- private TableProperty(String propertyName, SQLExceptionCode colFamSpecifiedException, boolean isMutable, SQLExceptionCode mutatingException, boolean isValidOnView, boolean isMutableOnView) {
- this.propertyName = propertyName;
- this.colFamSpecifiedException = colFamSpecifiedException;
- this.isMutable = isMutable;
- this.mutatingImmutablePropException = mutatingException;
- this.isValidOnView = isValidOnView;
- this.isMutableOnView = isMutableOnView;
- }
-
- public static boolean isPhoenixTableProperty(String property) {
- try {
- TableProperty.valueOf(property);
- } catch (IllegalArgumentException e) {
- return false;
- }
- return true;
- }
-
- public Object getValue(Object value) {
- return value;
- }
-
+
+ private final String propertyName;
+ private final SQLExceptionCode colFamSpecifiedException;
+ private final boolean isMutable; // whether or not a property can be changed through statements like ALTER TABLE.
+ private final SQLExceptionCode mutatingImmutablePropException;
+ private final boolean isValidOnView;
+ private final boolean isMutableOnView;
+ private final boolean propagateToViews;
+
+ private TableProperty(String propertyName, boolean isMutable, boolean isValidOnView, boolean isMutableOnView) {
+ this(propertyName, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, isMutable, CANNOT_ALTER_PROPERTY, isValidOnView, isMutableOnView, true);
+ }
+
+ private TableProperty(String propertyName, boolean isMutable, boolean isValidOnView, boolean isMutableOnView, boolean propagateToViews) {
+ this(propertyName, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, isMutable, CANNOT_ALTER_PROPERTY, isValidOnView, isMutableOnView, propagateToViews);
+ }
+
+ private TableProperty(String propertyName, SQLExceptionCode colFamilySpecifiedException, boolean isMutable, boolean isValidOnView, boolean isMutableOnView) {
+ this(propertyName, colFamilySpecifiedException, isMutable, CANNOT_ALTER_PROPERTY, isValidOnView, isMutableOnView, true);
+ }
+
+ private TableProperty(String propertyName, boolean isMutable, boolean isValidOnView, boolean isMutableOnView, SQLExceptionCode isMutatingException) {
+ this(propertyName, COLUMN_FAMILY_NOT_ALLOWED_TABLE_PROPERTY, isMutable, isMutatingException, isValidOnView, isMutableOnView, true);
+ }
+
+ private TableProperty(String propertyName, SQLExceptionCode colFamSpecifiedException, boolean isMutable, SQLExceptionCode mutatingException, boolean isValidOnView, boolean isMutableOnView) {
+ this(propertyName, colFamSpecifiedException, isMutable, mutatingException, isValidOnView, isMutableOnView, true);
+ }
+
+ private TableProperty(String propertyName, SQLExceptionCode colFamSpecifiedException, boolean isMutable, SQLExceptionCode mutatingException, boolean isValidOnView, boolean isMutableOnView, boolean propagateToViews) {
+ this.propertyName = propertyName;
+ this.colFamSpecifiedException = colFamSpecifiedException;
+ this.isMutable = isMutable;
+ this.mutatingImmutablePropException = mutatingException;
+ this.isValidOnView = isValidOnView;
+ this.isMutableOnView = isMutableOnView;
+ this.propagateToViews = propagateToViews;
+ }
+
+ public static boolean isPhoenixTableProperty(String property) {
+ try {
+ TableProperty.valueOf(property);
+ } catch (IllegalArgumentException e) {
+ return false;
+ }
+ return true;
+ }
+
+ public Object getValue(Object value) {
+ return value;
+ }
+
public Object getValue(Map<String, Object> props) {
return getValue(props.get(this.toString()));
}
-
- // isQualified is true if column family name is specified in property name
- public void validate(boolean isMutating, boolean isQualified, PTableType tableType) throws SQLException {
- checkForColumnFamily(isQualified);
- checkIfApplicableForView(tableType);
- checkForMutability(isMutating,tableType);
- }
-
- private void checkForColumnFamily(boolean isQualified) throws SQLException {
- if (isQualified) {
- throw new SQLExceptionInfo.Builder(colFamSpecifiedException).setMessage(". Property: " + propertyName).build().buildException();
- }
- }
-
- private void checkForMutability(boolean isMutating, PTableType tableType) throws SQLException {
- if (isMutating && !isMutable) {
- throw new SQLExceptionInfo.Builder(mutatingImmutablePropException).setMessage(". Property: " + propertyName).build().buildException();
- }
- if (isMutating && tableType == PTableType.VIEW && !isMutableOnView) {
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_TABLE_PROPERTY_ON_VIEW).setMessage(". Property: " + propertyName).build().buildException();
- }
- }
-
- private void checkIfApplicableForView(PTableType tableType)
- throws SQLException {
- if (tableType == PTableType.VIEW && !isValidOnView) {
- throw new SQLExceptionInfo.Builder(
- VIEW_WITH_PROPERTIES).setMessage("Property: " + propertyName).build().buildException();
- }
- }
+
+ // isQualified is true if column family name is specified in property name
+ public void validate(boolean isMutating, boolean isQualified, PTableType tableType) throws SQLException {
+ checkForColumnFamily(isQualified);
+ checkIfApplicableForView(tableType);
+ checkForMutability(isMutating,tableType);
+ }
+
+ private void checkForColumnFamily(boolean isQualified) throws SQLException {
+ if (isQualified) {
+ throw new SQLExceptionInfo.Builder(colFamSpecifiedException).setMessage(". Property: " + propertyName).build().buildException();
+ }
+ }
+
+ private void checkForMutability(boolean isMutating, PTableType tableType) throws SQLException {
+ if (isMutating && !isMutable) {
+ throw new SQLExceptionInfo.Builder(mutatingImmutablePropException).setMessage(". Property: " + propertyName).build().buildException();
+ }
+ if (isMutating && tableType == PTableType.VIEW && !isMutableOnView) {
+ throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ALTER_TABLE_PROPERTY_ON_VIEW).setMessage(". Property: " + propertyName).build().buildException();
+ }
+ }
+
+ private void checkIfApplicableForView(PTableType tableType)
+ throws SQLException {
+ if (tableType == PTableType.VIEW && !isValidOnView) {
+ throw new SQLExceptionInfo.Builder(
+ VIEW_WITH_PROPERTIES).setMessage("Property: " + propertyName).build().buildException();
+ }
+ }
public String getPropertyName() {
return propertyName;
@@ -315,7 +325,11 @@ public enum TableProperty {
public boolean isMutableOnView() {
return isMutableOnView;
}
-
+
+ public boolean propagateToViews() {
+ return propagateToViews;
+ }
+
abstract public Object getPTableValue(PTable table);
-
+
}
[4/6] phoenix git commit: PHOENIX-4698 Tolerate orphaned views
(Maddineni Sukumar)
Posted by ja...@apache.org.
PHOENIX-4698 Tolerate orphaned views (Maddineni Sukumar)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7096a682
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7096a682
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7096a682
Branch: refs/heads/4.x-HBase-1.3
Commit: 7096a682c3bd1f6754b8294c706ebb38c00e0af1
Parents: f9369f8
Author: James Taylor <jt...@salesforce.com>
Authored: Thu Apr 19 14:42:24 2018 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Thu Apr 26 09:51:01 2018 -0700
----------------------------------------------------------------------
.../coprocessor/MetaDataEndpointImpl.java | 50 +++++++++++++-------
1 file changed, 33 insertions(+), 17 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7096a682/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index c28ad3c..ddd3ffe 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -592,7 +592,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
done.run(builder.build());
return;
} catch (Throwable t) {
- logger.error("getTable failed", t);
+ logger.error("getTable failed", t);
ProtobufUtil.setControllerException(controller,
ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
}
@@ -755,9 +755,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
// compatibility.
Cell sortOrderKv = colKeyValues[SORT_ORDER_INDEX];
SortOrder sortOrder =
- sortOrderKv == null ? SortOrder.getDefault() : SortOrder.fromSystemValue(PInteger.INSTANCE
+ sortOrderKv == null ? SortOrder.getDefault() : SortOrder.fromSystemValue(PInteger.INSTANCE
.getCodec().decodeInt(sortOrderKv.getValueArray(),
- sortOrderKv.getValueOffset(), SortOrder.getDefault()));
+ sortOrderKv.getValueOffset(), SortOrder.getDefault()));
Cell arraySizeKv = colKeyValues[ARRAY_SIZE_INDEX];
Integer arraySize = arraySizeKv == null ? null :
@@ -1313,9 +1313,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
return table.getName() == null;
}
- private static boolean isSchemaDeleted(PSchema schema) {
- return schema.getSchemaName() == null;
- }
+ private static boolean isSchemaDeleted(PSchema schema) {
+ return schema.getSchemaName() == null;
+ }
private static boolean isFunctionDeleted(PFunction function) {
return function.getFunctionName() == null;
@@ -1827,6 +1827,13 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
byte[] tableKey = SchemaUtil.getTableKey(viewtenantId, viewSchema, viewTable);
ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(tableKey);
PTable view = loadTable(env, tableKey, cacheKey, clientTimeStamp, clientTimeStamp, clientVersion);
+ if (view == null) {
+ logger.warn("Found orphan tenant view row in SYSTEM.CATALOG with tenantId:"
+ + Bytes.toString(tenantId) + ", schema:"
+ + Bytes.toString(viewSchema) + ", table:"
+ + Bytes.toString(viewTable));
+ continue;
+ }
findAllChildViews(region, viewtenantId, view, result, clientTimeStamp, clientVersion);
}
}
@@ -1969,8 +1976,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
if (systemCatalog.getTimeStamp() < MIN_SYSTEM_TABLE_TIMESTAMP_4_11_0) {
return findChildViews_deprecated(region, tenantId, table, PHYSICAL_TABLE_BYTES, stopAfterFirst);
} else {
- return findChildViews_4_11(region, tenantId, table.getSchemaName().getBytes(),
- table.getTableName().getBytes(), stopAfterFirst);
+ return findChildViews_4_11(region, tenantId,
+ table.getSchemaName() == null ? ByteUtil.EMPTY_BYTE_ARRAY : table.getSchemaName().getBytes(),
+ table.getTableName().getBytes(), stopAfterFirst);
}
}
@@ -2548,6 +2556,14 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
// lock the rows corresponding to views so that no other thread can modify the view meta-data
RowLock viewRowLock = acquireLock(region, viewKey, locks);
PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock, clientVersion);
+ if (view == null) {
+ logger.warn("Found orphan tenant view row in SYSTEM.CATALOG with tenantId:"
+ + Bytes.toString(tenantId) + ", schema:"
+ + Bytes.toString(schema) + ", table:"
+ + Bytes.toString(table));
+ continue;
+ }
+
ColumnOrdinalPositionUpdateList ordinalPositionList = new ColumnOrdinalPositionUpdateList();
List<PColumn> viewPkCols = new ArrayList<>(view.getPKColumns());
boolean addingExistingPkCol = false;
@@ -2705,12 +2721,12 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
for (TableProperty tableProp : TableProperty.values()) {
Cell tablePropertyCell = tablePropertyCellMap.get(tableProp);
if ( tablePropertyCell != null) {
- // set this table property on the view :
- // 1. if it is not mutable on a view (which means the property is always the same as the base table)
- // 2. or if it is mutable on a view and if it doesn't exist on the view
- // 3. or if it is mutable on a view and the property value is the same as the base table property (which means it wasn't changed on the view)
+ // set this table property on the view :
+ // 1. if it is not mutable on a view (which means the property is always the same as the base table)
+ // 2. or if it is mutable on a view and if it doesn't exist on the view
+ // 3. or if it is mutable on a view and the property value is the same as the base table property (which means it wasn't changed on the view)
Object viewProp = tableProp.getPTableValue(view);
- if (!tableProp.isMutableOnView() || viewProp==null || viewProp.equals(tableProp.getPTableValue(basePhysicalTable))) {
+ if (!tableProp.isMutableOnView() || viewProp==null || viewProp.equals(tableProp.getPTableValue(basePhysicalTable))) {
viewHeaderRowPut.add(CellUtil.createCell(viewKey, CellUtil.cloneFamily(tablePropertyCell),
CellUtil.cloneQualifier(tablePropertyCell), clientTimeStamp, tablePropertyCell.getTypeByte(),
CellUtil.cloneValue(tablePropertyCell)));
@@ -2800,10 +2816,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
// if switching from non tx to tx
if (!basePhysicalTable.isTransactional() && switchAttribute(basePhysicalTable, basePhysicalTable.isTransactional(), tableMetadata, TRANSACTIONAL_BYTES)) {
- invalidateList.add(new ImmutableBytesPtr(viewKey));
- Put put = new Put(viewKey);
- put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
- TRANSACTIONAL_BYTES, clientTimeStamp, PBoolean.INSTANCE.toBytes(true));
+ invalidateList.add(new ImmutableBytesPtr(viewKey));
+ Put put = new Put(viewKey);
+ put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+ TRANSACTIONAL_BYTES, clientTimeStamp, PBoolean.INSTANCE.toBytes(true));
mutationsForAddingColumnsToViews.add(put);
}
}
[3/6] phoenix git commit: PHOENIX-4694 Prevent locking of parent
table when dropping view to reduce contention
Posted by ja...@apache.org.
PHOENIX-4694 Prevent locking of parent table when dropping view to reduce contention
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2e177126
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2e177126
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2e177126
Branch: refs/heads/4.x-HBase-1.3
Commit: 2e1771261656002729b5943f2c4b2050a7bb8a1b
Parents: 4b7a14c
Author: James Taylor <jt...@salesforce.com>
Authored: Thu Apr 19 15:09:43 2018 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Thu Apr 26 09:51:01 2018 -0700
----------------------------------------------------------------------
.../java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2e177126/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index ddd3ffe..29eee7e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -2007,8 +2007,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
List<byte[]> tableNamesToDelete = Lists.newArrayList();
List<SharedTableState> sharedTablesToDelete = Lists.newArrayList();
+ // No need to lock parent table for views
byte[] parentTableName = MetaDataUtil.getParentTableName(tableMetadata);
- byte[] lockTableName = parentTableName == null ? tableName : parentTableName;
+ byte[] lockTableName = parentTableName == null || tableType.equals(PTableType.VIEW.getSerializedValue()) ? tableName : parentTableName;
byte[] lockKey = SchemaUtil.getTableKey(tenantIdBytes, schemaName, lockTableName);
byte[] key =
parentTableName == null ? lockKey : SchemaUtil.getTableKey(tenantIdBytes,
[5/6] phoenix git commit: PHOENIX-4686 Phoenix stats does not account
for server side limit push downs (Abhishek Chouhan)
Posted by ja...@apache.org.
PHOENIX-4686 Phoenix stats does not account for server side limit push downs (Abhishek Chouhan)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b173aaf7
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b173aaf7
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b173aaf7
Branch: refs/heads/4.x-HBase-1.3
Commit: b173aaf79b282c8be1564d2be9981e7c11d40686
Parents: 2e17712
Author: James Taylor <jt...@salesforce.com>
Authored: Thu Apr 26 09:14:52 2018 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Thu Apr 26 09:51:01 2018 -0700
----------------------------------------------------------------------
.../end2end/ExplainPlanWithStatsEnabledIT.java | 49 ++++++++++++++++++-
.../org/apache/phoenix/execute/ScanPlan.java | 4 +-
.../phoenix/iterate/BaseResultIterators.java | 50 +++++++++++++++++---
.../phoenix/iterate/ParallelIterators.java | 8 ++++
.../apache/phoenix/iterate/SerialIterators.java | 13 ++---
.../phoenix/schema/stats/StatisticsUtil.java | 6 +++
6 files changed, 111 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b173aaf7/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
index 2099f4c..abaa2f6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
@@ -40,6 +40,7 @@ import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTableKey;
import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.schema.stats.StatisticsUtil;
import org.apache.phoenix.schema.types.PInteger;
import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.EnvironmentEdge;
@@ -123,6 +124,50 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
}
@Test
+ public void testBytesRowsForPointSelectWithLimitGreaterThanPointLookupSize() throws Exception {
+ String sql = "SELECT * FROM " + tableA + " where k in (? ,?) limit 4";
+ List<Object> binds = Lists.newArrayList();
+ binds.add(103); binds.add(104);
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ Estimate info = getByteRowEstimates(conn, sql, binds);
+ assertEquals((Long) 200L, info.estimatedBytes);
+ assertEquals((Long) 2L, info.estimatedRows);
+ assertEquals((Long) StatisticsUtil.NOT_STATS_BASED_TS, info.estimateInfoTs);
+ }
+ }
+
+ @Test
+ public void testBytesRowsForSelectWithLimit() throws Exception {
+ String sql = "SELECT * FROM " + tableA + " where c1.a in (?,?) limit 3";
+ String noIndexSQL = "SELECT /*+ NO_INDEX */ * FROM " + tableA + " where c1.a in (?,?) limit 3";
+ List<Object> binds = Lists.newArrayList();
+ binds.add(1); binds.add(2);
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ Estimate info = getByteRowEstimates(conn, sql, binds);
+ assertEquals((Long) 264L, info.estimatedBytes);
+ assertEquals((Long) 3L, info.estimatedRows);
+ assertEquals((Long) StatisticsUtil.NOT_STATS_BASED_TS, info.estimateInfoTs);
+
+ info = getByteRowEstimates(conn, noIndexSQL, binds);
+ assertEquals((Long) 634L, info.estimatedBytes);
+ assertEquals((Long) 10L, info.estimatedRows);
+ assertTrue(info.estimateInfoTs > 0);
+ }
+ }
+
+ @Test
+ public void testBytesRowsForSelectWithLimitIgnored() throws Exception {
+ String sql = "SELECT * FROM " + tableA + " where (c1.a > c2.b) limit 1";
+ List<Object> binds = Lists.newArrayList();
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ Estimate info = getByteRowEstimates(conn, sql, binds);
+ assertEquals((Long) 691L, info.estimatedBytes);
+ assertEquals((Long) 10L, info.estimatedRows);
+ assertTrue(info.estimateInfoTs > 0);
+ }
+ }
+
+ @Test
public void testBytesRowsForSelectWhenKeyInRange() throws Exception {
String sql = "SELECT * FROM " + tableB + " where k >= ?";
List<Object> binds = Lists.newArrayList();
@@ -278,7 +323,7 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
Estimate info = getByteRowEstimates(conn, sql, binds);
assertEquals((Long) 200L, info.estimatedBytes);
assertEquals((Long) 2L, info.estimatedRows);
- assertTrue(info.estimateInfoTs > 0);
+ assertEquals((Long) StatisticsUtil.NOT_STATS_BASED_TS, info.estimateInfoTs);
}
}
@@ -305,7 +350,7 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
Estimate info = getByteRowEstimates(conn, sql, binds);
assertEquals((Long) 176L, info.estimatedBytes);
assertEquals((Long) 2L, info.estimatedRows);
- assertTrue(info.estimateInfoTs > 0);
+ assertEquals((Long) StatisticsUtil.NOT_STATS_BASED_TS, info.estimateInfoTs);
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b173aaf7/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
index ed145a4..cdb2da5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
@@ -68,8 +68,8 @@ import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTable.IndexType;
import org.apache.phoenix.schema.SaltingUtil;
import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.stats.StatisticsUtil;
import org.apache.phoenix.util.CostUtil;
-import org.apache.phoenix.util.EnvironmentEdgeManager;
import org.apache.phoenix.util.QueryUtil;
import org.apache.phoenix.util.ScanUtil;
import org.apache.phoenix.util.SchemaUtil;
@@ -124,7 +124,7 @@ public class ScanPlan extends BaseQueryPlan {
if (isSerial) {
serialBytesEstimate = estimate.getFirst();
serialRowsEstimate = estimate.getSecond();
- serialEstimateInfoTs = EnvironmentEdgeManager.currentTimeMillis();
+ serialEstimateInfoTs = StatisticsUtil.NOT_STATS_BASED_TS;
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b173aaf7/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 682d1ed..aa9a9f5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -76,6 +77,7 @@ import org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.exception.SQLExceptionInfo;
import org.apache.phoenix.execute.MutationState;
+import org.apache.phoenix.filter.BooleanExpressionFilter;
import org.apache.phoenix.filter.ColumnProjectionFilter;
import org.apache.phoenix.filter.DistinctPrefixFilter;
import org.apache.phoenix.filter.EncodedQualifiersColumnProjectionFilter;
@@ -170,6 +172,8 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
return plan.getTableRef().getTable();
}
+ abstract protected boolean isSerial();
+
protected boolean useStats() {
/*
* Don't use guide posts:
@@ -180,7 +184,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
if (ScanUtil.isAnalyzeTable(scan)) {
return false;
}
- return true;
+ return !isSerial();
}
private static void initializeScan(QueryPlan plan, Integer perScanLimit, Integer offset, Scan scan) throws SQLException {
@@ -1105,10 +1109,25 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
}
regionIndex++;
}
- if (scanRanges.isPointLookup()) {
- this.estimatedRows = Long.valueOf(scanRanges.getPointLookupCount());
+ if (!scans.isEmpty()) { // Add any remaining scans
+ parallelScans.add(scans);
+ }
+ Long pageLimit = getUnfilteredPageLimit(scan);
+ if (scanRanges.isPointLookup() || pageLimit != null) {
+ // If run in parallel, the limit is pushed to each parallel scan so must be accounted for in all of them
+ int parallelFactor = this.isSerial() ? 1 : parallelScans.size();
+ if (scanRanges.isPointLookup() && pageLimit != null) {
+ this.estimatedRows = Long.valueOf(Math.min(scanRanges.getPointLookupCount(), pageLimit * parallelFactor));
+ } else if (scanRanges.isPointLookup()) {
+ this.estimatedRows = Long.valueOf(scanRanges.getPointLookupCount());
+ } else {
+ this.estimatedRows = Long.valueOf(pageLimit) * parallelFactor;
+ }
this.estimatedSize = this.estimatedRows * SchemaUtil.estimateRowSize(table);
- this.estimateInfoTimestamp = computeMinTimestamp(gpsAvailableForAllRegions, estimates, fallbackTs);
+ // Indication to client that the statistics estimates were not
+ // calculated based on statistics but instead are based on row
+ // limits from the query.
+ this.estimateInfoTimestamp = StatisticsUtil.NOT_STATS_BASED_TS;
} else if (emptyGuidePost) {
// In case of an empty guide post, we estimate the number of rows scanned by
// using the estimated row size
@@ -1124,9 +1143,6 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
this.estimatedSize = null;
this.estimateInfoTimestamp = null;
}
- if (!scans.isEmpty()) { // Add any remaining scans
- parallelScans.add(scans);
- }
} finally {
if (stream != null) Closeables.closeQuietly(stream);
}
@@ -1134,6 +1150,26 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
return parallelScans;
}
+ /**
+ * Return row count limit of PageFilter if exists and there is no where
+ * clause filter.
+ * @return
+ */
+ private static Long getUnfilteredPageLimit(Scan scan) {
+ Long pageLimit = null;
+ Iterator<Filter> filters = ScanUtil.getFilterIterator(scan);
+ while (filters.hasNext()) {
+ Filter filter = filters.next();
+ if (filter instanceof BooleanExpressionFilter) {
+ return null;
+ }
+ if (filter instanceof PageFilter) {
+ pageLimit = ((PageFilter)filter).getPageSize();
+ }
+ }
+ return pageLimit;
+ }
+
private static Long computeMinTimestamp(boolean gpsAvailableForAllRegions,
GuidePostEstimate estimates,
long fallbackTs) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b173aaf7/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
index 3a4b084..41d278d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
@@ -69,6 +69,14 @@ public class ParallelIterators extends BaseResultIterators {
this(plan, perScanLimit, iteratorFactory, DefaultParallelScanGrouper.getInstance(), scan, initOneScanPerRegion, caches, dataPlan);
}
+ /**
+ * No need to use stats when executing serially
+ */
+ @Override
+ protected boolean isSerial() {
+ return false;
+ }
+
@Override
protected void submitWork(final List<List<Scan>> nestedScans, List<List<Pair<Scan,Future<PeekingResultIterator>>>> nestedFutures,
final Queue<PeekingResultIterator> allIterators, int estFlattenedSize, final boolean isReverse, ParallelScanGrouper scanGrouper) throws SQLException {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b173aaf7/phoenix-core/src/main/java/org/apache/phoenix/iterate/SerialIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SerialIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SerialIterators.java
index f94a7c9..c13fcdb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SerialIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SerialIterators.java
@@ -73,6 +73,11 @@ public class SerialIterators extends BaseResultIterators {
}
@Override
+ protected boolean isSerial() {
+ return true;
+ }
+
+ @Override
protected void submitWork(final List<List<Scan>> nestedScans, List<List<Pair<Scan,Future<PeekingResultIterator>>>> nestedFutures,
final Queue<PeekingResultIterator> allIterators, int estFlattenedSize, boolean isReverse, final ParallelScanGrouper scanGrouper) {
ExecutorService executor = context.getConnection().getQueryServices().getExecutor();
@@ -117,14 +122,6 @@ public class SerialIterators extends BaseResultIterators {
}
}
- /**
- * No need to use stats when executing serially
- */
- @Override
- protected boolean useStats() {
- return false;
- }
-
@Override
protected String getName() {
return NAME;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b173aaf7/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
index 0b9c409..4a758b7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
@@ -47,6 +47,12 @@ import com.google.common.collect.Sets;
* Simple utility class for managing multiple key parts of the statistic
*/
public class StatisticsUtil {
+ /**
+ * Indication to client that the statistics estimates were not
+ * calculated based on statistics but instead are based on row
+ * limits from the query.
+ */
+ public static final long NOT_STATS_BASED_TS = 0;
private static final Set<TableName> DISABLE_STATS = Sets.newHashSetWithExpectedSize(8);
// TODO: make this declarative through new DISABLE_STATS column on SYSTEM.CATALOG table.
[6/6] phoenix git commit: PHOENIX-4700 Fix split policy on system
tables other than SYSTEM.CATALOG
Posted by ja...@apache.org.
PHOENIX-4700 Fix split policy on system tables other than SYSTEM.CATALOG
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4b7a14cd
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4b7a14cd
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4b7a14cd
Branch: refs/heads/4.x-HBase-1.3
Commit: 4b7a14cd0dc04c368366c8871531cdb41c7283d5
Parents: 7096a68
Author: James Taylor <jt...@salesforce.com>
Authored: Mon Apr 23 10:14:36 2018 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Thu Apr 26 09:51:01 2018 -0700
----------------------------------------------------------------------
.../apache/phoenix/query/QueryConstants.java | 117 +------------------
.../SplitOnLeadingVarCharColumnsPolicy.java | 43 +++++++
.../schema/SystemFunctionSplitPolicy.java | 27 +++++
.../phoenix/schema/SystemStatsSplitPolicy.java | 27 +++++
.../phoenix/schema/SystemSplitPolicyTest.java | 97 +++++++++++++++
5 files changed, 200 insertions(+), 111 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4b7a14cd/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
index 22fa2f4..4285334 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
@@ -18,112 +18,7 @@
package org.apache.phoenix.query;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.APPEND_ONLY_SCHEMA;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARG_POSITION;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.AUTO_PARTITION_SEQ;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.BASE_COLUMN_COUNT;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.BUFFER_LENGTH;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CACHE_SIZE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CHAR_OCTET_LENGTH;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CLASS_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_COUNT;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_DEF;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_FAMILY;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_QUALIFIER;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_QUALIFIER_COUNTER;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_SIZE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CURRENT_VALUE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CYCLE_FLAG;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DATA_TABLE_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DATA_TYPE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DECIMAL_DIGITS;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_VALUE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DISABLE_WAL;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ENCODING_SCHEME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.FUNCTION_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.GUIDE_POST_KEY;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IMMUTABLE_ROWS;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IMMUTABLE_STORAGE_SCHEME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INCREMENT_BY;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_STATE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_TYPE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_ARRAY;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_AUTOINCREMENT;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_CONSTANT;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_NULLABLE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_ROW_TIMESTAMP;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_VIEW_REFERENCED;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.JAR_PATH;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.KEY_SEQ;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LAST_STATS_UPDATE_TIME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MAX_VALUE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MIN_VALUE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.MULTI_TENANT;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NULLABLE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NUM_ARGS;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NUM_PREC_RADIX;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ORDINAL_POSITION;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PHYSICAL_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.PK_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.REF_GENERATION;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.REMARKS;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.RETURN_TYPE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SALT_BUCKETS;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SCOPE_CATALOG;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SCOPE_SCHEMA;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SCOPE_TABLE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SELF_REFERENCING_COL_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SEQUENCE_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SEQUENCE_SCHEMA;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SORT_ORDER;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SOURCE_DATA_TYPE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SQL_DATA_TYPE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SQL_DATETIME_SUB;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.START_WITH;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.STORE_NULLS;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_TABLE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SEQ_NUM;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_TYPE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TENANT_ID;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TRANSACTIONAL;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TRANSACTION_PROVIDER;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE_SEQUENCE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.USE_STATS_FOR_PARALLELIZATION;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.BIND_PARAMETERS;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CLIENT_IP;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.EXCEPTION_TRACE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.EXPLAIN_PLAN;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.GLOBAL_SCAN_DETAILS;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.NO_OF_RESULTS_ITERATED;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.QUERY;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.QUERY_ID;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.QUERY_STATUS;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SCAN_METRICS_JSON;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.START_TIME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TOTAL_EXECUTION_TIME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.USER;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_LOG_TABLE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.*;
import java.math.BigDecimal;
@@ -139,6 +34,8 @@ import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PNameFactory;
import org.apache.phoenix.schema.PTable.QualifierEncodingScheme;
import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.SystemFunctionSplitPolicy;
+import org.apache.phoenix.schema.SystemStatsSplitPolicy;
import org.apache.phoenix.schema.TableProperty;
@@ -362,7 +259,7 @@ public interface QueryConstants {
+ PHYSICAL_NAME + ","
+ COLUMN_FAMILY + ","+ GUIDE_POST_KEY+"))\n" +
// Install split policy to prevent a physical table's stats from being split across regions.
- HTableDescriptor.SPLIT_POLICY + "='" + MetaDataSplitPolicy.class.getName() + "',\n" +
+ HTableDescriptor.SPLIT_POLICY + "='" + SystemStatsSplitPolicy.class.getName() + "',\n" +
PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
public static final String CREATE_SEQUENCE_METADATA =
@@ -406,7 +303,7 @@ public interface QueryConstants {
HConstants.VERSIONS + "=%s,\n" +
HColumnDescriptor.KEEP_DELETED_CELLS + "=%s,\n"+
// Install split policy to prevent a tenant's metadata from being split across regions.
- HTableDescriptor.SPLIT_POLICY + "='" + MetaDataSplitPolicy.class.getName() + "',\n" +
+ HTableDescriptor.SPLIT_POLICY + "='" + SystemFunctionSplitPolicy.class.getName() + "',\n" +
PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
public static final String CREATE_LOG_METADATA =
@@ -431,9 +328,7 @@ public interface QueryConstants {
" CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (QUERY_ID))\n" +
HConstants.VERSIONS + "= " + MetaDataProtocol.DEFAULT_LOG_VERSIONS + ",\n" +
HColumnDescriptor.KEEP_DELETED_CELLS + "=%s,\n"+
- // Install split policy to prevent a tenant's metadata from being split across regions.
- HTableDescriptor.SPLIT_POLICY + "='" + MetaDataSplitPolicy.class.getName() + "',\n" +
- PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE+ ",\n" +
+ PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE+ ",\n" +
HColumnDescriptor.TTL + "=" + MetaDataProtocol.DEFAULT_LOG_TTL+",\n"+
TableProperty.COLUMN_ENCODED_BYTES.toString()+" = 0";
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4b7a14cd/phoenix-core/src/main/java/org/apache/phoenix/schema/SplitOnLeadingVarCharColumnsPolicy.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/SplitOnLeadingVarCharColumnsPolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/SplitOnLeadingVarCharColumnsPolicy.java
new file mode 100644
index 0000000..d481998
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/SplitOnLeadingVarCharColumnsPolicy.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
+import org.apache.phoenix.util.SchemaUtil;
+
+public abstract class SplitOnLeadingVarCharColumnsPolicy extends ConstantSizeRegionSplitPolicy {
+ abstract protected int getColumnToSplitAt();
+
+ protected final byte[] getSplitPoint(byte[] splitPoint) {
+ int offset = SchemaUtil.getVarCharLength(splitPoint, 0, splitPoint.length, getColumnToSplitAt());
+ // Only split between leading columns indicated.
+ if (offset == splitPoint.length) {
+ return splitPoint;
+ }
+ // Otherwise, an attempt is being made to split in the middle of a table.
+ // Just return a split point at the boundary of the first two columns instead
+ byte[] newSplitPoint = new byte[offset + 1];
+ System.arraycopy(splitPoint, 0, newSplitPoint, 0, offset+1);
+ return newSplitPoint;
+ }
+
+ @Override
+ protected final byte[] getSplitPoint() {
+ return getSplitPoint(super.getSplitPoint());
+ }
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4b7a14cd/phoenix-core/src/main/java/org/apache/phoenix/schema/SystemFunctionSplitPolicy.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/SystemFunctionSplitPolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/SystemFunctionSplitPolicy.java
new file mode 100644
index 0000000..58e1f9f
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/SystemFunctionSplitPolicy.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+public class SystemFunctionSplitPolicy extends SplitOnLeadingVarCharColumnsPolicy {
+
+ @Override
+ protected int getColumnToSplitAt() {
+ return 2;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4b7a14cd/phoenix-core/src/main/java/org/apache/phoenix/schema/SystemStatsSplitPolicy.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/SystemStatsSplitPolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/SystemStatsSplitPolicy.java
new file mode 100644
index 0000000..69fe8aa
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/SystemStatsSplitPolicy.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+public class SystemStatsSplitPolicy extends SplitOnLeadingVarCharColumnsPolicy {
+
+ @Override
+ protected int getColumnToSplitAt() {
+ return 1;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4b7a14cd/phoenix-core/src/test/java/org/apache/phoenix/schema/SystemSplitPolicyTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/SystemSplitPolicyTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/SystemSplitPolicyTest.java
new file mode 100644
index 0000000..01074b4
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/SystemSplitPolicyTest.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema;
+
+import static org.junit.Assert.assertArrayEquals;
+
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.stats.StatisticsUtil;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.util.ByteUtil;
+import org.junit.Test;
+
+public class SystemSplitPolicyTest {
+ @Test
+ public void testStatsSplitPolicy() {
+ SplitOnLeadingVarCharColumnsPolicy policy = new SystemStatsSplitPolicy();
+ byte[] splitOn;
+ byte[] rowKey;
+ byte[] table;
+ ImmutableBytesWritable family;
+ table = PVarchar.INSTANCE.toBytes("FOO.BAR");
+ family = QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES_PTR;
+ rowKey = ByteUtil.concat(
+ PLong.INSTANCE.toBytes(20L),
+ PVarchar.INSTANCE.toBytes("BAS"),
+ QueryConstants.SEPARATOR_BYTE_ARRAY,
+ PInteger.INSTANCE.toBytes(100));
+ splitOn = StatisticsUtil.getRowKey(table, family, rowKey);
+ splitOn = policy.getSplitPoint(splitOn);
+ assertArrayEquals(ByteUtil.concat(table, QueryConstants.SEPARATOR_BYTE_ARRAY), splitOn);
+
+ table = PVarchar.INSTANCE.toBytes("MY_TABLE");
+ family = new ImmutableBytesWritable(Bytes.toBytes("ABC"));
+ rowKey = ByteUtil.concat(
+ PVarchar.INSTANCE.toBytes("BAS"),
+ QueryConstants.SEPARATOR_BYTE_ARRAY,
+ PInteger.INSTANCE.toBytes(100),
+ PLong.INSTANCE.toBytes(20L));
+ splitOn = StatisticsUtil.getRowKey(table, family, rowKey);
+ splitOn = policy.getSplitPoint(splitOn);
+ assertArrayEquals(ByteUtil.concat(table, QueryConstants.SEPARATOR_BYTE_ARRAY), splitOn);
+ }
+
+ private static byte[] getSystemFunctionRowKey(String tenantId, String funcName, String typeName, byte[] argPos) {
+ return ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId),
+ QueryConstants.SEPARATOR_BYTE_ARRAY,
+ PVarchar.INSTANCE.toBytes(funcName),
+ QueryConstants.SEPARATOR_BYTE_ARRAY,
+ PVarchar.INSTANCE.toBytes(typeName),
+ QueryConstants.SEPARATOR_BYTE_ARRAY,
+ argPos
+ );
+ }
+
+ private static byte[] getSystemFunctionSplitKey(String tenantId, String funcName) {
+ return ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId),
+ QueryConstants.SEPARATOR_BYTE_ARRAY,
+ PVarchar.INSTANCE.toBytes(funcName),
+ QueryConstants.SEPARATOR_BYTE_ARRAY);
+ }
+
+ @Test
+ public void testFunctionSplitPolicy() {
+ SplitOnLeadingVarCharColumnsPolicy policy = new SystemFunctionSplitPolicy();
+ byte[] splitPoint;
+ byte[] rowKey;
+ byte[] expectedSplitPoint;
+ rowKey = getSystemFunctionRowKey("","MY_FUNC", "VARCHAR", Bytes.toBytes(3));
+ expectedSplitPoint = getSystemFunctionSplitKey("","MY_FUNC");
+ splitPoint = policy.getSplitPoint(rowKey);
+ assertArrayEquals(expectedSplitPoint, splitPoint);
+
+ rowKey = getSystemFunctionRowKey("TENANT1","F", "", Bytes.toBytes(3));
+ expectedSplitPoint = getSystemFunctionSplitKey("TENANT1","F");
+ splitPoint = policy.getSplitPoint(rowKey);
+ assertArrayEquals(expectedSplitPoint, splitPoint);
+ }
+}