Posted to commits@phoenix.apache.org by ja...@apache.org on 2018/09/24 15:28:07 UTC
[02/50] [abbrv] phoenix git commit: PHOENIX-3534 Support multi region
SYSTEM.CATALOG table (Thomas D'Silva and Rahul Gidwani)
http://git-wip-us.apache.org/repos/asf/phoenix/blob/93fdd5ba/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index b127408..9d5583b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -82,12 +82,12 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.regionserver.LocalIndexSplitter;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
@@ -96,6 +96,9 @@ import org.apache.phoenix.coprocessor.MetaDataEndpointImpl;
import org.apache.phoenix.coprocessor.MetaDataProtocol;
import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
+import org.apache.phoenix.coprocessor.TableInfo;
+import org.apache.phoenix.coprocessor.TableViewFinderResult;
+import org.apache.phoenix.coprocessor.ViewFinder;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.query.QueryConstants;
@@ -175,11 +178,6 @@ public class UpgradeUtil {
private static final String DELETE_LINK = "DELETE FROM " + SYSTEM_CATALOG_SCHEMA + "." + SYSTEM_CATALOG_TABLE
+ " WHERE (" + TABLE_SCHEM + "=? OR (" + TABLE_SCHEM + " IS NULL AND ? IS NULL)) AND " + TABLE_NAME + "=? AND " + COLUMN_FAMILY + "=? AND " + LINK_TYPE + " = " + LinkType.PHYSICAL_TABLE.getSerializedValue();
- private static final String GET_VIEWS_QUERY = "SELECT " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME
- + " FROM " + SYSTEM_CATALOG_SCHEMA + "." + SYSTEM_CATALOG_TABLE + " WHERE " + COLUMN_FAMILY + " = ? AND "
- + LINK_TYPE + " = " + LinkType.PHYSICAL_TABLE.getSerializedValue() + " AND ( " + TABLE_TYPE + "=" + "'"
- + PTableType.VIEW.getSerializedValue() + "' OR " + TABLE_TYPE + " IS NULL) ORDER BY "+TENANT_ID;
-
private UpgradeUtil() {
}
@@ -225,8 +223,8 @@ public class UpgradeUtil {
scan.setRaw(true);
scan.setMaxVersions();
ResultScanner scanner = null;
- HTableInterface source = null;
- HTableInterface target = null;
+ Table source = null;
+ Table target = null;
try {
source = conn.getQueryServices().getTable(sourceName);
target = conn.getQueryServices().getTable(targetName);
@@ -646,7 +644,7 @@ public class UpgradeUtil {
logger.info("Upgrading SYSTEM.SEQUENCE table");
byte[] seqTableKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE);
- HTableInterface sysTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table sysTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
try {
logger.info("Setting SALT_BUCKETS property of SYSTEM.SEQUENCE to " + SaltingUtil.MAX_BUCKET_NUM);
KeyValue saltKV = KeyValueUtil.newKeyValue(seqTableKey,
@@ -699,7 +697,7 @@ public class UpgradeUtil {
Scan scan = new Scan();
scan.setRaw(true);
scan.setMaxVersions();
- HTableInterface seqTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
+ Table seqTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
try {
boolean committed = false;
logger.info("Adding salt byte to all SYSTEM.SEQUENCE rows");
@@ -1149,6 +1147,78 @@ public class UpgradeUtil {
}
}
+ /**
+ * Move child links from SYSTEM.CATALOG to SYSTEM.CHILD_LINK
+ * @param oldMetaConnection caller should take care of closing the passed connection appropriately
+ * @throws SQLException
+ */
+ public static void moveChildLinks(PhoenixConnection oldMetaConnection) throws SQLException {
+ PhoenixConnection metaConnection = null;
+ try {
+ // Need a separate connection with the max timestamp to be able to read all data from SYSTEM.CATALOG
+ metaConnection = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP);
+ logger.info("Upgrading metadata to add parent to child links for views");
+ metaConnection.commit();
+ String createChildLink = "UPSERT INTO SYSTEM.CHILD_LINK(TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, LINK_TYPE) " +
+ "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, LINK_TYPE " +
+ "FROM SYSTEM.CATALOG " +
+ "WHERE LINK_TYPE = 4";
+ metaConnection.createStatement().execute(createChildLink);
+ metaConnection.commit();
+ String deleteChildLink = "DELETE FROM SYSTEM.CATALOG WHERE LINK_TYPE = 4 ";
+ metaConnection.createStatement().execute(deleteChildLink);
+ metaConnection.commit();
+ metaConnection.getQueryServices().clearCache();
+ } finally {
+ if (metaConnection != null) {
+ metaConnection.close();
+ }
+ }
+ }
+
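A quick post-upgrade sanity check for this migration might look like the following sketch (not part of the patch; it assumes LINK_TYPE 4 denotes the CHILD_TABLE link, as in the UPSERT above, and an illustrative JDBC URL):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class ChildLinkMigrationCheck {
        public static void main(String[] args) throws Exception {
            String jdbcUrl = "jdbc:phoenix:localhost"; // illustrative URL
            try (Connection conn = DriverManager.getConnection(jdbcUrl);
                 Statement stmt = conn.createStatement()) {
                // After moveChildLinks, no CHILD_TABLE links should remain in SYSTEM.CATALOG
                ResultSet rs = stmt.executeQuery(
                        "SELECT COUNT(*) FROM SYSTEM.CATALOG WHERE LINK_TYPE = 4");
                rs.next();
                System.out.println("child links left in SYSTEM.CATALOG: " + rs.getLong(1));
                // ...and the moved links should now be queryable from SYSTEM.CHILD_LINK
                rs = stmt.executeQuery(
                        "SELECT COUNT(*) FROM SYSTEM.CHILD_LINK WHERE LINK_TYPE = 4");
                rs.next();
                System.out.println("child links in SYSTEM.CHILD_LINK: " + rs.getLong(1));
            }
        }
    }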
+ public static void addViewIndexToParentLinks(PhoenixConnection oldMetaConnection) throws SQLException {
+ // Need separate connections with the max timestamp to be able to read all data from SYSTEM.CATALOG
+ try (PhoenixConnection queryConn = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP);
+ PhoenixConnection upsertConn = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP)) {
+ logger.info("Upgrading metadata to add parent links for indexes on views");
+ String indexQuery = "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_FAMILY FROM SYSTEM.CATALOG WHERE LINK_TYPE = "
+ + LinkType.INDEX_TABLE.getSerializedValue();
+ String createViewIndexLink = "UPSERT INTO SYSTEM.CATALOG (TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_FAMILY, LINK_TYPE) VALUES (?,?,?,?,?) ";
+ ResultSet rs = queryConn.createStatement().executeQuery(indexQuery);
+ String prevTenantId = null;
+ PhoenixConnection metaConn = queryConn;
+ Properties props = new Properties(queryConn.getClientInfo());
+ props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(HConstants.LATEST_TIMESTAMP));
+ while (rs.next()) {
+ String tenantId = rs.getString("TENANT_ID");
+ if (!java.util.Objects.equals(prevTenantId, tenantId)) {
+ prevTenantId = tenantId;
+ props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
+ metaConn = new PhoenixConnection(oldMetaConnection, props);
+ }
+ String schemaName = rs.getString("TABLE_SCHEM");
+ String parentTableName = rs.getString("TABLE_NAME");
+ String fullParentTableName = SchemaUtil.getTableName(schemaName, parentTableName);
+ String indexName = rs.getString("COLUMN_FAMILY");
+ PTable table = PhoenixRuntime.getTable(metaConn, fullParentTableName);
+ if (table==null) {
+ throw new TableNotFoundException(fullParentTableName);
+ }
+ if (table.getType().equals(PTableType.VIEW)) {
+ PreparedStatement prepareStatement = upsertConn.prepareStatement(createViewIndexLink);
+ prepareStatement.setString(1, tenantId);
+ prepareStatement.setString(2, schemaName);
+ prepareStatement.setString(3, indexName);
+ prepareStatement.setString(4, parentTableName);
+ prepareStatement.setByte(5, LinkType.VIEW_INDEX_PARENT_TABLE.getSerializedValue());
+ prepareStatement.execute();
+ upsertConn.commit();
+ }
+ }
+ queryConn.getQueryServices().clearCache();
+ }
+ }
+
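The links written here can be inspected afterwards with a plain query. A small sketch, assuming a java.sql.Connection conn to the cluster and the same LinkType enum already imported in this file:

    // List the VIEW_INDEX_PARENT_TABLE links created by addViewIndexToParentLinks.
    // Per the UPSERT above, TABLE_NAME holds the index name and COLUMN_FAMILY the
    // name of the view the index belongs to.
    String q = "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_FAMILY FROM SYSTEM.CATALOG"
            + " WHERE LINK_TYPE = " + LinkType.VIEW_INDEX_PARENT_TABLE.getSerializedValue();
    try (ResultSet rs = conn.createStatement().executeQuery(q)) {
        while (rs.next()) {
            System.out.println(rs.getString(3) + " -> " + rs.getString(4));
        }
    }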
private static void upsertBaseColumnCountInHeaderRow(PhoenixConnection metaConnection,
String tenantId, String schemaName, String viewOrTableName, int baseColumnCount)
throws SQLException {
@@ -1667,7 +1737,7 @@ public class UpgradeUtil {
tableMetadata.add(put);
}
- public static boolean truncateStats(HTableInterface metaTable, HTableInterface statsTable)
+ public static boolean truncateStats(Table metaTable, Table statsTable)
throws IOException, InterruptedException {
byte[] statsTableKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME,
PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE);
@@ -1728,7 +1798,7 @@ public class UpgradeUtil {
return false;
}
- private static void mapTableToNamespace(HBaseAdmin admin, HTableInterface metatable, String srcTableName,
+ private static void mapTableToNamespace(HBaseAdmin admin, Table metatable, String srcTableName,
String destTableName, ReadOnlyProps props, Long ts, String phoenixTableName, PTableType pTableType,PName tenantId)
throws SnapshotCreationException, IllegalArgumentException, IOException, InterruptedException,
SQLException {
@@ -1791,7 +1861,7 @@ public class UpgradeUtil {
* Method to map an existing Phoenix table to a namespace. Should not be used if the table has views or
* indexes; instead, use the map-table utility in psql.py
*/
- public static void mapTableToNamespace(HBaseAdmin admin, HTableInterface metatable, String tableName,
+ public static void mapTableToNamespace(HBaseAdmin admin, Table metatable, String tableName,
ReadOnlyProps props, Long ts, PTableType pTableType, PName tenantId) throws SnapshotCreationException,
IllegalArgumentException, IOException, InterruptedException, SQLException {
String destTablename = SchemaUtil
@@ -1808,14 +1878,15 @@ public class UpgradeUtil {
readOnlyProps)) { throw new IllegalArgumentException(
QueryServices.IS_NAMESPACE_MAPPING_ENABLED + " is not enabled!!"); }
try (HBaseAdmin admin = conn.getQueryServices().getAdmin();
- HTableInterface metatable = conn.getQueryServices()
+ Table metatable = conn.getQueryServices()
.getTable(SchemaUtil
.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, readOnlyProps)
.getName());) {
- String tableName = SchemaUtil.normalizeIdentifier(srcTable);
- String schemaName = SchemaUtil.getSchemaNameFromFullName(tableName);
+ String fullTableName = SchemaUtil.normalizeIdentifier(srcTable);
+ String schemaName = SchemaUtil.getSchemaNameFromFullName(fullTableName);
+ String tableName = SchemaUtil.getTableNameFromFullName(fullTableName);
// Confirm table is not already upgraded
- PTable table = PhoenixRuntime.getTable(conn, tableName);
+ PTable table = PhoenixRuntime.getTable(conn, fullTableName);
// Upgrade is not required if schemaName is not present.
if (schemaName.equals("") && !PTableType.VIEW
@@ -1829,21 +1900,38 @@ public class UpgradeUtil {
String oldPhysicalName = table.getPhysicalName().getString();
String newPhysicalTablename = SchemaUtil.normalizeIdentifier(
SchemaUtil.getPhysicalTableName(oldPhysicalName, readOnlyProps).getNameAsString());
- logger.info(String.format("Upgrading %s %s..", table.getType(), tableName));
+ logger.info(String.format("Upgrading %s %s..", table.getType(), fullTableName));
logger.info(String.format("oldPhysicalName %s newPhysicalTablename %s..", oldPhysicalName, newPhysicalTablename));
logger.info(String.format("teanantId %s..", conn.getTenantId()));
+
+ TableViewFinderResult childViewsResult = new TableViewFinderResult();
+ try (Table childLinkTable =
+ conn.getQueryServices()
+ .getTable(SchemaUtil.getPhysicalName(
+ PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES, readOnlyProps)
+ .getName())) {
+ byte[] tenantId = conn.getTenantId() != null ? conn.getTenantId().getBytes() : null;
+ ViewFinder.findAllRelatives(childLinkTable, tenantId, schemaName.getBytes(),
+ tableName.getBytes(), LinkType.CHILD_TABLE, childViewsResult);
+ }
+
// Upgrade the data or main table
- mapTableToNamespace(admin, metatable, tableName, newPhysicalTablename, readOnlyProps,
- PhoenixRuntime.getCurrentScn(readOnlyProps), tableName, table.getType(),conn.getTenantId());
+ mapTableToNamespace(admin, metatable, fullTableName, newPhysicalTablename, readOnlyProps,
+ PhoenixRuntime.getCurrentScn(readOnlyProps), fullTableName, table.getType(),conn.getTenantId());
// clear the cache and get new table
+ conn.removeTable(conn.getTenantId(), fullTableName,
+ table.getParentName() != null ? table.getParentName().getString() : null,
+ table.getTimeStamp());
+ byte[] tenantIdBytes = conn.getTenantId() == null ? ByteUtil.EMPTY_BYTE_ARRAY : conn.getTenantId().getBytes();
conn.getQueryServices().clearTableFromCache(
- conn.getTenantId() == null ? ByteUtil.EMPTY_BYTE_ARRAY : conn.getTenantId().getBytes(),
+ tenantIdBytes,
table.getSchemaName().getBytes(), table.getTableName().getBytes(),
PhoenixRuntime.getCurrentScn(readOnlyProps));
- MetaDataMutationResult result = new MetaDataClient(conn).updateCache(conn.getTenantId(),schemaName,
- SchemaUtil.getTableNameFromFullName(tableName),true);
+ MetaDataMutationResult result =
+ new MetaDataClient(conn).updateCache(conn.getTenantId(), schemaName, tableName,
+ true);
if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { throw new TableNotFoundException(
- schemaName, tableName); }
+ schemaName, fullTableName); }
table = result.getTable();
// check whether table is properly upgraded before upgrading indexes
@@ -1893,13 +1981,12 @@ public class UpgradeUtil {
conn.commit();
}
conn.getQueryServices().clearTableFromCache(
- conn.getTenantId() == null ? ByteUtil.EMPTY_BYTE_ARRAY : conn.getTenantId().getBytes(),
+ tenantIdBytes,
index.getSchemaName().getBytes(), index.getTableName().getBytes(),
PhoenixRuntime.getCurrentScn(readOnlyProps));
}
updateIndexesSequenceIfPresent(conn, table);
conn.commit();
-
} else {
throw new RuntimeException("Error: problem occured during upgrade. Table is not upgraded successfully");
}
@@ -1907,12 +1994,32 @@ public class UpgradeUtil {
logger.info(String.format("Updating link information for view '%s' ..", table.getTableName()));
updateLink(conn, oldPhysicalName, newPhysicalTablename,table.getSchemaName(),table.getTableName());
conn.commit();
-
+
+ // if the view is a first level child, then we need to create the PARENT_TABLE link
+ // that was overwritten by the PHYSICAL_TABLE link
+ if (table.getParentName().equals(table.getPhysicalName())) {
+ logger.info(String.format("Creating PARENT link for view '%s' ..", table.getTableName()));
+ // Add row linking view to its parent
+ PreparedStatement linkStatement = conn.prepareStatement(MetaDataClient.CREATE_VIEW_LINK);
+ linkStatement.setString(1, Bytes.toStringBinary(tenantIdBytes));
+ linkStatement.setString(2, table.getSchemaName().getString());
+ linkStatement.setString(3, table.getTableName().getString());
+ linkStatement.setString(4, table.getParentName().getString());
+ linkStatement.setByte(5, LinkType.PARENT_TABLE.getSerializedValue());
+ linkStatement.setString(6, null);
+ linkStatement.execute();
+ conn.commit();
+ }
+
conn.getQueryServices().clearTableFromCache(
- conn.getTenantId() == null ? ByteUtil.EMPTY_BYTE_ARRAY : conn.getTenantId().getBytes(),
+ tenantIdBytes,
table.getSchemaName().getBytes(), table.getTableName().getBytes(),
PhoenixRuntime.getCurrentScn(readOnlyProps));
}
+ // Upgrade all child views
+ if (table.getType() == PTableType.TABLE) {
+ mapChildViewsToNamespace(conn.getURL(), conn.getClientInfo(), childViewsResult.getLinks());
+ }
}
}
@@ -1940,7 +2047,7 @@ public class UpgradeUtil {
private static void updateLink(PhoenixConnection conn, String srcTableName,
String destTableName, PName schemaName, PName tableName) throws SQLException {
String updateLinkSql = String.format(UPDATE_LINK, destTableName);
- boolean hasTenantId = conn.getTenantId() != null;
+ boolean hasTenantId = conn.getTenantId() != null && conn.getTenantId().getBytes().length!=0;
if (hasTenantId) {
updateLinkSql += " AND TENANT_ID = ? ";
}
@@ -1968,36 +2075,29 @@ public class UpgradeUtil {
deleteLinkStatment.execute();
}
- public static void mapChildViewsToNamespace(PhoenixConnection conn, String table, Properties props)
+ private static void mapChildViewsToNamespace(String connUrl, Properties props, List<TableInfo> viewInfoList)
throws SQLException, SnapshotCreationException, IllegalArgumentException, IOException,
InterruptedException {
- PreparedStatement preparedStatment = conn.prepareStatement(GET_VIEWS_QUERY);
- preparedStatment.setString(1, SchemaUtil.normalizeIdentifier(table));
- ResultSet rs = preparedStatment.executeQuery();
String tenantId = null;
String prevTenantId = null;
- PhoenixConnection passedConn = conn;
- while (rs.next()) {
- tenantId = rs.getString(1);
+ PhoenixConnection conn = null;
+ for (TableInfo viewInfo : viewInfoList) {
+ tenantId = viewInfo.getTenantId()!=null ? Bytes.toString(viewInfo.getTenantId()) : null;
+ String viewName = SchemaUtil.getTableName(viewInfo.getSchemaName(), viewInfo.getTableName());
if (prevTenantId != tenantId) {
if (tenantId != null) {
props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
} else {
props.remove(PhoenixRuntime.TENANT_ID_ATTRIB);
}
- if (passedConn != conn) {
+ if (conn != null)
conn.close();
- }
- conn = DriverManager.getConnection(conn.getURL(), props).unwrap(PhoenixConnection.class);
+ conn = DriverManager.getConnection(connUrl, props).unwrap(PhoenixConnection.class);
}
- String viewName=SchemaUtil.getTableName(rs.getString(2), rs.getString(3));
logger.info(String.format("Upgrading view %s for tenantId %s..", viewName,tenantId));
UpgradeUtil.upgradeTable(conn, viewName);
prevTenantId = tenantId;
}
- if (passedConn != conn) {
- conn.close();
- }
}
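The per-tenant connection handling above follows the usual Phoenix pattern of setting TENANT_ID_ATTRIB on the connection properties. A minimal standalone sketch of that pattern (URL, tenant, and view names are illustrative; imports as already present in this file):

    // Open a tenant-scoped PhoenixConnection and upgrade one tenant-owned view.
    Properties props = new Properties();
    props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "tenant1");
    try (PhoenixConnection tenantConn = DriverManager
            .getConnection("jdbc:phoenix:localhost", props)    // illustrative URL
            .unwrap(PhoenixConnection.class)) {
        UpgradeUtil.upgradeTable(tenantConn, "S1.MY_VIEW");    // illustrative view name
    }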
public static final String getSysCatalogSnapshotName(long currentSystemTableTimestamp) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/93fdd5ba/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
index 0ff17d3..154dd7a 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
@@ -1253,7 +1253,7 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
}
}
-
+
@Test
public void testDuplicatePKColumn() throws Exception {
String ddl = "CREATE TABLE t (k1 VARCHAR, k1 VARCHAR CONSTRAINT pk PRIMARY KEY(k1))";
http://git-wip-us.apache.org/repos/asf/phoenix/blob/93fdd5ba/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java b/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java
new file mode 100644
index 0000000..cb41191
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/coprocessor/MetaDataEndpointImplTest.java
@@ -0,0 +1,299 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.coprocessor;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.junit.Test;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+
+public class MetaDataEndpointImplTest extends ParallelStatsDisabledIT {
+ private final TableName catalogTable = TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ private final TableName linkTable = TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES);
+
+ /*
+ The tree structure is as follows, where ParentTable is the base table
+ and all of its descendants are views:
+
+ ParentTable
+ / \
+ leftChild rightChild
+ /
+ leftGrandChild
+ */
+
+ @Test
+ public void testGettingChildrenAndParentViews() throws Exception {
+ String baseTable = generateUniqueName();
+ String leftChild = generateUniqueName();
+ String rightChild = generateUniqueName();
+ String leftGrandChild = generateUniqueName();
+ Connection conn = DriverManager.getConnection(getUrl());
+ String ddlFormat =
+ "CREATE TABLE IF NOT EXISTS " + baseTable + " (" + " PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR "
+ + " CONSTRAINT NAME_PK PRIMARY KEY (PK2)" + " )";
+ conn.createStatement().execute(ddlFormat);
+
+ conn.createStatement().execute("CREATE VIEW " + rightChild + " AS SELECT * FROM " + baseTable);
+ conn.createStatement().execute("CREATE VIEW " + leftChild + " (carrier VARCHAR) AS SELECT * FROM " + baseTable);
+ conn.createStatement().execute("CREATE VIEW " + leftGrandChild + " (dropped_calls BIGINT) AS SELECT * FROM " + leftChild);
+
+ PTable table = PhoenixRuntime.getTable(conn, baseTable.toUpperCase());
+ // sanity check that the right child view's metadata can be resolved
+ PhoenixRuntime.getTable(conn, rightChild.toUpperCase());
+
+ TableViewFinderResult childViews = new TableViewFinderResult();
+ ViewFinder.findAllRelatives(getTable(linkTable), HConstants.EMPTY_BYTE_ARRAY, table.getSchemaName().getBytes(),
+ table.getTableName().getBytes(), PTable.LinkType.CHILD_TABLE, childViews);
+ assertEquals(3, childViews.getLinks().size());
+
+ PTable childMostView = PhoenixRuntime.getTable(conn, leftGrandChild.toUpperCase());
+ TableViewFinderResult parentViews = new TableViewFinderResult();
+ ViewFinder
+ .findAllRelatives(getTable(catalogTable), HConstants.EMPTY_BYTE_ARRAY, childMostView.getSchemaName().getBytes(),
+ childMostView.getTableName().getBytes(), PTable.LinkType.PARENT_TABLE, parentViews);
+ // returns everything but the parent table; should only return leftChild and not rightChild
+ assertEquals(1, parentViews.getLinks().size());
+ // now let's check and make sure the columns are correct
+ assertColumnNamesEqual(PhoenixRuntime.getTable(conn, childMostView.getName().getString()), "PK2", "V1", "V2", "CARRIER", "DROPPED_CALLS");
+
+ }
+
+ @Test
+ public void testGettingOneChild() throws Exception {
+ String baseTable = generateUniqueName();
+ String leftChild = generateUniqueName();
+ Connection conn = DriverManager.getConnection(getUrl());
+ String ddlFormat =
+ "CREATE TABLE IF NOT EXISTS " + baseTable + " (" + " PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR "
+ + " CONSTRAINT NAME_PK PRIMARY KEY (PK2)" + " )";
+ conn.createStatement().execute(ddlFormat);
+ conn.createStatement().execute("CREATE VIEW " + leftChild + " (carrier VARCHAR) AS SELECT * FROM " + baseTable);
+
+
+ // now let's check and make sure the columns are correct
+ assertColumnNamesEqual(PhoenixRuntime.getTable(conn, leftChild.toUpperCase()), "PK2", "V1", "V2", "CARRIER");
+ }
+
+ @Test
+ public void testDroppingADerivedColumn() throws Exception {
+ String baseTable = generateUniqueName();
+ String childView = generateUniqueName();
+ Connection conn = DriverManager.getConnection(getUrl());
+ String ddlFormat = "CREATE TABLE " + baseTable + " (A VARCHAR PRIMARY KEY, B VARCHAR, C VARCHAR)";
+ conn.createStatement().execute(ddlFormat);
+ conn.createStatement().execute("CREATE VIEW " + childView + " (D VARCHAR) AS SELECT * FROM " + baseTable);
+ assertColumnNamesEqual(PhoenixRuntime.getTable(conn, childView.toUpperCase()), "A", "B", "C", "D");
+ conn.createStatement().execute("ALTER VIEW " + childView + " DROP COLUMN C");
+
+ // now let's check and make sure the columns are correct
+ assertColumnNamesEqual(PhoenixRuntime.getTableNoCache(conn, childView.toUpperCase()), "A", "B", "D");
+
+ }
+
+ @Test
+ public void testDroppingAColumn() throws Exception {
+ String baseTable = generateUniqueName();
+ String childView = generateUniqueName();
+ Connection conn = DriverManager.getConnection(getUrl());
+ String ddlFormat = "CREATE TABLE " + baseTable + " (A VARCHAR PRIMARY KEY, B VARCHAR, C VARCHAR)";
+ conn.createStatement().execute(ddlFormat);
+ conn.createStatement().execute("CREATE VIEW " + childView + " (D VARCHAR) AS SELECT * FROM " + baseTable);
+ assertColumnNamesEqual(PhoenixRuntime.getTable(conn, childView.toUpperCase()), "A", "B", "C", "D");
+ conn.createStatement().execute("ALTER TABLE " + baseTable + " DROP COLUMN C");
+
+ // now let's check and make sure the columns are correct
+ assertColumnNamesEqual(PhoenixRuntime.getTableNoCache(conn, childView.toUpperCase()), "A", "B", "D");
+ }
+
+ @Test
+ public void testAlteringBaseColumns() throws Exception {
+ String baseTable = generateUniqueName();
+ String leftChild = generateUniqueName();
+ Connection conn = DriverManager.getConnection(getUrl());
+ String ddlFormat =
+ "CREATE TABLE IF NOT EXISTS " + baseTable + " (" + " PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR "
+ + " CONSTRAINT NAME_PK PRIMARY KEY (PK2)" + " )";
+ conn.createStatement().execute(ddlFormat);
+ conn.createStatement().execute("CREATE VIEW " + leftChild + " (carrier VARCHAR) AS SELECT * FROM " + baseTable);
+
+ // now let's check and make sure the columns are correct
+ PTable childPTable = PhoenixRuntime.getTable(conn, leftChild.toUpperCase());
+ assertColumnNamesEqual(childPTable, "PK2", "V1", "V2", "CARRIER");
+
+ // now let's alter the base table by adding a column
+ conn.createStatement().execute("ALTER TABLE " + baseTable + " ADD V3 integer");
+
+ // make sure that column was added to the base table
+ PTable table = PhoenixRuntime.getTableNoCache(conn, baseTable.toUpperCase());
+ assertColumnNamesEqual(table, "PK2", "V1", "V2", "V3");
+
+
+ childPTable = PhoenixRuntime.getTableNoCache(conn, leftChild.toUpperCase());
+ assertColumnNamesEqual(childPTable, "PK2", "V1", "V2", "V3", "CARRIER");
+ }
+
+ @Test
+ public void testAddingAColumnWithADifferentDefinition() throws Exception {
+ String baseTable = generateUniqueName();
+ String view = generateUniqueName();
+ Connection conn = DriverManager.getConnection(getUrl());
+ String ddlFormat =
+ "CREATE TABLE IF NOT EXISTS " + baseTable + " (" + " PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR "
+ + " CONSTRAINT NAME_PK PRIMARY KEY (PK2)" + " )";
+ conn.createStatement().execute(ddlFormat);
+ conn.createStatement().execute("CREATE VIEW " + view + " (carrier BIGINT) AS SELECT * FROM " + baseTable);
+ Map<String, String> expected = new ImmutableMap.Builder<String, String>()
+ .put("PK2", "VARCHAR")
+ .put("V1", "VARCHAR")
+ .put("V2", "VARCHAR")
+ .put("CARRIER", "BIGINT")
+ .build();
+
+ assertColumnNamesAndDefinitionsEqual(PhoenixRuntime.getTable(conn, view.toUpperCase()), expected);
+ try {
+ conn.createStatement().execute("ALTER TABLE " + baseTable + " ADD carrier VARCHAR");
+ }
+ catch(SQLException e) {
+ assertEquals(SQLExceptionCode.CANNOT_MUTATE_TABLE.getErrorCode(), e.getErrorCode());
+ }
+
+ Map<String, String> expectedBaseTableColumns = new ImmutableMap.Builder<String, String>()
+ .put("PK2", "VARCHAR")
+ .put("V1", "VARCHAR")
+ .put("V2", "VARCHAR")
+ .build();
+
+ assertColumnNamesAndDefinitionsEqual(PhoenixRuntime.getTable(conn, baseTable.toUpperCase()), expectedBaseTableColumns);
+
+ // the view column "CARRIER" should still be unchanged
+ Map<String, String> expectedViewColumnDefinition = new ImmutableMap.Builder<String, String>()
+ .put("PK2", "VARCHAR")
+ .put("V1", "VARCHAR")
+ .put("V2", "VARCHAR")
+ .put("CARRIER", "BIGINT")
+ .build();
+
+ assertColumnNamesAndDefinitionsEqual(PhoenixRuntime.getTable(conn, view.toUpperCase()), expectedViewColumnDefinition);
+ }
+
+ @Test
+ public void testDropCascade() throws Exception {
+ String baseTable = generateUniqueName();
+ String child = generateUniqueName();
+ String grandChild = generateUniqueName();
+ Connection conn = DriverManager.getConnection(getUrl());
+ String ddlFormat =
+ "CREATE TABLE IF NOT EXISTS " + baseTable + " (" + " PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR "
+ + " CONSTRAINT NAME_PK PRIMARY KEY (PK2)" + " )";
+ conn.createStatement().execute(ddlFormat);
+ conn.createStatement().execute("CREATE VIEW " + child + " (A VARCHAR) AS SELECT * FROM " + baseTable);
+ conn.createStatement().execute("CREATE VIEW " + grandChild + " (B VARCHAR) AS SELECT * FROM " + child);
+
+ PTable childPTable = PhoenixRuntime.getTable(conn, child.toUpperCase());
+ // now let's check and make sure the columns are correct
+ assertColumnNamesEqual(childPTable, "PK2", "V1", "V2", "A");
+
+ // now let's drop the parent table
+ conn.createStatement().execute("DROP TABLE " + baseTable + " CASCADE");
+
+ // the tables should no longer exist
+ try {
+ PhoenixRuntime.getTableNoCache(conn, baseTable);
+ fail();
+ }
+ catch(TableNotFoundException e){}
+ try {
+ PhoenixRuntime.getTableNoCache(conn, child);
+ fail();
+ }
+ catch(TableNotFoundException e){}
+ try {
+ PhoenixRuntime.getTableNoCache(conn, grandChild);
+ fail();
+ }
+ catch(TableNotFoundException e){}
+ }
+
+ @Test
+ public void testWhereClause() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ String baseTableName = generateUniqueName();
+ String childViewName = generateUniqueName();
+ String grandChildViewName = generateUniqueName();
+ String baseTableDdl = "CREATE TABLE " + baseTableName + " (" +
+ "A0 CHAR(1) NOT NULL PRIMARY KEY," +
+ "A1 CHAR(1), A2 CHAR (1))";
+ conn.createStatement().execute(baseTableDdl);
+ conn.createStatement().execute(
+ "CREATE VIEW " + childViewName + " AS SELECT * FROM " + baseTableName + " WHERE A1 = 'X'");
+ conn.createStatement().execute(
+ "CREATE VIEW " + grandChildViewName + " AS SELECT * FROM " + childViewName + " WHERE A2 = 'Y'");
+
+ PTable childViewTable = PhoenixRuntime.getTableNoCache(conn, childViewName);
+ PTable grandChildViewTable = PhoenixRuntime.getTableNoCache(conn, grandChildViewName);
+
+ assertNotNull(childViewTable.getColumnForColumnName("A1").getViewConstant());
+ assertNotNull(grandChildViewTable.getColumnForColumnName("A1").getViewConstant());
+ assertNotNull(grandChildViewTable.getColumnForColumnName("A2").getViewConstant());
+ }
+
+ private void assertColumnNamesEqual(PTable table, String... cols) {
+ List<String> actual = Lists.newArrayList();
+ for (PColumn column : table.getColumns()) {
+ actual.add(column.getName().getString().trim());
+ }
+ List<String> expected = Arrays.asList(cols);
+ assertEquals(Joiner.on(", ").join(expected), Joiner.on(", ").join(actual));
+ }
+
+ private void assertColumnNamesAndDefinitionsEqual(PTable table, Map<String, String> expected) {
+ Map<String, String> actual = Maps.newHashMap();
+ for (PColumn column : table.getColumns()) {
+ actual.put(column.getName().getString().trim(), column.getDataType().getSqlTypeName());
+ }
+ assertEquals(expected, actual);
+ }
+
+ private HTable getTable(TableName catalogTable) throws IOException {
+ return new HTable(utility.getConfiguration(), catalogTable);
+ }
+
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/phoenix/blob/93fdd5ba/phoenix-core/src/test/java/org/apache/phoenix/execute/CorrelatePlanTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/CorrelatePlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/CorrelatePlanTest.java
index d88a915..2e881b8 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/execute/CorrelatePlanTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/CorrelatePlanTest.java
@@ -30,6 +30,7 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.List;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
@@ -254,7 +255,7 @@ public class CorrelatePlanTest {
PName colName = PNameFactory.newName(name);
columns.add(new PColumnImpl(PNameFactory.newName(name), PNameFactory.newName(VALUE_COLUMN_FAMILY),
expr.getDataType(), expr.getMaxLength(), expr.getScale(), expr.isNullable(),
- i, expr.getSortOrder(), null, null, false, name, false, false, colName.getBytes()));
+ i, expr.getSortOrder(), null, null, false, name, false, false, colName.getBytes(), HConstants.LATEST_TIMESTAMP));
}
try {
PTable pTable = PTableImpl.makePTable(null, PName.EMPTY_NAME, PName.EMPTY_NAME,
http://git-wip-us.apache.org/repos/asf/phoenix/blob/93fdd5ba/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java
index 017e6c8..6bf298e 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java
@@ -30,6 +30,7 @@ import java.sql.SQLException;
import java.util.Collections;
import java.util.List;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -176,7 +177,8 @@ public class LiteralResultIteratorPlanTest {
PName colName = PNameFactory.newName(name);
columns.add(new PColumnImpl(PNameFactory.newName(name),
PNameFactory.newName(VALUE_COLUMN_FAMILY), expr.getDataType(), expr.getMaxLength(),
- expr.getScale(), expr.isNullable(), i, expr.getSortOrder(), null, null, false, name, false, false, colName.getBytes()));
+ expr.getScale(), expr.isNullable(), i, expr.getSortOrder(), null, null, false, name, false, false, colName.getBytes(),
+ HConstants.LATEST_TIMESTAMP));
}
try {
PTable pTable = PTableImpl.makePTable(null, PName.EMPTY_NAME, PName.EMPTY_NAME, PTableType.SUBQUERY, null,
http://git-wip-us.apache.org/repos/asf/phoenix/blob/93fdd5ba/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
index 195c2f0..4808213 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
@@ -31,6 +31,7 @@ import java.sql.SQLException;
import java.util.Arrays;
import java.util.List;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.compile.ColumnResolver;
@@ -120,9 +121,11 @@ public class UnnestArrayPlanTest {
RowKeyValueAccessor accessor = new RowKeyValueAccessor(Arrays.asList(dummy), 0);
UnnestArrayPlan plan = new UnnestArrayPlan(subPlan, new RowKeyColumnExpression(dummy, accessor), withOrdinality);
PName colName = PNameFactory.newName("ELEM");
- PColumn elemColumn = new PColumnImpl(PNameFactory.newName("ELEM"), PNameFactory.newName(VALUE_COLUMN_FAMILY), baseType, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, colName.getBytes());
+ PColumn elemColumn = new PColumnImpl(PNameFactory.newName("ELEM"), PNameFactory.newName(VALUE_COLUMN_FAMILY), baseType, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, colName.getBytes(),
+ HConstants.LATEST_TIMESTAMP);
colName = PNameFactory.newName("IDX");
- PColumn indexColumn = withOrdinality ? new PColumnImpl(colName, PNameFactory.newName(VALUE_COLUMN_FAMILY), PInteger.INSTANCE, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, colName.getBytes()) : null;
+ PColumn indexColumn = withOrdinality ? new PColumnImpl(colName, PNameFactory.newName(VALUE_COLUMN_FAMILY), PInteger.INSTANCE, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, colName.getBytes(),
+ HConstants.LATEST_TIMESTAMP) : null;
List<PColumn> columns = withOrdinality ? Arrays.asList(elemColumn, indexColumn) : Arrays.asList(elemColumn);
ProjectedColumnExpression elemExpr = new ProjectedColumnExpression(elemColumn, columns, 0, elemColumn.getName().getString());
ProjectedColumnExpression indexExpr = withOrdinality ? new ProjectedColumnExpression(indexColumn, columns, 1, indexColumn.getName().getString()) : null;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/93fdd5ba/phoenix-core/src/test/java/org/apache/phoenix/expression/ColumnExpressionTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/expression/ColumnExpressionTest.java b/phoenix-core/src/test/java/org/apache/phoenix/expression/ColumnExpressionTest.java
index 2788235..0856e79 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/expression/ColumnExpressionTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/expression/ColumnExpressionTest.java
@@ -25,6 +25,7 @@ import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PColumnImpl;
import org.apache.phoenix.schema.PName;
@@ -43,7 +44,7 @@ public class ColumnExpressionTest {
int scale = 5;
PName colName = PNameFactory.newName("c1");
PColumn column = new PColumnImpl(colName, PNameFactory.newName("f1"), PDecimal.INSTANCE, maxLen, scale,
- true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes());
+ true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes(), HConstants.LATEST_TIMESTAMP);
ColumnExpression colExp = new KeyValueColumnExpression(column);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream dOut = new DataOutputStream(baos);
@@ -64,7 +65,7 @@ public class ColumnExpressionTest {
int maxLen = 30;
PName colName = PNameFactory.newName("c1");
PColumn column = new PColumnImpl(colName, PNameFactory.newName("f1"), PBinary.INSTANCE, maxLen, null,
- true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes());
+ true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes(), HConstants.LATEST_TIMESTAMP);
ColumnExpression colExp = new KeyValueColumnExpression(column);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream dOut = new DataOutputStream(baos);
@@ -85,7 +86,7 @@ public class ColumnExpressionTest {
int scale = 5;
PName colName = PNameFactory.newName("c1");
PColumn column = new PColumnImpl(colName, PNameFactory.newName("f1"), PVarchar.INSTANCE, null, scale,
- true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes());
+ true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes(), HConstants.LATEST_TIMESTAMP);
ColumnExpression colExp = new KeyValueColumnExpression(column);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream dOut = new DataOutputStream(baos);
@@ -105,7 +106,7 @@ public class ColumnExpressionTest {
public void testSerializationWithNullScaleAndMaxLength() throws Exception {
PName colName = PNameFactory.newName("c1");
PColumn column = new PColumnImpl(colName, PNameFactory.newName("f1"), PDecimal.INSTANCE, null, null, true,
- 20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes());
+ 20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes(), HConstants.LATEST_TIMESTAMP);
ColumnExpression colExp = new KeyValueColumnExpression(column);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream dOut = new DataOutputStream(baos);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/93fdd5ba/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index f49d291..45b61c1 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -90,7 +90,12 @@ import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collections;
+import java.util.Deque;
+import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -112,13 +117,20 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.exception.SQLExceptionInfo;
@@ -138,6 +150,7 @@ import org.apache.phoenix.util.QueryUtil;
import org.apache.phoenix.util.ReadOnlyProps;
import org.apache.phoenix.util.SchemaUtil;
import org.apache.phoenix.util.ServerUtil.ConnectionFactory;
+import org.apache.phoenix.util.TestUtil;
import org.junit.ClassRule;
import org.junit.rules.TemporaryFolder;
import org.slf4j.Logger;
@@ -145,6 +158,7 @@ import org.slf4j.LoggerFactory;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -218,26 +232,26 @@ public abstract class BaseTest {
" CONSTRAINT pk PRIMARY KEY (organization_id, entity_id)\n" +
") ");
builder.put(TABLE_WITH_ARRAY, "create table "
- + TABLE_WITH_ARRAY
- + " (organization_id char(15) not null, \n"
- + " entity_id char(15) not null,\n"
- + " a_string_array varchar(100) array[],\n"
- + " b_string varchar(100),\n"
- + " a_integer integer,\n"
- + " a_date date,\n"
- + " a_time time,\n"
- + " a_timestamp timestamp,\n"
- + " x_decimal decimal(31,10),\n"
- + " x_long_array bigint array[],\n"
- + " x_integer integer,\n"
- + " a_byte_array tinyint array[],\n"
- + " a_short smallint,\n"
- + " a_float float,\n"
- + " a_double_array double array[],\n"
- + " a_unsigned_float unsigned_float,\n"
- + " a_unsigned_double unsigned_double \n"
- + " CONSTRAINT pk PRIMARY KEY (organization_id, entity_id)\n"
- + ")");
+ + TABLE_WITH_ARRAY
+ + " (organization_id char(15) not null, \n"
+ + " entity_id char(15) not null,\n"
+ + " a_string_array varchar(100) array[],\n"
+ + " b_string varchar(100),\n"
+ + " a_integer integer,\n"
+ + " a_date date,\n"
+ + " a_time time,\n"
+ + " a_timestamp timestamp,\n"
+ + " x_decimal decimal(31,10),\n"
+ + " x_long_array bigint array[],\n"
+ + " x_integer integer,\n"
+ + " a_byte_array tinyint array[],\n"
+ + " a_short smallint,\n"
+ + " a_float float,\n"
+ + " a_double_array double array[],\n"
+ + " a_unsigned_float unsigned_float,\n"
+ + " a_unsigned_double unsigned_double \n"
+ + " CONSTRAINT pk PRIMARY KEY (organization_id, entity_id)\n"
+ + ")");
builder.put(BTABLE_NAME,"create table " + BTABLE_NAME +
" (a_string varchar not null, \n" +
" a_id char(3) not null,\n" +
@@ -388,7 +402,7 @@ public abstract class BaseTest {
protected static String url;
protected static PhoenixTestDriver driver;
protected static boolean clusterInitialized = false;
- private static HBaseTestingUtility utility;
+ protected static HBaseTestingUtility utility;
protected static final Configuration config = HBaseConfiguration.create();
private static class TearDownMiniClusterThreadFactory implements ThreadFactory {
@@ -720,9 +734,9 @@ public abstract class BaseTest {
throw new IllegalStateException("Used up all unique names");
}
TABLE_COUNTER.incrementAndGet();
- return "T" + Integer.toString(MAX_SUFFIX_VALUE + nextName).substring(1);
+ return "N" + Integer.toString(MAX_SUFFIX_VALUE + nextName).substring(1);
}
-
+
private static AtomicInteger SEQ_NAME_SUFFIX = new AtomicInteger(0);
private static final int MAX_SEQ_SUFFIX_VALUE = 1000000;
@@ -872,12 +886,18 @@ public abstract class BaseTest {
// Make sure all tables and views have been dropped
props.remove(CURRENT_SCN_ATTRIB);
try (Connection seeLatestConn = DriverManager.getConnection(url, props)) {
- DatabaseMetaData dbmd = seeLatestConn.getMetaData();
- ResultSet rs = dbmd.getTables(null, null, null, new String[]{PTableType.VIEW.toString(), PTableType.TABLE.toString()});
- boolean hasTables = rs.next();
- if (hasTables) {
- fail("The following tables are not deleted that should be:" + getTableNames(rs));
- }
+ DatabaseMetaData dbmd = seeLatestConn.getMetaData();
+ ResultSet rs = dbmd.getTables(null, null, null, new String[]{PTableType.VIEW.toString(), PTableType.TABLE.toString()});
+ while (rs.next()) {
+ String fullTableName = SchemaUtil.getEscapedTableName(
+ rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM),
+ rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
+ try {
+ PhoenixRuntime.getTable(conn, fullTableName);
+ fail("The following tables are not deleted that should be:" + getTableNames(rs));
+ } catch (TableNotFoundException e) {
+ }
+ }
}
}
finally {
@@ -926,12 +946,12 @@ public abstract class BaseTest {
}
private static String getTableNames(ResultSet rs) throws SQLException {
- StringBuilder buf = new StringBuilder();
- do {
- buf.append(" ");
- buf.append(SchemaUtil.getTableName(rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM), rs.getString(PhoenixDatabaseMetaData.TABLE_NAME)));
- } while (rs.next());
- return buf.toString();
+ StringBuilder buf = new StringBuilder();
+ do {
+ buf.append(" ");
+ buf.append(SchemaUtil.getTableName(rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM), rs.getString(PhoenixDatabaseMetaData.TABLE_NAME)));
+ } while (rs.next());
+ return buf.toString();
}
private static String getSchemaNames(ResultSet rs) throws SQLException {
@@ -1281,10 +1301,10 @@ public abstract class BaseTest {
}
private static String initEntityHistoryTableValues(String tableName, String tenantId, byte[][] splits, Date date, Long ts, String url) throws Exception {
- if (tableName == null) {
- tableName = generateUniqueName();
- }
-
+ if (tableName == null) {
+ tableName = generateUniqueName();
+ }
+
if (ts == null) {
ensureTableCreated(url, tableName, ENTITY_HISTORY_TABLE_NAME, splits, null);
} else {
@@ -1391,10 +1411,10 @@ public abstract class BaseTest {
}
protected static String initSaltedEntityHistoryTableValues(String tableName, String tenantId, byte[][] splits, Date date, Long ts, String url) throws Exception {
- if (tableName == null) {
- tableName = generateUniqueName();
- }
-
+ if (tableName == null) {
+ tableName = generateUniqueName();
+ }
+
if (ts == null) {
ensureTableCreated(url, tableName, ENTITY_HISTORY_SALTED_TABLE_NAME, splits, null);
} else {
@@ -1613,42 +1633,42 @@ public abstract class BaseTest {
}
public static void upsertRows(Connection conn, String fullTableName, int numRows) throws SQLException {
- for (int i=1; i<=numRows; ++i) {
- upsertRow(conn, fullTableName, i, false);
- }
+ for (int i=1; i<=numRows; ++i) {
+ upsertRow(conn, fullTableName, i, false);
+ }
}
public static void upsertRow(Connection conn, String fullTableName, int index, boolean firstRowInBatch) throws SQLException {
- String upsert = "UPSERT INTO " + fullTableName
+ String upsert = "UPSERT INTO " + fullTableName
+ " VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
- PreparedStatement stmt = conn.prepareStatement(upsert);
- stmt.setString(1, firstRowInBatch ? "firstRowInBatch_" : "" + "varchar"+index);
- stmt.setString(2, "char"+index);
- stmt.setInt(3, index);
- stmt.setLong(4, index);
- stmt.setBigDecimal(5, new BigDecimal(index));
- Date date = DateUtil.parseDate("2015-01-01 00:00:00");
- stmt.setDate(6, date);
- stmt.setString(7, "varchar_a");
- stmt.setString(8, "chara");
- stmt.setInt(9, index+1);
- stmt.setLong(10, index+1);
- stmt.setBigDecimal(11, new BigDecimal(index+1));
- stmt.setDate(12, date);
- stmt.setString(13, "varchar_b");
- stmt.setString(14, "charb");
- stmt.setInt(15, index+2);
- stmt.setLong(16, index+2);
- stmt.setBigDecimal(17, new BigDecimal(index+2));
- stmt.setDate(18, date);
- stmt.executeUpdate();
- }
+ PreparedStatement stmt = conn.prepareStatement(upsert);
+ stmt.setString(1, (firstRowInBatch ? "firstRowInBatch_" : "") + "varchar"+index);
+ stmt.setString(2, "char"+index);
+ stmt.setInt(3, index);
+ stmt.setLong(4, index);
+ stmt.setBigDecimal(5, new BigDecimal(index));
+ Date date = DateUtil.parseDate("2015-01-01 00:00:00");
+ stmt.setDate(6, date);
+ stmt.setString(7, "varchar_a");
+ stmt.setString(8, "chara");
+ stmt.setInt(9, index+1);
+ stmt.setLong(10, index+1);
+ stmt.setBigDecimal(11, new BigDecimal(index+1));
+ stmt.setDate(12, date);
+ stmt.setString(13, "varchar_b");
+ stmt.setString(14, "charb");
+ stmt.setInt(15, index+2);
+ stmt.setLong(16, index+2);
+ stmt.setBigDecimal(17, new BigDecimal(index+2));
+ stmt.setDate(18, date);
+ stmt.executeUpdate();
+ }
// Populate the test table with data.
public static void populateTestTable(String fullTableName) throws SQLException {
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
- upsertRows(conn, fullTableName, 3);
+ upsertRows(conn, fullTableName, 3);
conn.commit();
}
}
@@ -1758,4 +1778,156 @@ public abstract class BaseTest {
}
phxConn.close();
}
+
+
+ /**
+ * Split SYSTEM.CATALOG at the given split point
+ */
+ protected static void splitRegion(byte[] splitPoint) throws SQLException, IOException, InterruptedException {
+ HBaseAdmin admin =
+ driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
+ admin.split(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME, splitPoint);
+ // make sure the split finishes (there's no synchronous splitting before HBase 2.x)
+ admin.disableTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME);
+ admin.enableTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME);
+ }
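The disable/enable cycle is a blunt way to block until the asynchronous split completes. An alternative sketch (same admin and splitPoint as in the method above) polls the region count instead:

    // Capture the region count, request the split, then poll until a new region appears.
    int regionsBefore = admin.getTableRegions(
            PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME).size();
    admin.split(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME, splitPoint);
    while (admin.getTableRegions(
            PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME).size() <= regionsBefore) {
        Thread.sleep(100); // splits are asynchronous before HBase 2.x
    }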
+
+ /**
+ * Returns true if the region contains at least one of the metadata rows we are interested in
+ */
+ protected static boolean regionContainsMetadataRows(HRegionInfo regionInfo,
+ List<byte[]> metadataRowKeys) {
+ for (byte[] rowKey : metadataRowKeys) {
+ if (regionInfo.containsRow(rowKey)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Splits SYSTEM.CATALOG into multiple regions based on the table or view names passed in.
+ * Metadata for each table or view is moved to a separate region.
+ * @param tenantToTableAndViewMap map from tenant to tables and views owned by the tenant
+ */
+ protected static void splitSystemCatalog(Map<String, List<String>> tenantToTableAndViewMap) throws Exception {
+ List<byte[]> splitPoints = Lists.newArrayListWithExpectedSize(5);
+ // add the row keys of the table or view metadata rows
+ Set<String> schemaNameSet=Sets.newHashSetWithExpectedSize(15);
+ for (Entry<String, List<String>> entrySet : tenantToTableAndViewMap.entrySet()) {
+ String tenantId = entrySet.getKey();
+ for (String fullName : entrySet.getValue()) {
+ String schemaName = SchemaUtil.getSchemaNameFromFullName(fullName);
+ // we don't allow SYSTEM.CATALOG to split within a schema, so to ensure each table
+ // or view is on a separate region they need to have a unique tenant and schema name
+ assertTrue("Schema names of tables/view must be unique ", schemaNameSet.add(tenantId+"."+schemaName));
+ String tableName = SchemaUtil.getTableNameFromFullName(fullName);
+ splitPoints.add(
+ SchemaUtil.getTableKey(tenantId, "".equals(schemaName) ? null : schemaName, tableName));
+ }
+ }
+ Collections.sort(splitPoints, Bytes.BYTES_COMPARATOR);
+
+ HBaseAdmin admin =
+ driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
+ assertTrue("Needs at least two split points ", splitPoints.size() > 1);
+ assertTrue(
+ "Number of split points should be less than or equal to the number of region servers ",
+ splitPoints.size() <= NUM_SLAVES_BASE);
+ HBaseTestingUtility util = getUtility();
+ MiniHBaseCluster cluster = util.getHBaseCluster();
+ HMaster master = cluster.getMaster();
+ AssignmentManager am = master.getAssignmentManager();
+ // No need to split on the first splitPoint since region end keys are exclusive
+ for (int i=1; i<splitPoints.size(); ++i) {
+ splitRegion(splitPoints.get(i));
+ }
+ HashMap<ServerName, List<HRegionInfo>> serverToRegionsList = Maps.newHashMapWithExpectedSize(NUM_SLAVES_BASE);
+ Deque<ServerName> availableRegionServers = new ArrayDeque<ServerName>(NUM_SLAVES_BASE);
+ for (int i=0; i<NUM_SLAVES_BASE; ++i) {
+ availableRegionServers.push(util.getHBaseCluster().getRegionServer(i).getServerName());
+ }
+ List<HRegionInfo> tableRegions =
+ admin.getTableRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME);
+ for (HRegionInfo hRegionInfo : tableRegions) {
+ // filter on regions we are interested in
+ if (regionContainsMetadataRows(hRegionInfo, splitPoints)) {
+ ServerName serverName = am.getRegionStates().getRegionServerOfRegion(hRegionInfo);
+ if (!serverToRegionsList.containsKey(serverName)) {
+ serverToRegionsList.put(serverName, new ArrayList<HRegionInfo>());
+ }
+ serverToRegionsList.get(serverName).add(hRegionInfo);
+ availableRegionServers.remove(serverName);
+ }
+ }
+ assertTrue("No region servers available to move regions on to ", !availableRegionServers.isEmpty());
+ for (Entry<ServerName, List<HRegionInfo>> entry : serverToRegionsList.entrySet()) {
+ List<HRegionInfo> regions = entry.getValue();
+ if (regions.size()>1) {
+ for (int i=1; i< regions.size(); ++i) {
+ moveRegion(regions.get(i), entry.getKey(), availableRegionServers.pop());
+ }
+ }
+ }
+
+ // verify each region of SYSTEM.CATALOG is on its own region server
+ tableRegions =
+ admin.getTableRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME);
+ Set<ServerName> serverNames = Sets.newHashSet();
+ for (HRegionInfo hRegionInfo : tableRegions) {
+ // filter on regions we are interested in
+ if (regionContainsMetadataRows(hRegionInfo, splitPoints)) {
+ ServerName serverName = am.getRegionStates().getRegionServerOfRegion(hRegionInfo);
+ // Set.add() returns false if this server already hosts one of our regions
+ assertTrue("Multiple regions on " + serverName.getServerName(), serverNames.add(serverName));
+ }
+ }
+ }
+
+ private static int getRegionServerIndex(MiniHBaseCluster cluster, ServerName serverName) {
+ // we have a small number of region servers, so a linear scan is fine for now.
+ List<RegionServerThread> servers = cluster.getRegionServerThreads();
+ for (int i = 0; i < servers.size(); i++) {
+ if (servers.get(i).getRegionServer().getServerName().equals(serverName)) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ /**
+ * Moves the given region from the source region server to the destination
+ * region server and waits for the move to complete
+ */
+ private static void moveRegion(HRegionInfo regionInfo, ServerName srcServerName, ServerName dstServerName) throws Exception {
+ HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
+ HBaseTestingUtility util = getUtility();
+ MiniHBaseCluster cluster = util.getHBaseCluster();
+ HMaster master = cluster.getMaster();
+ AssignmentManager am = master.getAssignmentManager();
+
+ HRegionServer dstServer = cluster.getRegionServer(getRegionServerIndex(cluster, dstServerName));
+ HRegionServer srcServer = cluster.getRegionServer(getRegionServerIndex(cluster, srcServerName));
+ byte[] encodedRegionNameInBytes = regionInfo.getEncodedNameAsBytes();
+ admin.move(encodedRegionNameInBytes, Bytes.toBytes(dstServer.getServerName().getServerName()));
+ while (dstServer.getOnlineRegion(regionInfo.getRegionName()) == null
+ || dstServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
+ || srcServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
+ || am.getRegionStates().isRegionsInTransition()) {
+ // wait for the move to be finished
+ Thread.sleep(100);
+ }
+ }
}
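
The test above filters SYSTEM.CATALOG regions with regionContainsMetadataRows(), which is not part of this excerpt. A minimal sketch of such a predicate, assuming splitPoints holds the sorted metadata row keys built earlier, could look like this:

    // Hypothetical helper: true if any metadata row key falls in the region's
    // [startKey, endKey) range; empty start/end keys mark the table's first
    // and last regions.
    private static boolean regionContainsMetadataRows(HRegionInfo regionInfo,
            List<byte[]> splitPoints) {
        for (byte[] rowKey : splitPoints) {
            boolean afterStart = regionInfo.getStartKey().length == 0
                    || Bytes.compareTo(rowKey, regionInfo.getStartKey()) >= 0;
            boolean beforeEnd = regionInfo.getEndKey().length == 0
                    || Bytes.compareTo(rowKey, regionInfo.getEndKey()) < 0;
            if (afterStart && beforeEnd) {
                return true;
            }
        }
        return false;
    }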
http://git-wip-us.apache.org/repos/asf/phoenix/blob/93fdd5ba/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java
index 0443b77..485a21f 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
@@ -62,7 +63,26 @@ public class MetaDataUtilTest {
assertFalse(MetaDataUtil.areClientAndServerCompatible(VersionUtil.encodeVersion(3,1,10), 3, 5));
}
- /**
+ @Test
+ public void testMutatingAPut() throws Exception {
+ String version = VersionInfo.getVersion();
+ KeyValueBuilder builder = KeyValueBuilder.get(version);
+ byte[] row = Bytes.toBytes("row");
+ byte[] family = PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES;
+ byte[] qualifier = Bytes.toBytes("qual");
+ byte[] value = Bytes.toBytes("generic-value");
+ KeyValue kv = builder.buildPut(wrap(row), wrap(family), wrap(qualifier), wrap(value));
+ Put put = new Put(row);
+ KeyValueBuilder.addQuietly(put, builder, kv);
+ byte[] newValue = Bytes.toBytes("new-value");
+ Cell cell = put.get(family, qualifier).get(0);
+ assertEquals(Bytes.toString(value), Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
+ MetaDataUtil.mutatePutValue(put, family, qualifier, newValue);
+ cell = put.get(family, qualifier).get(0);
+ assertEquals(Bytes.toString(newValue), Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
+ }
+
+ /**
* Ensure it supports {@link GenericKeyValueBuilder}
* @throws Exception on failure
*/
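
The new testMutatingAPut test exercises MetaDataUtil.mutatePutValue, whose implementation is not shown in this excerpt. One plausible shape for it, replacing the matching cell in the Put's family cell map while keeping the original row key and timestamp, is sketched below:

    // Sketch only: swap the value of the (family, qualifier) cell in a Put.
    public static void mutatePutValue(Put put, byte[] family, byte[] qualifier,
            byte[] newValue) {
        List<Cell> cells = put.getFamilyCellMap().get(family);
        if (cells == null) {
            return;
        }
        for (int i = 0; i < cells.size(); i++) {
            Cell cell = cells.get(i);
            if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(),
                    cell.getQualifierLength(), qualifier, 0, qualifier.length) == 0) {
                // keep the row and timestamp, replace only the value
                cells.set(i, new KeyValue(put.getRow(), family, qualifier,
                        cell.getTimestamp(), newValue));
            }
        }
    }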
http://git-wip-us.apache.org/repos/asf/phoenix/blob/93fdd5ba/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
index 6920772..1683a13 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
@@ -132,7 +132,7 @@ public class TestUtil {
private static final Log LOG = LogFactory.getLog(TestUtil.class);
private static final Long ZERO = new Long(0);
- public static final String DEFAULT_SCHEMA_NAME = "";
+ public static final String DEFAULT_SCHEMA_NAME = "S";
public static final String DEFAULT_DATA_TABLE_NAME = "T";
public static final String DEFAULT_INDEX_TABLE_NAME = "I";
public static final String DEFAULT_DATA_TABLE_FULL_NAME = SchemaUtil.getTableName(DEFAULT_SCHEMA_NAME, "T");
@@ -722,6 +722,22 @@ public class TestUtil {
public String getExpressionStr() {
return null;
}
+
+ @Override
+ public long getTimestamp() {
+ return HConstants.LATEST_TIMESTAMP;
+ }
+
+ @Override
+ public boolean isDerived() {
+ return false;
+ }
+
+ @Override
+ public boolean isExcluded() {
+ return false;
+ }
+
@Override
public boolean isRowTimestamp() {
return false;
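
Changing DEFAULT_SCHEMA_NAME from "" to "S" means the derived full-name constants are now schema qualified. Assuming SchemaUtil.getTableName joins a non-empty schema and table name with a dot, the effect is:

    // Illustration: DEFAULT_DATA_TABLE_FULL_NAME resolves to "S.T" after this
    // change (it was just "T" before), so tests built on these constants now
    // exercise schema-qualified rows in the multi-region SYSTEM.CATALOG.
    String fullName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME,
            TestUtil.DEFAULT_DATA_TABLE_NAME);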
http://git-wip-us.apache.org/repos/asf/phoenix/blob/93fdd5ba/phoenix-protocol/src/main/MetaDataService.proto
----------------------------------------------------------------------
diff --git a/phoenix-protocol/src/main/MetaDataService.proto b/phoenix-protocol/src/main/MetaDataService.proto
index 369522c..13d8f1a 100644
--- a/phoenix-protocol/src/main/MetaDataService.proto
+++ b/phoenix-protocol/src/main/MetaDataService.proto
@@ -50,6 +50,8 @@ enum MutationCode {
AUTO_PARTITION_SEQUENCE_NOT_FOUND = 20;
CANNOT_COERCE_AUTO_PARTITION_ID = 21;
TOO_MANY_INDEXES = 22;
+ UNABLE_TO_CREATE_CHILD_LINK = 23;
+ UNABLE_TO_UPDATE_PARENT_TABLE = 24;
};
message SharedTableState {
@@ -83,6 +85,9 @@ message GetTableRequest {
required int64 tableTimestamp = 4;
required int64 clientTimestamp = 5;
optional int32 clientVersion = 6;
+ optional bool skipAddingParentColumns = 7;
+ optional bool skipAddingIndexes = 8;
+ optional PTable lockedAncestorTable = 9;
}
message GetFunctionsRequest {
@@ -125,6 +130,7 @@ message DropTableRequest {
required string tableType = 2;
optional bool cascade = 3;
optional int32 clientVersion = 4;
+ optional bool skipAddingParentColumns = 5;
}
message DropSchemaRequest {
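
The new GetTableRequest and DropTableRequest fields are optional, so an upgraded server must tolerate older clients that never set them. A defensive server-side read, assuming the standard accessors protoc generates for these fields, might look like:

    // Sketch: fall back to pre-upgrade behavior when an old client omits the
    // new optional fields.
    boolean skipParentColumns = request.hasSkipAddingParentColumns()
            && request.getSkipAddingParentColumns();
    boolean skipIndexes = request.hasSkipAddingIndexes()
            && request.getSkipAddingIndexes();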
http://git-wip-us.apache.org/repos/asf/phoenix/blob/93fdd5ba/phoenix-protocol/src/main/PTable.proto
----------------------------------------------------------------------
diff --git a/phoenix-protocol/src/main/PTable.proto b/phoenix-protocol/src/main/PTable.proto
index 16381dd..bc868bc 100644
--- a/phoenix-protocol/src/main/PTable.proto
+++ b/phoenix-protocol/src/main/PTable.proto
@@ -35,12 +35,12 @@ enum PTableType {
message PColumn {
required bytes columnNameBytes = 1;
optional bytes familyNameBytes = 2;
- required string dataType = 3;
+ optional string dataType = 3;
optional int32 maxLength = 4;
optional int32 scale = 5;
required bool nullable = 6;
required int32 position = 7;
- required int32 sortOrder = 8;
+ optional int32 sortOrder = 8;
optional int32 arraySize = 9;
optional bytes viewConstant = 10;
optional bool viewReferenced = 11;
@@ -48,6 +48,8 @@ message PColumn {
optional bool isRowTimestamp = 13;
optional bool isDynamic = 14;
optional bytes columnQualifierBytes = 15;
+ optional int64 timestamp = 16;
+ optional bool derived = 17 [default = false];
}
message PTableStats {
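
Relaxing dataType and sortOrder from required to optional lets ancestor columns that are excluded or not yet resolved omit them, but readers must then check for presence before dereferencing. A sketch of such a defensive read, assuming the generated PTableProtos.PColumn accessors and Phoenix's PDataType/SortOrder helpers:

    // Sketch: only decode the fields when the message actually carries them.
    PDataType dataType = column.hasDataType()
            ? PDataType.fromSqlTypeName(column.getDataType())
            : null;
    SortOrder sortOrder = column.hasSortOrder()
            ? SortOrder.fromSystemValue(column.getSortOrder())
            : SortOrder.getDefault();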
http://git-wip-us.apache.org/repos/asf/phoenix/blob/93fdd5ba/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 7e74e63..4412c15 100644
--- a/pom.xml
+++ b/pom.xml
@@ -320,6 +320,24 @@
<goal>verify</goal>
</goals>
</execution>
+ <execution>
+ <id>SplitSystemCatalogTests</id>
+ <configuration>
+ <encoding>UTF-8</encoding>
+ <forkCount>${numForkedIT}</forkCount>
+ <runOrder>alphabetical</runOrder>
+ <reuseForks>false</reuseForks>
+ <argLine>-enableassertions -Xmx2000m -XX:MaxPermSize=256m -Djava.security.egd=file:/dev/./urandom "-Djava.library.path=${hadoop.library.path}${path.separator}${java.library.path}" -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=./target/</argLine>
+ <redirectTestOutputToFile>${test.output.tofile}</redirectTestOutputToFile>
+ <testSourceDirectory>${basedir}/src/it/java</testSourceDirectory>
+ <groups>org.apache.phoenix.end2end.SplitSystemCatalogTests</groups>
+ <shutdown>kill</shutdown>
+ </configuration>
+ <goals>
+ <goal>integration-test</goal>
+ <goal>verify</goal>
+ </goals>
+ </execution>
</executions>
</plugin>
<plugin>
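
The added failsafe execution only picks up tests tagged with the SplitSystemCatalogTests category named in <groups>. In JUnit 4 such a category is a plain marker interface that test classes reference via @Category; a minimal sketch, with a hypothetical test class name:

    import org.junit.experimental.categories.Category;

    // Marker interface matching the <groups> element above.
    public interface SplitSystemCatalogTests {
    }

    // Hypothetical test class: the category tag routes it to the new execution.
    @Category(SplitSystemCatalogTests.class)
    public class ExampleSplitSystemCatalogIT {
    }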