You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@phoenix.apache.org by ja...@apache.org on 2016/09/21 01:41:33 UTC
[1/4] phoenix git commit: PHOENIX-3290 Move and/or combine as many
NeedsOwnCluster tests to bring down test run time
Repository: phoenix
Updated Branches:
refs/heads/master 7601d5942 -> 2d27179b3
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index 35e1c62..425e84c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -58,16 +58,17 @@ public class ViewIT extends BaseViewIT {
super(transactional);
}
- @Test
- public void testReadOnlyView() throws Exception {
+ @Test
+ public void testReadOnlyOnReadOnlyView() throws Exception {
Connection earlierCon = DriverManager.getConnection(getUrl());
Connection conn = DriverManager.getConnection(getUrl());
- String ddl = "CREATE TABLE " + fullTableName + " (k INTEGER NOT NULL PRIMARY KEY, v1 DATE) "+ tableDDLOptions;
+ String ddl = "CREATE TABLE " + fullTableName + " (k INTEGER NOT NULL PRIMARY KEY, v1 DATE) "+ tableDDLOptions;
conn.createStatement().execute(ddl);
- ddl = "CREATE VIEW v (v2 VARCHAR) AS SELECT * FROM " + tableName + " WHERE k > 5";
+ String fullParentViewName = "V_" + generateRandomString();
+ ddl = "CREATE VIEW " + fullParentViewName + " (v2 VARCHAR) AS SELECT * FROM " + tableName + " WHERE k > 5";
conn.createStatement().execute(ddl);
try {
- conn.createStatement().execute("UPSERT INTO v VALUES(1)");
+ conn.createStatement().execute("UPSERT INTO " + fullParentViewName + " VALUES(1)");
fail();
} catch (ReadOnlyTableException e) {
@@ -77,42 +78,37 @@ public class ViewIT extends BaseViewIT {
}
conn.commit();
- analyzeTable(conn, "v", transactional);
+ analyzeTable(conn, fullParentViewName, transactional);
- List<KeyRange> splits = getAllSplits(conn, "v");
+ List<KeyRange> splits = getAllSplits(conn, fullParentViewName);
assertEquals(4, splits.size());
int count = 0;
- ResultSet rs = conn.createStatement().executeQuery("SELECT k FROM " + tableName);
+ ResultSet rs = conn.createStatement().executeQuery("SELECT k FROM " + fullTableName);
while (rs.next()) {
assertEquals(count++, rs.getInt(1));
}
assertEquals(10, count);
count = 0;
- rs = conn.createStatement().executeQuery("SELECT k FROM v");
+ rs = conn.createStatement().executeQuery("SELECT k FROM " + fullParentViewName);
while (rs.next()) {
count++;
assertEquals(count + 5, rs.getInt(1));
}
assertEquals(4, count);
count = 0;
- rs = earlierCon.createStatement().executeQuery("SELECT k FROM v");
+ rs = earlierCon.createStatement().executeQuery("SELECT k FROM " + fullParentViewName);
while (rs.next()) {
count++;
assertEquals(count + 5, rs.getInt(1));
}
assertEquals(4, count);
- }
-
- @Test
- public void testReadOnlyOnReadOnlyView() throws Exception {
- testReadOnlyView();
- Connection conn = DriverManager.getConnection(getUrl());
- String ddl = "CREATE VIEW v2 AS SELECT * FROM v WHERE k < 9";
+ String fullViewName = "V_" + generateRandomString();
+ ddl = "CREATE VIEW " + fullViewName + " AS SELECT * FROM " + fullParentViewName + " WHERE k < 9";
conn.createStatement().execute(ddl);
try {
- conn.createStatement().execute("UPSERT INTO v2 VALUES(1)");
+ conn.createStatement().execute("UPSERT INTO " + fullViewName + " VALUES(1)");
fail();
} catch (ReadOnlyTableException e) {
@@ -121,8 +117,8 @@ public class ViewIT extends BaseViewIT {
}
conn = DriverManager.getConnection(getUrl());
- int count = 0;
- ResultSet rs = conn.createStatement().executeQuery("SELECT k FROM v2");
+ count = 0;
+ rs = conn.createStatement().executeQuery("SELECT k FROM " + fullViewName);
while (rs.next()) {
count++;
assertEquals(count + 5, rs.getInt(1));
@@ -142,20 +138,21 @@ public class ViewIT extends BaseViewIT {
@Test
public void testUpdatableOnUpdatableView() throws Exception {
- testUpdatableView(null);
+ String viewName = testUpdatableView(null);
Connection conn = DriverManager.getConnection(getUrl());
- String ddl = "CREATE VIEW v2 AS SELECT * FROM v WHERE k3 = 2";
+ String fullViewName = "V_" + generateRandomString();
+ String ddl = "CREATE VIEW " + fullViewName + " AS SELECT * FROM " + viewName + " WHERE k3 = 2";
conn.createStatement().execute(ddl);
- ResultSet rs = conn.createStatement().executeQuery("SELECT k1, k2, k3 FROM v2");
+ ResultSet rs = conn.createStatement().executeQuery("SELECT k1, k2, k3 FROM " + fullViewName);
assertTrue(rs.next());
assertEquals(1, rs.getInt(1));
assertEquals(109, rs.getInt(2));
assertEquals(2, rs.getInt(3));
assertFalse(rs.next());
- conn.createStatement().execute("UPSERT INTO v2(k2) VALUES(122)");
+ conn.createStatement().execute("UPSERT INTO " + fullViewName + "(k2) VALUES(122)");
conn.commit();
- rs = conn.createStatement().executeQuery("SELECT k1, k2, k3 FROM v2 WHERE k2 >= 120");
+ rs = conn.createStatement().executeQuery("SELECT k1, k2, k3 FROM " + fullViewName + " WHERE k2 >= 120");
assertTrue(rs.next());
assertEquals(1, rs.getInt(1));
assertEquals(122, rs.getInt(2));
@@ -163,14 +160,14 @@ public class ViewIT extends BaseViewIT {
assertFalse(rs.next());
try {
- conn.createStatement().execute("UPSERT INTO v2(k2,k3) VALUES(123,3)");
+ conn.createStatement().execute("UPSERT INTO " + fullViewName + "(k2,k3) VALUES(123,3)");
fail();
} catch (SQLException e) {
assertEquals(SQLExceptionCode.CANNOT_UPDATE_VIEW_COLUMN.getErrorCode(), e.getErrorCode());
}
try {
- conn.createStatement().execute("UPSERT INTO v2(k2,k3) select k2, 3 from v2");
+ conn.createStatement().execute("UPSERT INTO " + fullViewName + "(k2,k3) select k2, 3 from " + fullViewName);
fail();
} catch (SQLException e) {
assertEquals(SQLExceptionCode.CANNOT_UPDATE_VIEW_COLUMN.getErrorCode(), e.getErrorCode());
@@ -179,11 +176,12 @@ public class ViewIT extends BaseViewIT {
@Test
public void testReadOnlyOnUpdatableView() throws Exception {
- testUpdatableView(null);
+ String viewName = testUpdatableView(null);
Connection conn = DriverManager.getConnection(getUrl());
- String ddl = "CREATE VIEW v2 AS SELECT * FROM v WHERE k3 > 1 and k3 < 50";
+ String fullViewName = "V_" + generateRandomString();
+ String ddl = "CREATE VIEW " + fullViewName + " AS SELECT * FROM " + viewName + " WHERE k3 > 1 and k3 < 50";
conn.createStatement().execute(ddl);
- ResultSet rs = conn.createStatement().executeQuery("SELECT k1, k2, k3 FROM v2");
+ ResultSet rs = conn.createStatement().executeQuery("SELECT k1, k2, k3 FROM " + fullViewName);
assertTrue(rs.next());
assertEquals(1, rs.getInt(1));
assertEquals(109, rs.getInt(2));
@@ -191,7 +189,7 @@ public class ViewIT extends BaseViewIT {
assertFalse(rs.next());
try {
- conn.createStatement().execute("UPSERT INTO v2 VALUES(1)");
+ conn.createStatement().execute("UPSERT INTO " + fullViewName + " VALUES(1)");
fail();
} catch (ReadOnlyTableException e) {
@@ -199,7 +197,7 @@ public class ViewIT extends BaseViewIT {
conn.createStatement().execute("UPSERT INTO " + fullTableName + "(k1, k2,k3) VALUES(1, 122, 5)");
conn.commit();
- rs = conn.createStatement().executeQuery("SELECT k1, k2, k3 FROM v2 WHERE k2 >= 120");
+ rs = conn.createStatement().executeQuery("SELECT k1, k2, k3 FROM " + fullViewName + " WHERE k2 >= 120");
assertTrue(rs.next());
assertEquals(1, rs.getInt(1));
assertEquals(122, rs.getInt(2));
@@ -212,32 +210,34 @@ public class ViewIT extends BaseViewIT {
Connection conn = DriverManager.getConnection(getUrl());
String ddl = "CREATE TABLE " + fullTableName + " (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 DECIMAL, CONSTRAINT pk PRIMARY KEY (k1, k2))" + tableDDLOptions;
conn.createStatement().execute(ddl);
- ddl = "CREATE VIEW v1(v2 VARCHAR, v3 VARCHAR) AS SELECT * FROM " + fullTableName + " WHERE v1 = 1.0";
+ String fullViewName1 = "V_" + generateRandomString();
+ ddl = "CREATE VIEW " + fullViewName1 + "(v2 VARCHAR, v3 VARCHAR) AS SELECT * FROM " + fullTableName + " WHERE v1 = 1.0";
conn.createStatement().execute(ddl);
try {
- conn.createStatement().execute("ALTER VIEW v1 DROP COLUMN v1");
+ conn.createStatement().execute("ALTER VIEW " + fullViewName1 + " DROP COLUMN v1");
fail();
} catch (SQLException e) {
assertEquals(SQLExceptionCode.CANNOT_MUTATE_TABLE.getErrorCode(), e.getErrorCode());
}
- ddl = "CREATE VIEW v2 AS SELECT * FROM v1 WHERE v2 != 'foo'";
+ String fullViewName2 = "V_" + generateRandomString();
+ ddl = "CREATE VIEW " + fullViewName2 + " AS SELECT * FROM " + fullViewName1 + " WHERE v2 != 'foo'";
conn.createStatement().execute(ddl);
try {
- conn.createStatement().execute("ALTER VIEW v2 DROP COLUMN v1");
+ conn.createStatement().execute("ALTER VIEW " + fullViewName2 + " DROP COLUMN v1");
fail();
} catch (SQLException e) {
assertEquals(SQLExceptionCode.CANNOT_MUTATE_TABLE.getErrorCode(), e.getErrorCode());
}
try {
- conn.createStatement().execute("ALTER VIEW v2 DROP COLUMN v2");
+ conn.createStatement().execute("ALTER VIEW " + fullViewName2 + " DROP COLUMN v2");
fail();
} catch (SQLException e) {
assertEquals(SQLExceptionCode.CANNOT_MUTATE_TABLE.getErrorCode(), e.getErrorCode());
}
- conn.createStatement().execute("ALTER VIEW v2 DROP COLUMN v3");
+ conn.createStatement().execute("ALTER VIEW " + fullViewName2 + " DROP COLUMN v3");
}
@@ -245,13 +245,14 @@ public class ViewIT extends BaseViewIT {
public void testReadOnlyViewWithCaseSensitiveTableNames() throws Exception {
Connection earlierCon = DriverManager.getConnection(getUrl());
Connection conn = DriverManager.getConnection(getUrl());
- String caseSensitiveTableName = "\"case_SENSITIVE_table" + tableSuffix + "\"" ;
+ String caseSensitiveTableName = "\"t_" + generateRandomString() + "\"" ;
String ddl = "CREATE TABLE " + caseSensitiveTableName + " (k INTEGER NOT NULL PRIMARY KEY, v1 DATE)" + tableDDLOptions;
conn.createStatement().execute(ddl);
- ddl = "CREATE VIEW \"v\" (v2 VARCHAR) AS SELECT * FROM " + caseSensitiveTableName + " WHERE k > 5";
+ String caseSensitiveViewName = "\"v_" + generateRandomString() + "\"" ;
+ ddl = "CREATE VIEW " + caseSensitiveViewName + " (v2 VARCHAR) AS SELECT * FROM " + caseSensitiveTableName + " WHERE k > 5";
conn.createStatement().execute(ddl);
try {
- conn.createStatement().execute("UPSERT INTO \"v\" VALUES(1)");
+ conn.createStatement().execute("UPSERT INTO " + caseSensitiveViewName + " VALUES(1)");
fail();
} catch (ReadOnlyTableException e) {
@@ -262,14 +263,14 @@ public class ViewIT extends BaseViewIT {
conn.commit();
int count = 0;
- ResultSet rs = conn.createStatement().executeQuery("SELECT k FROM \"v\"");
+ ResultSet rs = conn.createStatement().executeQuery("SELECT k FROM " + caseSensitiveViewName);
while (rs.next()) {
count++;
assertEquals(count + 5, rs.getInt(1));
}
assertEquals(4, count);
count = 0;
- rs = earlierCon.createStatement().executeQuery("SELECT k FROM \"v\"");
+ rs = earlierCon.createStatement().executeQuery("SELECT k FROM " + caseSensitiveViewName);
while (rs.next()) {
count++;
assertEquals(count + 5, rs.getInt(1));
@@ -282,10 +283,11 @@ public class ViewIT extends BaseViewIT {
Connection conn = DriverManager.getConnection(getUrl());
String ddl = "CREATE TABLE " + fullTableName + " (\"k\" INTEGER NOT NULL PRIMARY KEY, \"v1\" INTEGER, \"a\".v2 VARCHAR)" + tableDDLOptions;
conn.createStatement().execute(ddl);
- ddl = "CREATE VIEW v (v VARCHAR) AS SELECT * FROM " + fullTableName + " WHERE \"k\" > 5 and \"v1\" > 1";
+ String viewName = "V_" + generateRandomString();
+ ddl = "CREATE VIEW " + viewName + " (v VARCHAR) AS SELECT * FROM " + fullTableName + " WHERE \"k\" > 5 and \"v1\" > 1";
conn.createStatement().execute(ddl);
try {
- conn.createStatement().execute("UPSERT INTO v VALUES(1)");
+ conn.createStatement().execute("UPSERT INTO " + viewName + " VALUES(1)");
fail();
} catch (ReadOnlyTableException e) {
@@ -296,7 +298,7 @@ public class ViewIT extends BaseViewIT {
conn.commit();
int count = 0;
- ResultSet rs = conn.createStatement().executeQuery("SELECT \"k\", \"v1\",\"a\".v2 FROM v");
+ ResultSet rs = conn.createStatement().executeQuery("SELECT \"k\", \"v1\",\"a\".v2 FROM " + viewName);
while (rs.next()) {
count++;
assertEquals(count + 5, rs.getInt(1));
@@ -309,10 +311,11 @@ public class ViewIT extends BaseViewIT {
Connection conn = DriverManager.getConnection(getUrl());
String ddl = "CREATE TABLE " + fullTableName + " (k INTEGER NOT NULL PRIMARY KEY, v1 INTEGER, v2 DATE)" + tableDDLOptions;
conn.createStatement().execute(ddl);
- ddl = "CREATE VIEW v (v VARCHAR) AS SELECT * FROM " + fullTableName + " WHERE v2 > CURRENT_DATE()-5 AND v2 > DATE '2010-01-01'";
+ String viewName = "V_" + generateRandomString();
+ ddl = "CREATE VIEW " + viewName + " (v VARCHAR) AS SELECT * FROM " + fullTableName + " WHERE v2 > CURRENT_DATE()-5 AND v2 > DATE '2010-01-01'";
conn.createStatement().execute(ddl);
try {
- conn.createStatement().execute("UPSERT INTO v VALUES(1)");
+ conn.createStatement().execute("UPSERT INTO " + viewName + " VALUES(1)");
fail();
} catch (ReadOnlyTableException e) {
@@ -323,7 +326,7 @@ public class ViewIT extends BaseViewIT {
conn.commit();
int count = 0;
- ResultSet rs = conn.createStatement().executeQuery("SELECT k FROM v");
+ ResultSet rs = conn.createStatement().executeQuery("SELECT k FROM " + viewName);
while (rs.next()) {
assertEquals(count, rs.getInt(1));
count++;
@@ -347,40 +350,45 @@ public class ViewIT extends BaseViewIT {
Properties props = new Properties();
props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(isNamespaceMapped));
Connection conn = DriverManager.getConnection(getUrl(),props);
- String fullTableName = "s1.t" + tableSuffix + (isNamespaceMapped ? "_N" : "");
+ String schemaName1 = "S_" + generateRandomString();
+ String fullTableName1 = SchemaUtil.getTableName(schemaName1, tableName);
if (isNamespaceMapped) {
- conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS s1");
+ conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName1);
}
- String ddl = "CREATE TABLE " + fullTableName + " (k INTEGER NOT NULL PRIMARY KEY, v1 DATE)" + tableDDLOptions;
+ String ddl = "CREATE TABLE " + fullTableName1 + " (k INTEGER NOT NULL PRIMARY KEY, v1 DATE)" + tableDDLOptions;
HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
conn.createStatement().execute(ddl);
- assertTrue(admin.tableExists(SchemaUtil.getPhysicalTableName(SchemaUtil.normalizeIdentifier(fullTableName),
+ assertTrue(admin.tableExists(SchemaUtil.getPhysicalTableName(SchemaUtil.normalizeIdentifier(fullTableName1),
conn.unwrap(PhoenixConnection.class).getQueryServices().getProps())));
- ddl = "CREATE VIEW s2.v1 (v2 VARCHAR) AS SELECT * FROM " + fullTableName + " WHERE k > 5";
+ String viewName = "V_" + generateRandomString();
+ String viewSchemaName = "S_" + generateRandomString();
+ String fullViewName1 = SchemaUtil.getTableName(viewSchemaName, viewName);
+ ddl = "CREATE VIEW " + fullViewName1 + " (v2 VARCHAR) AS SELECT * FROM " + fullTableName1 + " WHERE k > 5";
conn.createStatement().execute(ddl);
- ddl = "CREATE VIEW v2 (v2 VARCHAR) AS SELECT * FROM " + fullTableName + " WHERE k > 5";
+ String fullViewName2 = "V_" + generateRandomString();
+ ddl = "CREATE VIEW " + fullViewName2 + " (v2 VARCHAR) AS SELECT * FROM " + fullTableName1 + " WHERE k > 5";
conn.createStatement().execute(ddl);
- conn.createStatement().executeQuery("SELECT * FROM s2.v1");
- conn.createStatement().executeQuery("SELECT * FROM v2");
- ddl = "DROP VIEW v1";
+ conn.createStatement().executeQuery("SELECT * FROM " + fullViewName1);
+ conn.createStatement().executeQuery("SELECT * FROM " + fullViewName2);
+ ddl = "DROP VIEW " + viewName;
try {
conn.createStatement().execute(ddl);
fail();
} catch (TableNotFoundException ignore) {
}
- ddl = "DROP VIEW s2.v1";
+ ddl = "DROP VIEW " + fullViewName1;
conn.createStatement().execute(ddl);
- ddl = "DROP VIEW s2.v2";
+ ddl = "DROP VIEW " + SchemaUtil.getTableName(viewSchemaName, generateRandomString());
try {
conn.createStatement().execute(ddl);
fail();
} catch (TableNotFoundException ignore) {
}
- ddl = "DROP TABLE " + fullTableName;
- validateCannotDropTableWithChildViewsWithoutCascade(conn, fullTableName);
- ddl = "DROP VIEW v2";
+ ddl = "DROP TABLE " + fullTableName1;
+ validateCannotDropTableWithChildViewsWithoutCascade(conn, fullTableName1);
+ ddl = "DROP VIEW " + fullViewName2;
conn.createStatement().execute(ddl);
- ddl = "DROP TABLE " + fullTableName;
+ ddl = "DROP TABLE " + fullTableName1;
conn.createStatement().execute(ddl);
}
@@ -390,7 +398,8 @@ public class ViewIT extends BaseViewIT {
Connection conn = DriverManager.getConnection(getUrl());
String ddl = "CREATE TABLE " + fullTableName + " (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 DECIMAL, CONSTRAINT pk PRIMARY KEY (k1, k2))" + tableDDLOptions;
conn.createStatement().execute(ddl);
- ddl = "CREATE VIEW v1(v2 VARCHAR, v3 VARCHAR) AS SELECT * FROM " + fullTableName + " WHERE v1 = 1.0";
+ String viewName = "V_" + generateRandomString();
+ ddl = "CREATE VIEW " + viewName + "(v2 VARCHAR, v3 VARCHAR) AS SELECT * FROM " + fullTableName + " WHERE v1 = 1.0";
conn.createStatement().execute(ddl);
try {
@@ -404,15 +413,18 @@ public class ViewIT extends BaseViewIT {
@Test
public void testViewAndTableAndDropCascade() throws Exception {
// Setup
- String fullTableName = "s2.t"+tableSuffix;
Connection conn = DriverManager.getConnection(getUrl());
String ddl = "CREATE TABLE " + fullTableName + " (k INTEGER NOT NULL PRIMARY KEY, v1 DATE)" + tableDDLOptions;
conn.createStatement().execute(ddl);
- ddl = "CREATE VIEW s2.v1 (v2 VARCHAR) AS SELECT * FROM " + fullTableName + " WHERE k > 5";
+ String viewName = "V_" + generateRandomString();
+ String viewSchemaName = "S_" + generateRandomString();
+ String fullViewName1 = SchemaUtil.getTableName(viewSchemaName, viewName);
+ ddl = "CREATE VIEW " + fullViewName1 + " (v2 VARCHAR) AS SELECT * FROM " + fullTableName + " WHERE k > 5";
conn.createStatement().execute(ddl);
- ddl = "CREATE LOCAL INDEX idx on s2.v1(v2)";
+ ddl = "CREATE LOCAL INDEX idx on " + fullViewName1 + "(v2)";
conn.createStatement().execute(ddl);
- ddl = "CREATE VIEW s2.v2 (v2 VARCHAR) AS SELECT * FROM " + fullTableName + " WHERE k > 10";
+ String fullViewName2 = SchemaUtil.getTableName(viewSchemaName, "V_" + generateRandomString());
+ ddl = "CREATE VIEW " + fullViewName2 + "(v2 VARCHAR) AS SELECT * FROM " + fullTableName + " WHERE k > 10";
conn.createStatement().execute(ddl);
validateCannotDropTableWithChildViewsWithoutCascade(conn, fullTableName);
@@ -420,8 +432,8 @@ public class ViewIT extends BaseViewIT {
// Execute DROP...CASCADE
conn.createStatement().execute("DROP TABLE " + fullTableName + " CASCADE");
- validateViewDoesNotExist(conn, "s2.v1");
- validateViewDoesNotExist(conn, "s2.v2");
+ validateViewDoesNotExist(conn, fullViewName1);
+ validateViewDoesNotExist(conn, fullViewName2);
}
@Test
@@ -429,21 +441,26 @@ public class ViewIT extends BaseViewIT {
// Setup - Tables and Views with Indexes
Connection conn = DriverManager.getConnection(getUrl());
- String fullTableName = "s3.t"+tableSuffix;
if (tableDDLOptions.length()!=0)
tableDDLOptions+=",";
tableDDLOptions+="IMMUTABLE_ROWS=true";
String ddl = "CREATE TABLE " + fullTableName + " (k INTEGER NOT NULL PRIMARY KEY, v1 DATE)" + tableDDLOptions;
conn.createStatement().execute(ddl);
- ddl = "CREATE INDEX IDX1 ON " + fullTableName + " (v1)";
+ String viewSchemaName = "S_" + generateRandomString();
+ String fullViewName1 = SchemaUtil.getTableName(viewSchemaName, "V_" + generateRandomString());
+ String fullViewName2 = SchemaUtil.getTableName(viewSchemaName, "V_" + generateRandomString());
+ String indexName1 = "I_" + generateRandomString();
+ String indexName2 = "I_" + generateRandomString();
+ String indexName3 = "I_" + generateRandomString();
+ ddl = "CREATE INDEX " + indexName1 + " ON " + fullTableName + " (v1)";
conn.createStatement().execute(ddl);
- ddl = "CREATE VIEW s3.v1 (v2 VARCHAR) AS SELECT * FROM " + fullTableName + " WHERE k > 5";
+ ddl = "CREATE VIEW " + fullViewName1 + " (v2 VARCHAR) AS SELECT * FROM " + fullTableName + " WHERE k > 5";
conn.createStatement().execute(ddl);
- ddl = "CREATE INDEX IDX2 ON s3.v1 (v2)";
+ ddl = "CREATE INDEX " + indexName2 + " ON " + fullViewName1 + " (v2)";
conn.createStatement().execute(ddl);
- ddl = "CREATE VIEW s3.v2 (v2 VARCHAR) AS SELECT * FROM " + fullTableName + " WHERE k > 10";
+ ddl = "CREATE VIEW " + fullViewName2 + " (v2 VARCHAR) AS SELECT * FROM " + fullTableName + " WHERE k > 10";
conn.createStatement().execute(ddl);
- ddl = "CREATE INDEX IDX3 ON s3.v2 (v2)";
+ ddl = "CREATE INDEX " + indexName3 + " ON " + fullViewName2 + " (v2)";
conn.createStatement().execute(ddl);
validateCannotDropTableWithChildViewsWithoutCascade(conn, fullTableName);
@@ -452,8 +469,8 @@ public class ViewIT extends BaseViewIT {
conn.createStatement().execute("DROP TABLE " + fullTableName + " CASCADE");
// Validate Views were deleted - Try and delete child views, should throw TableNotFoundException
- validateViewDoesNotExist(conn, "s3.v1");
- validateViewDoesNotExist(conn, "s3.v2");
+ validateViewDoesNotExist(conn, fullViewName1);
+ validateViewDoesNotExist(conn, fullViewName2);
}
@@ -469,11 +486,11 @@ public class ViewIT extends BaseViewIT {
}
- private void validateViewDoesNotExist(Connection conn, String viewName) throws SQLException {
+ private void validateViewDoesNotExist(Connection conn, String fullViewName) throws SQLException {
try {
- String ddl1 = "DROP VIEW " + viewName;
+ String ddl1 = "DROP VIEW " + fullViewName;
conn.createStatement().execute(ddl1);
- fail("View s3.v1 should have been deleted when parent was dropped");
+ fail("View " + fullViewName + " should have been deleted when parent was dropped");
} catch (TableNotFoundException e) {
//Expected
}
@@ -498,10 +515,14 @@ public class ViewIT extends BaseViewIT {
Connection conn = DriverManager.getConnection(getUrl());
String ddl = "CREATE TABLE " + fullTableName + " (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, k3 DECIMAL, s1 VARCHAR, s2 VARCHAR CONSTRAINT pk PRIMARY KEY (k1, k2, k3))" + tableDDLOptions;
conn.createStatement().execute(ddl);
- conn.createStatement().execute("CREATE " + (localIndex ? "LOCAL " : "") + " INDEX i1 ON " + fullTableName + "(k3, k2) INCLUDE(s1, s2)");
- conn.createStatement().execute("CREATE INDEX i2 ON " + fullTableName + "(k3, k2, s2)");
+ String indexName1 = "I_" + generateRandomString();
+ String fullIndexName1 = SchemaUtil.getTableName(schemaName, indexName1);
+ conn.createStatement().execute("CREATE " + (localIndex ? "LOCAL " : "") + " INDEX " + indexName1 + " ON " + fullTableName + "(k3, k2) INCLUDE(s1, s2)");
+ String indexName2 = "I_" + generateRandomString();
+ conn.createStatement().execute("CREATE INDEX " + indexName2 + " ON " + fullTableName + "(k3, k2, s2)");
- ddl = "CREATE VIEW v AS SELECT * FROM " + fullTableName + " WHERE s1 = 'foo'";
+ String fullViewName = "V_" + generateRandomString();
+ ddl = "CREATE VIEW " + fullViewName + " AS SELECT * FROM " + fullTableName + " WHERE s1 = 'foo'";
conn.createStatement().execute(ddl);
String[] s1Values = {"foo","bar"};
for (int i = 0; i < 10; i++) {
@@ -509,14 +530,15 @@ public class ViewIT extends BaseViewIT {
}
conn.commit();
- rs = conn.createStatement().executeQuery("SELECT count(*) FROM v");
+ rs = conn.createStatement().executeQuery("SELECT count(*) FROM " + fullViewName);
assertTrue(rs.next());
assertEquals(5, rs.getLong(1));
assertFalse(rs.next());
- conn.createStatement().execute("CREATE INDEX vi1 on v(k2)");
+ String viewIndexName = "I_" + generateRandomString();
+ conn.createStatement().execute("CREATE INDEX " + viewIndexName + " on " + fullViewName + "(k2)");
- String query = "SELECT k2 FROM v WHERE k2 IN (100,109) AND k3 IN (1,2) AND s2='bas'";
+ String query = "SELECT k2 FROM " + fullViewName + " WHERE k2 IN (100,109) AND k3 IN (1,2) AND s2='bas'";
rs = conn.createStatement().executeQuery(query);
assertTrue(rs.next());
assertEquals(100, rs.getInt(1));
@@ -530,7 +552,7 @@ public class ViewIT extends BaseViewIT {
"CLIENT MERGE SORT", queryPlan);
} else {
assertEquals(
- "CLIENT PARALLEL 1-WAY SKIP SCAN ON 4 KEYS OVER I1 [1,100] - [2,109]\n" +
+ "CLIENT PARALLEL 1-WAY SKIP SCAN ON 4 KEYS OVER " + fullIndexName1 + " [1,100] - [2,109]\n" +
" SERVER FILTER BY (\"S2\" = 'bas' AND \"S1\" = 'foo')", queryPlan);
}
}
@@ -540,16 +562,17 @@ public class ViewIT extends BaseViewIT {
Connection conn = DriverManager.getConnection(getUrl());
String ddl = "CREATE TABLE " + fullTableName + " (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 DECIMAL, CONSTRAINT pk PRIMARY KEY (k1, k2))" + tableDDLOptions;
conn.createStatement().execute(ddl);
- ddl = "CREATE VIEW v1(v2 VARCHAR, k3 VARCHAR PRIMARY KEY) AS SELECT * FROM " + fullTableName + " WHERE K1 = 1";
+ String fullViewName = "V_" + generateRandomString();
+ ddl = "CREATE VIEW " + fullViewName + "(v2 VARCHAR, k3 VARCHAR PRIMARY KEY) AS SELECT * FROM " + fullTableName + " WHERE K1 = 1";
conn.createStatement().execute(ddl);
// assert PK metadata
- ResultSet rs = conn.getMetaData().getPrimaryKeys(null, null, "V1");
+ ResultSet rs = conn.getMetaData().getPrimaryKeys(null, null, fullViewName);
assertPKs(rs, new String[] {"K1", "K2", "K3"});
// sanity check upserts into base table and view
conn.createStatement().executeUpdate("upsert into " + fullTableName + " (k1, k2, v1) values (1, 1, 1)");
- conn.createStatement().executeUpdate("upsert into v1 (k1, k2, k3, v2) values (1, 1, 'abc', 'def')");
+ conn.createStatement().executeUpdate("upsert into " + fullViewName + " (k1, k2, k3, v2) values (1, 1, 'abc', 'def')");
conn.commit();
// expect 2 rows in the base table
@@ -558,7 +581,7 @@ public class ViewIT extends BaseViewIT {
assertEquals(2, rs.getInt(1));
// expect 2 row in the view
- rs = conn.createStatement().executeQuery("select count(*) from v1");
+ rs = conn.createStatement().executeQuery("select count(*) from " + fullViewName);
assertTrue(rs.next());
assertEquals(2, rs.getInt(1));
}
@@ -568,11 +591,12 @@ public class ViewIT extends BaseViewIT {
Connection conn = DriverManager.getConnection(getUrl());
String ddl = "CREATE TABLE " + fullTableName + " (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 DECIMAL, CONSTRAINT pk PRIMARY KEY (k1, k2))" + tableDDLOptions;
conn.createStatement().execute(ddl);
- ddl = "CREATE VIEW v1(v2 VARCHAR, k3 VARCHAR, k4 INTEGER NOT NULL, CONSTRAINT PKVEW PRIMARY KEY (k3, k4)) AS SELECT * FROM " + fullTableName + " WHERE K1 = 1";
+ String fullViewName = "V_" + generateRandomString();
+ ddl = "CREATE VIEW " + fullViewName + "(v2 VARCHAR, k3 VARCHAR, k4 INTEGER NOT NULL, CONSTRAINT PKVEW PRIMARY KEY (k3, k4)) AS SELECT * FROM " + fullTableName + " WHERE K1 = 1";
conn.createStatement().execute(ddl);
// assert PK metadata
- ResultSet rs = conn.getMetaData().getPrimaryKeys(null, null, "V1");
+ ResultSet rs = conn.getMetaData().getPrimaryKeys(null, null, fullViewName);
assertPKs(rs, new String[] {"K1", "K2", "K3", "K4"});
}
@@ -582,13 +606,14 @@ public class ViewIT extends BaseViewIT {
String fullTableName2 = fullTableName;
String ddl = "CREATE TABLE " + fullTableName2 + " (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 DECIMAL, CONSTRAINT pk PRIMARY KEY (k1, k2))" + tableDDLOptions;
conn.createStatement().execute(ddl);
- ddl = "CREATE VIEW v1 AS SELECT * FROM " + fullTableName + " WHERE v1 = 1.0";
+ String fullViewName = "V_" + generateRandomString();
+ ddl = "CREATE VIEW " + fullViewName + " AS SELECT * FROM " + fullTableName + " WHERE v1 = 1.0";
conn.createStatement().execute(ddl);
- ddl = "ALTER VIEW V1 ADD k3 VARCHAR PRIMARY KEY, k4 VARCHAR PRIMARY KEY, v2 INTEGER";
+ ddl = "ALTER VIEW " + fullViewName + " ADD k3 VARCHAR PRIMARY KEY, k4 VARCHAR PRIMARY KEY, v2 INTEGER";
conn.createStatement().execute(ddl);
// assert PK metadata
- ResultSet rs = conn.getMetaData().getPrimaryKeys(null, null, "V1");
+ ResultSet rs = conn.getMetaData().getPrimaryKeys(null, null, fullViewName);
assertPKs(rs, new String[] {"K1", "K2", "K3", "K4"});
}
@@ -597,16 +622,18 @@ public class ViewIT extends BaseViewIT {
Connection conn = DriverManager.getConnection(getUrl());
String ddl = "CREATE TABLE " + fullTableName + " (k1 INTEGER NOT NULL, k2 VARCHAR NOT NULL, v1 DECIMAL, CONSTRAINT pk PRIMARY KEY (k1, k2))" + tableDDLOptions;
conn.createStatement().execute(ddl);
- ddl = "CREATE VIEW v1 AS SELECT * FROM " + fullTableName + " WHERE v1 = 1.0";
+ String fullViewName1 = "V_" + generateRandomString();
+ ddl = "CREATE VIEW " + fullViewName1 + " AS SELECT * FROM " + fullTableName + " WHERE v1 = 1.0";
conn.createStatement().execute(ddl);
- ddl = "ALTER VIEW V1 ADD k3 VARCHAR PRIMARY KEY, k4 VARCHAR PRIMARY KEY, v2 INTEGER";
+ ddl = "ALTER VIEW " + fullViewName1 + " ADD k3 VARCHAR PRIMARY KEY, k4 VARCHAR PRIMARY KEY, v2 INTEGER";
try {
conn.createStatement().execute(ddl);
fail("View cannot extend PK if parent's last PK is variable length. See https://issues.apache.org/jira/browse/PHOENIX-978.");
} catch (SQLException e) {
assertEquals(CANNOT_MODIFY_VIEW_PK.getErrorCode(), e.getErrorCode());
}
- ddl = "CREATE VIEW v2 (k3 VARCHAR PRIMARY KEY) AS SELECT * FROM " + fullTableName + " WHERE v1 = 1.0";
+ String fullViewName2 = "V_" + generateRandomString();
+ ddl = "CREATE VIEW " + fullViewName2 + " (k3 VARCHAR PRIMARY KEY) AS SELECT * FROM " + fullTableName + " WHERE v1 = 1.0";
try {
conn.createStatement().execute(ddl);
} catch (SQLException e) {
@@ -619,9 +646,10 @@ public class ViewIT extends BaseViewIT {
Connection conn = DriverManager.getConnection(getUrl());
String ddl = "CREATE TABLE " + fullTableName + " (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 DECIMAL, CONSTRAINT pk PRIMARY KEY (k1, k2))" + tableDDLOptions;
conn.createStatement().execute(ddl);
- ddl = "CREATE VIEW v1 AS SELECT * FROM " + fullTableName + " WHERE v1 = 1.0";
+ String fullViewName = "V_" + generateRandomString();
+ ddl = "CREATE VIEW " + fullViewName + " AS SELECT * FROM " + fullTableName + " WHERE v1 = 1.0";
conn.createStatement().execute(ddl);
- ddl = "ALTER VIEW V1 ADD k3 VARCHAR PRIMARY KEY, k2 VARCHAR PRIMARY KEY, v2 INTEGER";
+ ddl = "ALTER VIEW " + fullViewName + " ADD k3 VARCHAR PRIMARY KEY, k2 VARCHAR PRIMARY KEY, v2 INTEGER";
conn.createStatement().execute(ddl);
}
@@ -630,10 +658,11 @@ public class ViewIT extends BaseViewIT {
Connection conn = DriverManager.getConnection(getUrl());
String ddl = "CREATE TABLE " + fullTableName + " (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 DECIMAL, CONSTRAINT pk PRIMARY KEY (k1, k2))" + tableDDLOptions;
conn.createStatement().execute(ddl);
- ddl = "CREATE VIEW v1 AS SELECT * FROM " + fullTableName + " WHERE v1 = 1.0";
+ String fullViewName = "V_" + generateRandomString();
+ ddl = "CREATE VIEW " + fullViewName + " AS SELECT * FROM " + fullTableName + " WHERE v1 = 1.0";
conn.createStatement().execute(ddl);
try {
- ddl = "ALTER VIEW V1 ADD k3 VARCHAR NOT NULL PRIMARY KEY";
+ ddl = "ALTER VIEW " + fullViewName + " ADD k3 VARCHAR NOT NULL PRIMARY KEY";
conn.createStatement().execute(ddl);
fail("can only add nullable PKs via ALTER VIEW/TABLE");
} catch (SQLException e) {
@@ -646,17 +675,19 @@ public class ViewIT extends BaseViewIT {
Connection conn = DriverManager.getConnection(getUrl());
String sql = "CREATE TABLE " + fullTableName + " (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 DECIMAL, CONSTRAINT pk PRIMARY KEY (k1, k2))" + tableDDLOptions;
conn.createStatement().execute(sql);
- sql = "CREATE VIEW v1 AS SELECT * FROM " + fullTableName;
+ String fullViewName1 = "V_" + generateRandomString();
+ sql = "CREATE VIEW " + fullViewName1 + " AS SELECT * FROM " + fullTableName;
conn.createStatement().execute(sql);
- sql = "CREATE VIEW v2 AS SELECT * FROM " + fullTableName + " WHERE k1 = 1.0";
+ String fullViewName2 = "V_" + generateRandomString();
+ sql = "CREATE VIEW " + fullViewName2 + " AS SELECT * FROM " + fullTableName + " WHERE k1 = 1.0";
conn.createStatement().execute(sql);
- sql = "SELECT * FROM v1 order by k1, k2";
+ sql = "SELECT * FROM " + fullViewName1 + " order by k1, k2";
PreparedStatement stmt = conn.prepareStatement(sql);
QueryPlan plan = PhoenixRuntime.getOptimizedQueryPlan(stmt);
assertEquals(0, plan.getOrderBy().getOrderByExpressions().size());
- sql = "SELECT * FROM v2 order by k1, k2";
+ sql = "SELECT * FROM " + fullViewName2 + " order by k1, k2";
stmt = conn.prepareStatement(sql);
plan = PhoenixRuntime.getOptimizedQueryPlan(stmt);
assertEquals(0, plan.getOrderBy().getOrderByExpressions().size());
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexWithStatsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexWithStatsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexWithStatsIT.java
index 32f272d..d03b6fa 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexWithStatsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexWithStatsIT.java
@@ -25,39 +25,25 @@ import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
-import java.util.Map;
import java.util.Properties;
-import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
-import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.end2end.ParallelStatsEnabledIT;
import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.apache.phoenix.util.TestUtil;
-import org.junit.BeforeClass;
import org.junit.Test;
-import com.google.common.collect.Maps;
-
-public class ImmutableIndexWithStatsIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class ImmutableIndexWithStatsIT extends ParallelStatsEnabledIT {
- @BeforeClass
- public static void doSetup() throws Exception {
- Map<String,String> props = Maps.newHashMapWithExpectedSize(5);
- props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(1));
- setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
- }
-
@Test
public void testIndexCreationDeadlockWithStats() throws Exception {
String query;
ResultSet rs;
+ String tableName = generateRandomString();
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.setAutoCommit(false);
- String tableName = TestUtil.DEFAULT_DATA_TABLE_FULL_NAME;
conn.createStatement().execute("CREATE TABLE " + tableName + " (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR) IMMUTABLE_ROWS=TRUE");
query = "SELECT * FROM " + tableName;
rs = conn.createStatement().executeQuery(query);
@@ -65,8 +51,8 @@ public class ImmutableIndexWithStatsIT extends BaseOwnClusterHBaseManagedTimeIT
PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?)");
for (int i=0; i<6;i++) {
- stmt.setString(1,"k" + i);
- stmt.setString(2, "v" + i );
+ stmt.setString(1, "kkkkkkkkkk" + i);
+ stmt.setString(2, "vvvvvvvvvv" + i );
stmt.execute();
}
conn.commit();
@@ -76,7 +62,7 @@ public class ImmutableIndexWithStatsIT extends BaseOwnClusterHBaseManagedTimeIT
rs = conn.createStatement().executeQuery("EXPLAIN " + query);
assertTrue(QueryUtil.getExplainPlan(rs).startsWith("CLIENT PARALLEL 1-WAY FULL SCAN"));
- String indexName = TestUtil.DEFAULT_INDEX_TABLE_NAME;
+ String indexName = "I_" + generateRandomString();
conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + tableName + " (v)");
query = "SELECT * FROM " + indexName;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index ff74dc8..0657d54 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
+import org.apache.phoenix.end2end.BaseOwnClusterIT;
import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.QueryConstants;
@@ -74,7 +74,7 @@ import com.google.common.collect.Maps;
@Category(NeedsOwnMiniClusterTest.class)
@RunWith(Parameterized.class)
-public class MutableIndexFailureIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class MutableIndexFailureIT extends BaseOwnClusterIT {
public static volatile boolean FAIL_WRITE = false;
public static final String INDEX_NAME = "IDX";
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ReadOnlyIndexFailureIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ReadOnlyIndexFailureIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ReadOnlyIndexFailureIT.java
index 1dae126..a2213ea 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ReadOnlyIndexFailureIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ReadOnlyIndexFailureIT.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
+import org.apache.phoenix.end2end.BaseOwnClusterIT;
import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.hbase.index.Indexer;
@@ -77,7 +77,7 @@ import com.google.common.collect.Maps;
@Category(NeedsOwnMiniClusterTest.class)
@RunWith(Parameterized.class)
-public class ReadOnlyIndexFailureIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class ReadOnlyIndexFailureIT extends BaseOwnClusterIT {
public static volatile boolean FAIL_WRITE = false;
public static final String INDEX_NAME = "IDX";
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/TxWriteFailureIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/TxWriteFailureIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/TxWriteFailureIT.java
index 0202128..03990c1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/TxWriteFailureIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/TxWriteFailureIT.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
+import org.apache.phoenix.end2end.BaseOwnClusterIT;
import org.apache.phoenix.hbase.index.Indexer;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.util.PropertiesUtil;
@@ -57,7 +57,7 @@ import org.junit.runners.Parameterized.Parameters;
import com.google.common.collect.Maps;
@RunWith(Parameterized.class)
-public class TxWriteFailureIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class TxWriteFailureIT extends BaseOwnClusterIT {
private static final String SCHEMA_NAME = "S";
private static final String DATA_TABLE_NAME = "T";
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
index 6758abc..a5555f3 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
@@ -56,6 +56,7 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.schema.TableRef;
import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -81,6 +82,10 @@ public class PartialCommitIT extends BaseOwnClusterIT {
private static final byte[] ROW_TO_FAIL_DELETE_BYTES = Bytes.toBytes("fail me delete");
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ @Override
+ @After
+ public void cleanUpAfterTest() throws Exception {}
+
@BeforeClass
public static void doSetup() throws Exception {
Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(10);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/iterate/RoundRobinResultIteratorWithStatsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/iterate/RoundRobinResultIteratorWithStatsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/iterate/RoundRobinResultIteratorWithStatsIT.java
index ddf17ba..c7c0b39 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/iterate/RoundRobinResultIteratorWithStatsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/iterate/RoundRobinResultIteratorWithStatsIT.java
@@ -29,7 +29,7 @@ import java.util.List;
import java.util.Map;
import org.apache.phoenix.compile.StatementContext;
-import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
+import org.apache.phoenix.end2end.BaseOwnClusterIT;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixResultSet;
import org.apache.phoenix.jdbc.PhoenixStatement;
@@ -41,7 +41,7 @@ import org.junit.Test;
import com.google.common.collect.Maps;
-public class RoundRobinResultIteratorWithStatsIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class RoundRobinResultIteratorWithStatsIT extends BaseOwnClusterIT {
@BeforeClass
public static void doSetup() throws Exception {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
index eed0e48..fd6f91a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -46,7 +46,7 @@ import java.util.Properties;
import java.util.Set;
import org.apache.phoenix.compile.StatementContext;
-import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
+import org.apache.phoenix.end2end.BaseOwnClusterIT;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixResultSet;
import org.apache.phoenix.query.QueryServices;
@@ -61,7 +61,7 @@ import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
-public class PhoenixMetricsIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class PhoenixMetricsIT extends BaseOwnClusterIT {
private static final List<String> mutationMetricsToSkip = Lists
.newArrayList(MetricType.MUTATION_COMMIT_TIME.name());
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
index 2ccf490..c015f66 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixClientRpcIT.java
@@ -23,7 +23,7 @@ import java.util.Properties;
import org.apache.hadoop.hbase.ipc.CallRunner;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
-import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
+import org.apache.phoenix.end2end.BaseOwnClusterIT;
import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.QueryUtil;
import org.apache.phoenix.util.ReadOnlyProps;
@@ -33,7 +33,7 @@ import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
-public class PhoenixClientRpcIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class PhoenixClientRpcIT extends BaseOwnClusterIT {
private static final String SCHEMA_NAME = "S";
private static final String INDEX_TABLE_NAME = "I";
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
index 75eebc2..6605f16 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
+import org.apache.phoenix.end2end.BaseOwnClusterIT;
import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.QueryUtil;
import org.apache.phoenix.util.ReadOnlyProps;
@@ -53,7 +53,7 @@ import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
-public class PhoenixServerRpcIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class PhoenixServerRpcIT extends BaseOwnClusterIT {
private static final String SCHEMA_NAME = "S";
private static final String INDEX_TABLE_NAME = "I";
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index 18a0b23..c72e404 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -334,6 +334,10 @@ public class MetaDataUtil {
public static String getViewIndexSchemaName(String schemaName) {
return schemaName;
}
+
+ public static String getViewIndexName(String schemaName, String tableName) {
+ return SchemaUtil.getTableName(getViewIndexSchemaName(schemaName), getViewIndexTableName(tableName));
+ }
public static byte[] getIndexPhysicalName(byte[] physicalTableName, String indexPrefix) {
return getIndexPhysicalName(Bytes.toString(physicalTableName), indexPrefix).getBytes();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 57df138..74fa3fa 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -49,7 +49,6 @@ import static org.apache.phoenix.util.TestUtil.JOIN_CUSTOMER_TABLE_FULL_NAME;
import static org.apache.phoenix.util.TestUtil.JOIN_ITEM_TABLE_FULL_NAME;
import static org.apache.phoenix.util.TestUtil.JOIN_ORDER_TABLE_FULL_NAME;
import static org.apache.phoenix.util.TestUtil.JOIN_SUPPLIER_TABLE_FULL_NAME;
-import static org.apache.phoenix.util.TestUtil.KEYONLY_NAME;
import static org.apache.phoenix.util.TestUtil.MDTEST_NAME;
import static org.apache.phoenix.util.TestUtil.MULTI_CF_NAME;
import static org.apache.phoenix.util.TestUtil.PARENTID1;
@@ -337,9 +336,6 @@ public abstract class BaseTest {
" \"1\".\"value\" integer,\n" +
" \"1\".\"_blah^\" varchar)"
);
- builder.put(KEYONLY_NAME,"create table " + KEYONLY_NAME +
- " (i1 integer not null, i2 integer not null\n" +
- " CONSTRAINT pk PRIMARY KEY (i1,i2))");
builder.put(MDTEST_NAME,"create table " + MDTEST_NAME +
" (id char(1) primary key,\n" +
" a.col1 integer,\n" +
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
index cddd762..5500e7a 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
@@ -569,14 +569,6 @@ public class TestUtil {
return tableStats.getGuidePosts().values();
}
- public static List<KeyRange> getSplits(Connection conn, byte[] lowerRange, byte[] upperRange) throws SQLException {
- return getSplits(conn, STABLE_NAME, STABLE_PK_NAME, lowerRange, upperRange, null, "COUNT(*)");
- }
-
- public static List<KeyRange> getAllSplits(Connection conn) throws SQLException {
- return getAllSplits(conn, STABLE_NAME);
- }
-
public static void analyzeTable(Connection conn, String tableName) throws IOException, SQLException {
analyzeTable(conn, tableName, false);
}
@@ -593,13 +585,8 @@ public class TestUtil {
conn.createStatement().execute(query);
}
- public static void analyzeTableColumns(Connection conn) throws IOException, SQLException {
- String query = "UPDATE STATISTICS " + STABLE_NAME+ " COLUMNS";
- conn.createStatement().execute(query);
- }
-
- public static void analyzeTable(Connection conn) throws IOException, SQLException {
- String query = "UPDATE STATISTICS " + STABLE_NAME;
+ public static void analyzeTableColumns(Connection conn, String tableName) throws IOException, SQLException {
+ String query = "UPDATE STATISTICS " + tableName + " COLUMNS";
conn.createStatement().execute(query);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 3c000e2..825c0d7 100644
--- a/pom.xml
+++ b/pom.xml
@@ -250,9 +250,6 @@
<execution>
<id>ParallelStatsDisabledTest</id>
<configuration>
- <!--includes>
- <include>TenantIdTypeIT</include>
- </includes-->
<encoding>UTF-8</encoding>
<forkCount>${numForkedIT}</forkCount>
<runOrder>alphabetical</runOrder>
[3/4] phoenix git commit: PHOENIX-3290 Move and/or combine as many
NeedsOwnCluster tests to bring down test run time
Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithOffsetIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithOffsetIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithOffsetIT.java
index 5e36784..6f190b6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithOffsetIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithOffsetIT.java
@@ -38,6 +38,7 @@ import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.QueryUtil;
import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
@@ -47,23 +48,17 @@ import org.junit.runners.Parameterized.Parameters;
import com.google.common.collect.Maps;
@RunWith(Parameterized.class)
-public class QueryWithOffsetIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class QueryWithOffsetIT extends ParallelStatsDisabledIT {
- private String tableName = "T";
- private final String[] strings = { "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p",
- "q", "r", "s", "t", "u", "v", "w", "x", "y", "z" };
- private final String ddl;
+ private static final String[] STRINGS = { "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n",
+ "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z" };
private final boolean isSalted;
-
- public QueryWithOffsetIT(String preSplit) {
- this.tableName=tableName + "_" + preSplit.charAt(2);
- this.ddl = "CREATE TABLE " + tableName + " (t_id VARCHAR NOT NULL,\n" + "k1 INTEGER NOT NULL,\n"
- + "k2 INTEGER NOT NULL,\n" + "C3.k3 INTEGER,\n" + "C2.v1 VARCHAR,\n"
- + "CONSTRAINT pk PRIMARY KEY (t_id, k1, k2)) " + preSplit;
- this.isSalted = preSplit.startsWith(" SALT_BUCKETS");
- }
+ private final String preSplit;
+ private String ddl;
+ private String tableName;
@BeforeClass
+ @Shadower(classBeingShadowed = ParallelStatsDisabledIT.class)
public static void doSetup() throws Exception {
Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
props.put(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.toString(true));
@@ -71,6 +66,19 @@ public class QueryWithOffsetIT extends BaseOwnClusterHBaseManagedTimeIT {
setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
+ public QueryWithOffsetIT(String preSplit) {
+ this.isSalted = preSplit.startsWith(" SALT_BUCKETS");
+ this.preSplit = preSplit;
+ }
+
+ @Before
+ public void initTest() {
+ tableName = "T_" + generateRandomString();
+ ddl = "CREATE TABLE " + tableName + " (t_id VARCHAR NOT NULL,\n" + "k1 INTEGER NOT NULL,\n"
+ + "k2 INTEGER NOT NULL,\n" + "C3.k3 INTEGER,\n" + "C2.v1 VARCHAR,\n"
+ + "CONSTRAINT pk PRIMARY KEY (t_id, k1, k2)) " + preSplit;
+ }
+
@Parameters(name="preSplit = {0}")
public static Collection<String> data() {
return Arrays.asList(new String[] { " SPLIT ON ('e','i','o')", " SALT_BUCKETS=10" });
@@ -92,7 +100,7 @@ public class QueryWithOffsetIT extends BaseOwnClusterHBaseManagedTimeIT {
int i = 0;
while (i < limit) {
assertTrue(rs.next());
- assertEquals("Expected string didn't match for i = " + i, strings[offset + i], rs.getString(1));
+ assertEquals("Expected string didn't match for i = " + i, STRINGS[offset + i], rs.getString(1));
i++;
}
@@ -100,14 +108,14 @@ public class QueryWithOffsetIT extends BaseOwnClusterHBaseManagedTimeIT {
rs = conn.createStatement().executeQuery("SELECT t_id from " + tableName + " union all SELECT t_id from "
+ tableName + " offset " + offset + " FETCH FIRST " + limit + " rows only");
i = 0;
- while (i++ < strings.length - offset) {
+ while (i++ < STRINGS.length - offset) {
assertTrue(rs.next());
- assertEquals(strings[offset + i - 1], rs.getString(1));
+ assertEquals(STRINGS[offset + i - 1], rs.getString(1));
}
i = 0;
- while (i++ < limit - strings.length - offset) {
+ while (i++ < limit - STRINGS.length - offset) {
assertTrue(rs.next());
- assertEquals(strings[i - 1], rs.getString(1));
+ assertEquals(STRINGS[i - 1], rs.getString(1));
}
conn.close();
}
@@ -124,25 +132,27 @@ public class QueryWithOffsetIT extends BaseOwnClusterHBaseManagedTimeIT {
String query = "SELECT t_id from " + tableName + " offset " + offset;
ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + query);
if(!isSalted){
- assertEquals("CLIENT SERIAL 1-WAY FULL SCAN OVER T_P\n" + " SERVER FILTER BY FIRST KEY ONLY\n"
+ assertEquals("CLIENT SERIAL 1-WAY FULL SCAN OVER " + tableName + "\n"
+ + " SERVER FILTER BY FIRST KEY ONLY\n"
+ " SERVER OFFSET " + offset, QueryUtil.getExplainPlan(rs));
}else{
- assertEquals("CLIENT PARALLEL 10-WAY FULL SCAN OVER T_A\n" + " SERVER FILTER BY FIRST KEY ONLY\n"
+ assertEquals("CLIENT PARALLEL 10-WAY FULL SCAN OVER " + tableName + "\n"
+ + " SERVER FILTER BY FIRST KEY ONLY\n"
+ "CLIENT MERGE SORT\n" + "CLIENT OFFSET " + offset, QueryUtil.getExplainPlan(rs));
}
rs = conn.createStatement().executeQuery(query);
int i = 0;
- while (i++ < strings.length - offset) {
+ while (i++ < STRINGS.length - offset) {
assertTrue(rs.next());
- assertEquals(strings[offset + i - 1], rs.getString(1));
+ assertEquals(STRINGS[offset + i - 1], rs.getString(1));
}
query = "SELECT t_id from " + tableName + " ORDER BY v1 offset " + offset;
rs = conn.createStatement().executeQuery("EXPLAIN " + query);
if (!isSalted) {
- assertEquals("CLIENT PARALLEL 5-WAY FULL SCAN OVER T_P\n" + " SERVER SORTED BY [C2.V1]\n"
+ assertEquals("CLIENT PARALLEL 5-WAY FULL SCAN OVER " + tableName + "\n" + " SERVER SORTED BY [C2.V1]\n"
+ "CLIENT MERGE SORT\n" + "CLIENT OFFSET " + offset, QueryUtil.getExplainPlan(rs));
} else {
- assertEquals("CLIENT PARALLEL 10-WAY FULL SCAN OVER T_A\n" + " SERVER SORTED BY [C2.V1]\n"
+ assertEquals("CLIENT PARALLEL 10-WAY FULL SCAN OVER " + tableName + "\n" + " SERVER SORTED BY [C2.V1]\n"
+ "CLIENT MERGE SORT\n" + "CLIENT OFFSET " + offset, QueryUtil.getExplainPlan(rs));
}
conn.close();
@@ -161,31 +171,31 @@ public class QueryWithOffsetIT extends BaseOwnClusterHBaseManagedTimeIT {
rs = conn.createStatement()
.executeQuery("SELECT t_id from " + tableName + " order by t_id offset " + offset + " row");
int i = 0;
- while (i++ < strings.length - offset) {
+ while (i++ < STRINGS.length - offset) {
assertTrue(rs.next());
- assertEquals(strings[offset + i - 1], rs.getString(1));
+ assertEquals(STRINGS[offset + i - 1], rs.getString(1));
}
rs = conn.createStatement().executeQuery(
"SELECT k3, count(*) from " + tableName + " group by k3 order by k3 desc offset " + offset + " row");
i = 0;
- while (i++ < strings.length - offset) {
+ while (i++ < STRINGS.length - offset) {
assertTrue(rs.next());
- assertEquals(strings.length - offset - i + 2, rs.getInt(1));
+ assertEquals(STRINGS.length - offset - i + 2, rs.getInt(1));
}
rs = conn.createStatement().executeQuery("SELECT t_id from " + tableName + " union all SELECT t_id from "
+ tableName + " offset " + offset + " rows");
i = 0;
- while (i++ < strings.length - offset) {
+ while (i++ < STRINGS.length - offset) {
assertTrue(rs.next());
- assertEquals(strings[offset + i - 1], rs.getString(1));
+ assertEquals(STRINGS[offset + i - 1], rs.getString(1));
}
i = 0;
- while (i++ < strings.length) {
+ while (i++ < STRINGS.length) {
assertTrue(rs.next());
- assertEquals(strings[i - 1], rs.getString(1));
+ assertEquals(STRINGS[i - 1], rs.getString(1));
}
conn.close();
}
@@ -210,8 +220,8 @@ public class QueryWithOffsetIT extends BaseOwnClusterHBaseManagedTimeIT {
private void initTableValues(Connection conn) throws SQLException {
for (int i = 0; i < 26; i++) {
- conn.createStatement().execute("UPSERT INTO " + tableName + " values('" + strings[i] + "'," + i + ","
- + (i + 1) + "," + (i + 2) + ",'" + strings[25 - i] + "')");
+ conn.createStatement().execute("UPSERT INTO " + tableName + " values('" + STRINGS[i] + "'," + i + ","
+ + (i + 1) + "," + (i + 2) + ",'" + STRINGS[25 - i] + "')");
}
conn.commit();
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RenewLeaseIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RenewLeaseIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RenewLeaseIT.java
index fa0bc8e..aba4ddb 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RenewLeaseIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RenewLeaseIT.java
@@ -42,7 +42,7 @@ import org.junit.Test;
import com.google.common.collect.Maps;
-public class RenewLeaseIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class RenewLeaseIT extends BaseOwnClusterIT {
private static final long RPC_TIMEOUT = 2000;
private static volatile boolean SLEEP_NOW = false;
private static final String TABLE_NAME = "FOO_BAR";
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpillableGroupByIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpillableGroupByIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpillableGroupByIT.java
index 22bf8ce..a11f808 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpillableGroupByIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpillableGroupByIT.java
@@ -34,8 +34,8 @@ import java.util.Map;
import java.util.Properties;
import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
import org.apache.phoenix.util.ReadOnlyProps;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -48,7 +48,7 @@ import com.google.common.collect.Maps;
* cluster.
*/
-public class SpillableGroupByIT extends BaseOwnClusterClientManagedTimeIT {
+public class SpillableGroupByIT extends BaseOwnClusterIT {
private static final int NUM_ROWS_INSERTED = 1000;
@@ -61,7 +61,7 @@ public class SpillableGroupByIT extends BaseOwnClusterClientManagedTimeIT {
@BeforeClass
public static void doSetup() throws Exception {
- Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
+ Map<String, String> props = Maps.newHashMapWithExpectedSize(11);
// Set a very small cache size to force plenty of spilling
props.put(QueryServices.GROUPBY_MAX_CACHE_SIZE_ATTRIB,
Integer.toString(1));
@@ -70,6 +70,13 @@ public class SpillableGroupByIT extends BaseOwnClusterClientManagedTimeIT {
Integer.toString(1));
// Large enough to not run out of memory, but small enough to spill
props.put(QueryServices.MAX_MEMORY_SIZE_ATTRIB, Integer.toString(40000));
+
+ // Set guidepost width, but disable stats
+ props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
+ props.put(QueryServices.STATS_ENABLED_ATTRIB, Boolean.toString(false));
+ props.put(QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB, Boolean.TRUE.toString());
+ props.put(QueryServices.EXPLAIN_ROW_COUNT_ATTRIB, Boolean.TRUE.toString());
+ // Must update config before starting server
setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
@@ -77,10 +84,7 @@ public class SpillableGroupByIT extends BaseOwnClusterClientManagedTimeIT {
createGroupByTestTable(conn, tableName);
}
- private void loadData(long ts) throws SQLException {
- Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts));
- Connection conn = DriverManager.getConnection(getUrl(), props);
+ private void loadData(Connection conn) throws SQLException {
int groupFactor = NUM_ROWS_INSERTED / 2;
for (int i = 0; i < NUM_ROWS_INSERTED; i++) {
insertRow(conn, Integer.toString(i % (groupFactor)), 10);
@@ -90,7 +94,6 @@ public class SpillableGroupByIT extends BaseOwnClusterClientManagedTimeIT {
}
}
conn.commit();
- conn.close();
}
private void insertRow(Connection conn, String uri, int appcpu)
@@ -107,72 +110,66 @@ public class SpillableGroupByIT extends BaseOwnClusterClientManagedTimeIT {
@Test
public void testScanUri() throws Exception {
- long ts = nextTimestamp();
SpillableGroupByIT spGpByT = new SpillableGroupByIT();
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB,
- Long.toString(ts));
Connection conn = DriverManager.getConnection(getUrl(), props);
createTable(conn, GROUPBYTEST_NAME);
- ts += 2;
- spGpByT.loadData(ts);
+ spGpByT.loadData(conn);
props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB,
- Long.toString(ts + 10));
- conn = DriverManager.getConnection(getUrl(), props);
- try {
- Statement stmt = conn.createStatement();
- ResultSet rs = stmt.executeQuery(GROUPBY1);
-
- int count = 0;
- while (rs.next()) {
- String uri = rs.getString(5);
- assertEquals(2, rs.getInt(1));
- assertEquals(1, rs.getInt(2));
- assertEquals(20, rs.getInt(3));
- assertEquals(10, rs.getInt(4));
- int a = Integer.valueOf(rs.getString(6)).intValue();
- int b = Integer.valueOf(rs.getString(7)).intValue();
- assertEquals(Integer.valueOf(uri).intValue(), Math.min(a, b));
- assertEquals(NUM_ROWS_INSERTED / 2 + Integer.valueOf(uri), Math.max(a, b));
- count++;
- }
- assertEquals(NUM_ROWS_INSERTED / 2, count);
-
- } finally {
- conn.close();
+ Statement stmt = conn.createStatement();
+ ResultSet rs = stmt.executeQuery(GROUPBY1);
+
+ int count = 0;
+ while (rs.next()) {
+ String uri = rs.getString(5);
+ assertEquals(2, rs.getInt(1));
+ assertEquals(1, rs.getInt(2));
+ assertEquals(20, rs.getInt(3));
+ assertEquals(10, rs.getInt(4));
+ int a = Integer.valueOf(rs.getString(6)).intValue();
+ int b = Integer.valueOf(rs.getString(7)).intValue();
+ assertEquals(Integer.valueOf(uri).intValue(), Math.min(a, b));
+ assertEquals(NUM_ROWS_INSERTED / 2 + Integer.valueOf(uri), Math.max(a, b));
+ count++;
}
+ assertEquals(NUM_ROWS_INSERTED / 2, count);
- // Test group by with limit that will exit after first row
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB,
- Long.toString(ts + 10));
- conn = DriverManager.getConnection(getUrl(), props);
- try {
- Statement stmt = conn.createStatement();
- ResultSet rs = stmt.executeQuery("SELECT appcpu FROM " + GROUPBYTEST_NAME + " group by appcpu limit 1");
-
- assertTrue(rs.next());
- assertEquals(10,rs.getInt(1));
- assertFalse(rs.next());
- } finally {
- conn.close();
- }
+ conn.createStatement();
+ rs = stmt.executeQuery("SELECT appcpu FROM " + GROUPBYTEST_NAME + " group by appcpu limit 1");
+
+ assertTrue(rs.next());
+ assertEquals(10,rs.getInt(1));
+ assertFalse(rs.next());
- // Test group by with limit that will do spilling before exiting
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB,
- Long.toString(ts + 10));
- conn = DriverManager.getConnection(getUrl(), props);
- try {
- Statement stmt = conn.createStatement();
- ResultSet rs = stmt.executeQuery("SELECT to_number(uri) FROM " + GROUPBYTEST_NAME + " group by to_number(uri) limit 100");
- int count = 0;
- while (rs.next()) {
- count++;
- }
- assertEquals(100, count);
- } finally {
- conn.close();
+ stmt = conn.createStatement();
+ rs = stmt.executeQuery("SELECT to_number(uri) FROM " + GROUPBYTEST_NAME + " group by to_number(uri) limit 100");
+ count = 0;
+ while (rs.next()) {
+ count++;
}
+ assertEquals(100, count);
}
+ @Test
+ public void testStatisticsAreNotWritten() throws SQLException {
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ Connection conn = DriverManager.getConnection(getUrl(), props);
+ Statement stmt = conn.createStatement();
+ stmt.execute("CREATE TABLE T1 (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR)");
+ stmt.execute("UPSERT INTO T1 VALUES (1, 'NAME1')");
+ stmt.execute("UPSERT INTO T1 VALUES (2, 'NAME2')");
+ stmt.execute("UPSERT INTO T1 VALUES (3, 'NAME3')");
+ conn.commit();
+ stmt.execute("UPDATE STATISTICS T1");
+ ResultSet rs = stmt.executeQuery("SELECT * FROM SYSTEM.STATS");
+ assertFalse(rs.next());
+ rs.close();
+ stmt.close();
+ rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM T1");
+ String explainPlan = QueryUtil.getExplainPlan(rs);
+ assertEquals(
+ "CLIENT 1-CHUNK PARALLEL 1-WAY FULL SCAN OVER T1",
+ explainPlan);
+ conn.close();
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectionDisabledIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectionDisabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectionDisabledIT.java
deleted file mode 100644
index 54ffa7c..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectionDisabledIT.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.google.common.collect.Maps;
-
-/**
- * Verifies that statistics are not collected if they are disabled via a setting
- */
-public class StatsCollectionDisabledIT extends StatsCollectorAbstractIT {
-
- @BeforeClass
- public static void doSetup() throws Exception {
- Map<String,String> props = Maps.newHashMapWithExpectedSize(5);
- // Must update config before starting server
- props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
- props.put(QueryServices.STATS_ENABLED_ATTRIB, Boolean.toString(false));
- props.put(QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB, Boolean.TRUE.toString());
- props.put(QueryServices.EXPLAIN_ROW_COUNT_ATTRIB, Boolean.TRUE.toString());
- setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
- }
-
- @Test
- public void testStatisticsAreNotWritten() throws SQLException {
- Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- Connection conn = DriverManager.getConnection(getUrl(), props);
- Statement stmt = conn.createStatement();
- stmt.execute("CREATE TABLE T1 (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR)");
- stmt.execute("UPSERT INTO T1 VALUES (1, 'NAME1')");
- stmt.execute("UPSERT INTO T1 VALUES (2, 'NAME2')");
- stmt.execute("UPSERT INTO T1 VALUES (3, 'NAME3')");
- conn.commit();
- stmt.execute("UPDATE STATISTICS T1");
- ResultSet rs = stmt.executeQuery("SELECT * FROM SYSTEM.STATS");
- assertFalse(rs.next());
- rs.close();
- stmt.close();
- rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM T1");
- String explainPlan = QueryUtil.getExplainPlan(rs);
- assertEquals(
- "CLIENT 1-CHUNK PARALLEL 1-WAY FULL SCAN OVER T1",
- explainPlan);
- conn.close();
- }
-}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorAbstractIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorAbstractIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorAbstractIT.java
deleted file mode 100644
index ab337d6..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorAbstractIT.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.util.Map;
-
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.query.ConnectionQueryServices;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.junit.BeforeClass;
-import org.junit.experimental.categories.Category;
-
-import com.google.common.collect.Maps;
-@Category(NeedsOwnMiniClusterTest.class)
-public abstract class StatsCollectorAbstractIT extends BaseOwnClusterHBaseManagedTimeIT {
- protected static final String STATS_TEST_TABLE_NAME = "S";
- protected static final String STATS_TEST_TABLE_NAME_NEW = "S_NEW";
- protected static final byte[] STATS_TEST_TABLE_BYTES = Bytes.toBytes(STATS_TEST_TABLE_NAME);
- protected static final byte[] STATS_TEST_TABLE_BYTES_NEW = Bytes.toBytes(STATS_TEST_TABLE_NAME_NEW);
-
- @BeforeClass
- public static void doSetup() throws Exception {
- Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
- // Must update config before starting server
- props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
- props.put(QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB, Boolean.TRUE.toString());
- setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
- }
-
- protected void splitTable(Connection conn, byte[] splitPoint, byte[] tabName) throws IOException, InterruptedException, SQLException {
- ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
- int nRegionsNow = services.getAllTableRegions(tabName).size();
- HBaseAdmin admin = services.getAdmin();
- try {
- admin.split(tabName, splitPoint);
- int nTries = 0;
- int nRegions;
- do {
- Thread.sleep(2000);
- services.clearTableRegionCache(tabName);
- nRegions = services.getAllTableRegions(tabName).size();
- nTries++;
- } while (nRegions == nRegionsNow && nTries < 10);
- if (nRegions == nRegionsNow) {
- fail();
- }
- // FIXME: I see the commit of the stats finishing before this with a lower timestamp that the scan timestamp,
- // yet without this sleep, the query finds the old data. Seems like an HBase bug and a potentially serious one.
- Thread.sleep(8000);
- } finally {
- admin.close();
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
index f0fe346..dd7741a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.end2end;
import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
import static org.apache.phoenix.util.TestUtil.getAllSplits;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
@@ -35,9 +36,12 @@ import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;
+import java.util.Random;
import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.query.ConnectionQueryServices;
@@ -49,6 +53,7 @@ import org.apache.phoenix.util.QueryUtil;
import org.apache.phoenix.util.ReadOnlyProps;
import org.apache.phoenix.util.SchemaUtil;
import org.apache.phoenix.util.TestUtil;
+import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
@@ -58,14 +63,14 @@ import org.junit.runners.Parameterized.Parameters;
import com.google.common.collect.Maps;
@RunWith(Parameterized.class)
-public class StatsCollectorIT extends StatsCollectorAbstractIT {
- private static final String STATS_TEST_TABLE_NAME = "S";
-
+public class StatsCollectorIT extends ParallelStatsEnabledIT {
private final String tableDDLOptions;
- private final String tableName;
- private final String fullTableName;
+ private String tableName;
+ private String schemaName;
+ private String fullTableName;
@BeforeClass
+ @Shadower(classBeingShadowed = ParallelStatsEnabledIT.class)
public static void doSetup() throws Exception {
Map<String,String> props = Maps.newHashMapWithExpectedSize(10);
// Must update config before starting server
@@ -79,10 +84,15 @@ public class StatsCollectorIT extends StatsCollectorAbstractIT {
public StatsCollectorIT( boolean transactional) {
this.tableDDLOptions= transactional ? " TRANSACTIONAL=true" : "";
- this.tableName = TestUtil.DEFAULT_DATA_TABLE_NAME + ( transactional ? "_TXN" : "");
- this.fullTableName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, tableName);
}
+ @Before
+ public void generateTableNames() {
+ schemaName = TestUtil.DEFAULT_SCHEMA_NAME;
+ tableName = "T_" + generateRandomString();
+ fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+ }
+
@Parameters(name="transactional = {0}")
public static Collection<Boolean> data() {
return Arrays.asList(false,true);
@@ -110,8 +120,6 @@ public class StatsCollectorIT extends StatsCollectorAbstractIT {
public void testSomeUpdateEmptyStats() throws Exception {
Connection conn;
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- String fullTableName = this.fullTableName + "_SALTED";
- // props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 10));
conn = DriverManager.getConnection(getUrl(), props);
conn.setAutoCommit(true);
conn.createStatement().execute(
@@ -150,7 +158,6 @@ public class StatsCollectorIT extends StatsCollectorAbstractIT {
PreparedStatement stmt;
ResultSet rs;
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- // props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 10));
conn = DriverManager.getConnection(getUrl(), props);
conn.createStatement().execute(
"CREATE TABLE " + fullTableName +" ( k VARCHAR, a_string_array VARCHAR(100) ARRAY[4], b_string_array VARCHAR(100) ARRAY[4] \n"
@@ -217,12 +224,11 @@ public class StatsCollectorIT extends StatsCollectorAbstractIT {
@Test
public void testUpdateStatsWithMultipleTables() throws Throwable {
- String fullTableName2 = fullTableName+"_2";
+ String fullTableName2 = SchemaUtil.getTableName(schemaName, "T_" + generateRandomString());
Connection conn;
PreparedStatement stmt;
ResultSet rs;
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- // props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 10));
conn = DriverManager.getConnection(getUrl(), props);
conn.createStatement().execute(
"CREATE TABLE " + fullTableName +" ( k VARCHAR, a_string_array VARCHAR(100) ARRAY[4], b_string_array VARCHAR(100) ARRAY[4] \n"
@@ -271,7 +277,6 @@ public class StatsCollectorIT extends StatsCollectorAbstractIT {
InterruptedException {
Connection conn;
PreparedStatement stmt;
- // props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 30));
conn = DriverManager.getConnection(getUrl(), props);
stmt = upsertStmt(conn, tableName);
stmt.setString(1, "a");
@@ -356,12 +361,12 @@ public class StatsCollectorIT extends StatsCollectorAbstractIT {
@Test
public void testCompactUpdatesStats() throws Exception {
- testCompactUpdatesStats(null, STATS_TEST_TABLE_NAME + 1);
+ testCompactUpdatesStats(null, fullTableName);
}
@Test
public void testCompactUpdatesStatsWithMinStatsUpdateFreq() throws Exception {
- testCompactUpdatesStats(QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS, STATS_TEST_TABLE_NAME + 2);
+ testCompactUpdatesStats(QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS, fullTableName);
}
private void testCompactUpdatesStats(Integer minStatsUpdateFreq, String tableName) throws Exception {
@@ -426,4 +431,130 @@ public class StatsCollectorIT extends StatsCollectorAbstractIT {
assertEquals(nRows - nDeletedRows, rs.getLong(1));
}
+
+ @Test
+ public void testWithMultiCF() throws Exception {
+ int nRows = 20;
+ Connection conn;
+ PreparedStatement stmt;
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ conn = DriverManager.getConnection(getUrl(), props);
+ conn.createStatement().execute(
+ "CREATE TABLE " + fullTableName
+ + "(k VARCHAR PRIMARY KEY, a.v INTEGER, b.v INTEGER, c.v INTEGER NULL, d.v INTEGER NULL) ");
+ stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + " VALUES(?,?, ?, ?, ?)");
+ byte[] val = new byte[250];
+ for (int i = 0; i < nRows; i++) {
+ stmt.setString(1, Character.toString((char)('a' + i)) + Bytes.toString(val));
+ stmt.setInt(2, i);
+ stmt.setInt(3, i);
+ stmt.setInt(4, i);
+ stmt.setInt(5, i);
+ stmt.executeUpdate();
+ }
+ conn.commit();
+ stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + "(k, c.v, d.v) VALUES(?,?,?)");
+ for (int i = 0; i < 5; i++) {
+ stmt.setString(1, Character.toString((char)('a' + 'z' + i)) + Bytes.toString(val));
+ stmt.setInt(2, i);
+ stmt.setInt(3, i);
+ stmt.executeUpdate();
+ }
+ conn.commit();
+
+ ResultSet rs;
+ TestUtil.analyzeTable(conn, fullTableName);
+ List<KeyRange> keyRanges = getAllSplits(conn, fullTableName);
+ assertEquals(26, keyRanges.size());
+ rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + fullTableName);
+ assertEquals("CLIENT 26-CHUNK 25 ROWS 12420 BYTES PARALLEL 1-WAY FULL SCAN OVER " + fullTableName,
+ QueryUtil.getExplainPlan(rs));
+
+ ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
+ List<HRegionLocation> regions = services.getAllTableRegions(Bytes.toBytes(fullTableName));
+ assertEquals(1, regions.size());
+
+ TestUtil.analyzeTable(conn, fullTableName);
+ String query = "UPDATE STATISTICS " + fullTableName + " SET \""
+ + QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB + "\"=" + Long.toString(1000);
+ conn.createStatement().execute(query);
+ keyRanges = getAllSplits(conn, fullTableName);
+ assertEquals(12, keyRanges.size());
+
+ rs = conn
+ .createStatement()
+ .executeQuery(
+ "SELECT COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH),COUNT(*) from SYSTEM.STATS where PHYSICAL_NAME = '"
+ + fullTableName + "' GROUP BY COLUMN_FAMILY ORDER BY COLUMN_FAMILY");
+
+ assertTrue(rs.next());
+ assertEquals("A", rs.getString(1));
+ assertEquals(24, rs.getInt(2));
+ assertEquals(12144, rs.getInt(3));
+ assertEquals(11, rs.getInt(4));
+
+ assertTrue(rs.next());
+ assertEquals("B", rs.getString(1));
+ assertEquals(20, rs.getInt(2));
+ assertEquals(5540, rs.getInt(3));
+ assertEquals(5, rs.getInt(4));
+
+ assertTrue(rs.next());
+ assertEquals("C", rs.getString(1));
+ assertEquals(24, rs.getInt(2));
+ assertEquals(6652, rs.getInt(3));
+ assertEquals(6, rs.getInt(4));
+
+ assertTrue(rs.next());
+ assertEquals("D", rs.getString(1));
+ assertEquals(24, rs.getInt(2));
+ assertEquals(6652, rs.getInt(3));
+ assertEquals(6, rs.getInt(4));
+
+ assertFalse(rs.next());
+ }
+
+ @Test
+ public void testRowCountAndByteCounts() throws SQLException {
+ Connection conn;
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ conn = DriverManager.getConnection(getUrl(), props);
+ String ddl = "CREATE TABLE " + fullTableName + " (t_id VARCHAR NOT NULL,\n" + "k1 INTEGER NOT NULL,\n"
+ + "k2 INTEGER NOT NULL,\n" + "C3.k3 INTEGER,\n" + "C2.v1 VARCHAR,\n"
+ + "CONSTRAINT pk PRIMARY KEY (t_id, k1, k2)) split on ('e','j','o')";
+ conn.createStatement().execute(ddl);
+ String[] strings = { "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r",
+ "s", "t", "u", "v", "w", "x", "y", "z" };
+ for (int i = 0; i < 26; i++) {
+ conn.createStatement().execute(
+ "UPSERT INTO " + fullTableName + " values('" + strings[i] + "'," + i + "," + (i + 1) + ","
+ + (i + 2) + ",'" + strings[25 - i] + "')");
+ }
+ conn.commit();
+ ResultSet rs;
+ String query = "UPDATE STATISTICS " + fullTableName + " SET \""
+ + QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB + "\"=" + Long.toString(20);
+ conn.createStatement().execute(query);
+ Random r = new Random();
+ int count = 0;
+ while (count < 4) {
+ int startIndex = r.nextInt(strings.length);
+ int endIndex = r.nextInt(strings.length - startIndex) + startIndex;
+ long rows = endIndex - startIndex;
+ long c2Bytes = rows * 35;
+ System.out.println(rows + ":" + startIndex + ":" + endIndex);
+ rs = conn.createStatement().executeQuery(
+ "SELECT COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH) from SYSTEM.STATS where PHYSICAL_NAME = '"
+ + fullTableName + "' AND GUIDE_POST_KEY>= cast('" + strings[startIndex]
+ + "' as varbinary) AND GUIDE_POST_KEY<cast('" + strings[endIndex]
+ + "' as varbinary) and COLUMN_FAMILY='C2' group by COLUMN_FAMILY");
+ if (startIndex < endIndex) {
+ assertTrue(rs.next());
+ assertEquals("C2", rs.getString(1));
+ assertEquals(rows, rs.getLong(2));
+ assertEquals(c2Bytes, rs.getLong(3));
+ count++;
+ }
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
deleted file mode 100644
index d922ad9..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-import static org.apache.phoenix.util.TestUtil.getAllSplits;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Random;
-
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.query.ConnectionQueryServices;
-import org.apache.phoenix.query.KeyRange;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.apache.phoenix.util.TestUtil;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.google.common.collect.Maps;
-
-public class StatsCollectorWithSplitsAndMultiCFIT extends StatsCollectorAbstractIT {
- private static final String STATS_TEST_TABLE_NAME_NEW = "S_NEW";
- private static final byte[] STATS_TEST_TABLE_NEW_BYTES = Bytes.toBytes(STATS_TEST_TABLE_NAME_NEW);
-
- @BeforeClass
- public static void doSetup() throws Exception {
- Map<String, String> props = Maps.newHashMapWithExpectedSize(3);
- // Must update config before starting server
- props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(1000));
- props.put(QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB, Boolean.TRUE.toString());
- props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(1024));
- setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
- }
-
- @Test
- public void testWithMultiCF() throws Exception {
- int nRows = 20;
- Connection conn;
- PreparedStatement stmt;
- Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- conn = DriverManager.getConnection(getUrl(), props);
- conn.createStatement().execute("CREATE TABLE " + STATS_TEST_TABLE_NAME_NEW
- + "(k VARCHAR PRIMARY KEY, a.v INTEGER, b.v INTEGER, c.v INTEGER NULL, d.v INTEGER NULL) ");
- stmt = conn.prepareStatement("UPSERT INTO " + STATS_TEST_TABLE_NAME_NEW + " VALUES(?,?, ?, ?, ?)");
- byte[] val = new byte[250];
- for (int i = 0; i < nRows; i++) {
- stmt.setString(1, Character.toString((char)('a' + i)) + Bytes.toString(val));
- stmt.setInt(2, i);
- stmt.setInt(3, i);
- stmt.setInt(4, i);
- stmt.setInt(5, i);
- stmt.executeUpdate();
- }
- conn.commit();
- stmt = conn.prepareStatement("UPSERT INTO " + STATS_TEST_TABLE_NAME_NEW + "(k, c.v, d.v) VALUES(?,?,?)");
- for (int i = 0; i < 5; i++) {
- stmt.setString(1, Character.toString((char)('a' + 'z' + i)) + Bytes.toString(val));
- stmt.setInt(2, i);
- stmt.setInt(3, i);
- stmt.executeUpdate();
- }
- conn.commit();
-
- ResultSet rs;
- TestUtil.analyzeTable(conn, STATS_TEST_TABLE_NAME_NEW);
- List<KeyRange> keyRanges = getAllSplits(conn, STATS_TEST_TABLE_NAME_NEW);
- assertEquals(12, keyRanges.size());
- rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + STATS_TEST_TABLE_NAME_NEW);
- assertEquals("CLIENT " + (12) + "-CHUNK " + "PARALLEL 1-WAY FULL SCAN OVER " + STATS_TEST_TABLE_NAME_NEW,
- QueryUtil.getExplainPlan(rs));
-
- ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
- List<HRegionLocation> regions = services.getAllTableRegions(STATS_TEST_TABLE_NEW_BYTES);
- assertEquals(1, regions.size());
-
- TestUtil.analyzeTable(conn, STATS_TEST_TABLE_NAME_NEW);
- String query = "UPDATE STATISTICS " + STATS_TEST_TABLE_NAME_NEW + " SET \""
- + QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB + "\"=" + Long.toString(250);
- conn.createStatement().execute(query);
- keyRanges = getAllSplits(conn, STATS_TEST_TABLE_NAME_NEW);
- assertEquals(26, keyRanges.size());
-
- rs = conn.createStatement().executeQuery(
- "SELECT COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH),COUNT(*) from SYSTEM.STATS where PHYSICAL_NAME = '"
- + STATS_TEST_TABLE_NAME_NEW + "' GROUP BY COLUMN_FAMILY ORDER BY COLUMN_FAMILY");
-
- assertTrue(rs.next());
- assertEquals("A", rs.getString(1));
- assertEquals(25, rs.getInt(2));
- assertEquals(12420, rs.getInt(3));
- assertEquals(25, rs.getInt(4));
-
- assertTrue(rs.next());
- assertEquals("B", rs.getString(1));
- assertEquals(20, rs.getInt(2));
- assertEquals(5540, rs.getInt(3));
- assertEquals(20, rs.getInt(4));
-
- assertTrue(rs.next());
- assertEquals("C", rs.getString(1));
- assertEquals(25, rs.getInt(2));
- assertEquals(6930, rs.getInt(3));
- assertEquals(25, rs.getInt(4));
-
- assertTrue(rs.next());
- assertEquals("D", rs.getString(1));
- assertEquals(25, rs.getInt(2));
- assertEquals(6930, rs.getInt(3));
- assertEquals(25, rs.getInt(4));
-
- }
-
- @Test
- public void testRowCountAndByteCounts() throws SQLException {
- Connection conn;
- Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- conn = DriverManager.getConnection(getUrl(), props);
- String tableName = "T";
- String ddl = "CREATE TABLE " + tableName + " (t_id VARCHAR NOT NULL,\n" + "k1 INTEGER NOT NULL,\n"
- + "k2 INTEGER NOT NULL,\n" + "C3.k3 INTEGER,\n" + "C2.v1 VARCHAR,\n"
- + "CONSTRAINT pk PRIMARY KEY (t_id, k1, k2)) split on ('e','j','o')";
- conn.createStatement().execute(ddl);
- String[] strings = { "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r",
- "s", "t", "u", "v", "w", "x", "y", "z" };
- for (int i = 0; i < 26; i++) {
- conn.createStatement().execute("UPSERT INTO " + tableName + " values('" + strings[i] + "'," + i + ","
- + (i + 1) + "," + (i + 2) + ",'" + strings[25 - i] + "')");
- }
- conn.commit();
- ResultSet rs;
- String query = "UPDATE STATISTICS " + tableName + " SET \"" + QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB
- + "\"=" + Long.toString(20);
- conn.createStatement().execute(query);
- Random r = new Random();
- int count = 0;
- while (count < 4) {
- int startIndex = r.nextInt(strings.length);
- int endIndex = r.nextInt(strings.length - startIndex) + startIndex;
- long rows = endIndex - startIndex;
- long c2Bytes = rows * 35;
- System.out.println(rows + ":" + startIndex + ":" + endIndex);
- rs = conn.createStatement().executeQuery(
- "SELECT COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH) from SYSTEM.STATS where PHYSICAL_NAME = '"
- + tableName + "' AND GUIDE_POST_KEY>= cast('" + strings[startIndex]
- + "' as varbinary) AND GUIDE_POST_KEY<cast('" + strings[endIndex]
- + "' as varbinary) and COLUMN_FAMILY='C2' group by COLUMN_FAMILY");
- if (startIndex < endIndex) {
- assertTrue(rs.next());
- assertEquals("C2", rs.getString(1));
- assertEquals(rows, rs.getLong(2));
- assertEquals(c2Bytes, rs.getLong(3));
- count++;
- }
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
index 69b3d00..11eb40e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
@@ -23,9 +23,7 @@ import static org.apache.phoenix.exception.SQLExceptionCode.TABLE_UNDEFINED;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.KEY_SEQ;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ORDINAL_POSITION;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_TABLE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE_SEQUENCE;
import static org.apache.phoenix.schema.PTableType.SYSTEM;
import static org.apache.phoenix.schema.PTableType.TABLE;
import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
@@ -40,10 +38,16 @@ import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
import java.util.Properties;
+import java.util.Set;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.schema.ColumnAlreadyExistsException;
import org.apache.phoenix.schema.ColumnNotFoundException;
@@ -62,14 +66,17 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
@Test
public void testCreateTenantSpecificTable() throws Exception {
// ensure we didn't create a physical HBase table for the tenant-specific table
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).getAdmin();
+ Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES));
+ HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
assertEquals(0, admin.listTables(TENANT_TABLE_NAME).length);
}
@Test
public void testCreateTenantTableTwice() throws Exception {
try {
- createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, TENANT_TABLE_DDL, null, nextTimestamp(), false);
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ Connection conn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
+ conn.createStatement().execute(TENANT_TABLE_DDL);
fail();
}
catch (TableAlreadyExistsException expected) {}
@@ -77,11 +84,13 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
@Test
public void testCreateTenantViewFromNonMultiTenant() throws Exception {
- createTestTable(getUrl(), "CREATE TABLE NON_MULTI_TENANT_TABLE (K VARCHAR PRIMARY KEY)", null, nextTimestamp());
+ String tableName = generateRandomString();
+ createTestTable(getUrl(), "CREATE TABLE " + tableName + " (K VARCHAR PRIMARY KEY)");
try {
+ String viewName = generateRandomString();
// Only way to get this exception is to attempt to derive from a global, multi-type table, as we won't find
// a tenant-specific table when we attempt to resolve the base table.
- createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW TENANT_TABLE2 (COL VARCHAR) AS SELECT * FROM NON_MULTI_TENANT_TABLE", null, nextTimestamp());
+ createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW " + viewName + " (COL VARCHAR) AS SELECT * FROM " + tableName);
}
catch (TableNotFoundException expected) {
}
@@ -89,10 +98,9 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
@Test
public void testAlteringMultiTenancyForTableWithViewsNotAllowed() throws Exception {
- Properties props = new Properties();
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(nextTimestamp()));
- String multiTenantTable = "BASE_MULTI_TENANT_SWITCH";
- String globalTable = "GLOBAL_TABLE_SWITCH";
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ String multiTenantTable = "MT_" + generateRandomString();
+ String globalTable = "G_" + generateRandomString();
// create the two base tables
try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
String ddl = "CREATE TABLE " + multiTenantTable + " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR, V3 VARCHAR CONSTRAINT NAME_PK PRIMARY KEY(TENANT_ID, PK1)) MULTI_TENANT = true ";
@@ -100,23 +108,21 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
ddl = "CREATE TABLE " + globalTable + " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR, V3 VARCHAR CONSTRAINT NAME_PK PRIMARY KEY(TENANT_ID, PK1)) ";
conn.createStatement().execute(ddl);
}
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(nextTimestamp()));
- props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "tenant1");
+ String t1 = generateRandomString();
+ props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, t1);
// create view on multi-tenant table
try (Connection tenantConn = DriverManager.getConnection(getUrl(), props)) {
- String viewName = "tenantview";
+ String viewName = "V_" + generateRandomString();
String viewDDL = "CREATE VIEW " + viewName + " AS SELECT * FROM " + multiTenantTable;
tenantConn.createStatement().execute(viewDDL);
}
- props = new Properties();
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(nextTimestamp()));
+ props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
// create view on global table
try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
- String viewName = "globalView";
+ String viewName = "V_" + generateRandomString();
conn.createStatement().execute("CREATE VIEW " + viewName + " AS SELECT * FROM " + globalTable);
}
- props = new Properties();
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(nextTimestamp()));
+ props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
try {
conn.createStatement().execute("ALTER TABLE " + globalTable + " SET MULTI_TENANT = " + true);
@@ -134,29 +140,21 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
}
}
- @Test
- public void testCreateTenantTableWithSameWhereClause() throws Exception {
- createTestTable(getUrl(), PARENT_TABLE_DDL.replace(PARENT_TABLE_NAME, PARENT_TABLE_NAME + "_II"), null, nextTimestamp());
- createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, TENANT_TABLE_DDL.replace(TENANT_TABLE_NAME, TENANT_TABLE_NAME + "2"), null, nextTimestamp());
- }
-
@Test(expected=TableNotFoundException.class)
public void testDeletionOfParentTableFailsOnTenantSpecificConnection() throws Exception {
- createTestTable(getUrl(), PARENT_TABLE_DDL.replace(PARENT_TABLE_NAME, "TEMP_PARENT"), null, nextTimestamp());
- Properties props = new Properties();
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(nextTimestamp()));
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, TENANT_ID); // connection is tenant-specific
Connection conn = DriverManager.getConnection(getUrl(), props);
- conn.createStatement().execute("DROP TABLE TEMP_PARENT");
+ conn.createStatement().execute("DROP TABLE " + PARENT_TABLE_NAME);
conn.close();
}
public void testCreationOfParentTableFailsOnTenantSpecificConnection() throws Exception {
try {
- createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE TABLE PARENT_TABLE ( \n" +
+ createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE TABLE " + generateRandomString() + "( \n" +
" user VARCHAR ,\n" +
" id INTEGER not null primary key desc\n" +
- " ) ", null, nextTimestamp());
+ " ) ");
fail();
} catch (SQLException e) {
assertEquals(SQLExceptionCode.CANNOT_CREATE_TENANT_SPECIFIC_TABLE.getErrorCode(), e.getErrorCode());
@@ -165,13 +163,14 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
@Test
public void testTenantSpecificAndParentTablesMayBeInDifferentSchemas() throws SQLException {
- createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW DIFFSCHEMA.TENANT_TABLE ( \n" +
+ String fullTableName = "DIFFSCHEMA." + generateRandomString();
+ createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW " + fullTableName + " ( \n" +
" tenant_col VARCHAR) AS SELECT * \n" +
- " FROM " + PARENT_TABLE_NAME + " WHERE tenant_type_id = 'aaa'", null, nextTimestamp());
+ " FROM " + PARENT_TABLE_NAME + " WHERE tenant_type_id = 'aaa'");
try {
- createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW DIFFSCHEMA.TENANT_TABLE ( \n" +
+ createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW " + fullTableName + "( \n" +
" tenant_col VARCHAR) AS SELECT *\n"+
- " FROM DIFFSCHEMA." + PARENT_TABLE_NAME + " WHERE tenant_type_id = 'aaa'", null, nextTimestamp());
+ " FROM DIFFSCHEMA." + PARENT_TABLE_NAME + " WHERE tenant_type_id = 'aaa'");
fail();
}
catch (SQLException expected) {
@@ -184,31 +183,33 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
" tenant_type_id VARCHAR(3) NOT NULL, \n" +
" id INTEGER NOT NULL\n" +
" CONSTRAINT pk PRIMARY KEY (tenant_id, tenant_type_id, id)) MULTI_TENANT=true";
- createTestTable(getUrl(), newDDL, null, nextTimestamp());
- createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW DIFFSCHEMA.TENANT_TABLE ( \n" +
+ createTestTable(getUrl(), newDDL);
+ createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW " + fullTableName + "( \n" +
" tenant_col VARCHAR) AS SELECT *\n"+
- " FROM DIFFSCHEMA." + PARENT_TABLE_NAME + " WHERE tenant_type_id = 'aaa'", null, nextTimestamp());
+ " FROM DIFFSCHEMA." + PARENT_TABLE_NAME + " WHERE tenant_type_id = 'aaa'");
}
@Test
public void testTenantSpecificTableCanDeclarePK() throws SQLException {
- createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW TENANT_TABLE2 ( \n" +
+ createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW " + generateRandomString() + "( \n" +
" tenant_col VARCHAR PRIMARY KEY) AS SELECT *\n" +
- " FROM PARENT_TABLE", null, nextTimestamp());
+ " FROM " + PARENT_TABLE_NAME);
}
@Test(expected=ColumnAlreadyExistsException.class)
public void testTenantSpecificTableCannotOverrideParentCol() throws SQLException {
- createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW TENANT_TABLE2 ( \n" +
+ createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW " + generateRandomString() + " ( \n" +
" user INTEGER) AS SELECT *\n" +
- " FROM PARENT_TABLE", null, nextTimestamp());
+ " FROM " + PARENT_TABLE_NAME);
}
@Test
public void testBaseTableWrongFormatWithTenantTypeId() throws Exception {
// only two PK columns for multi_tenant, multi_type
try {
- createTestTable(getUrl(), "CREATE TABLE BASE_TABLE2 (TENANT_ID VARCHAR NOT NULL PRIMARY KEY, ID VARCHAR, A INTEGER) MULTI_TENANT=true", null, nextTimestamp());
+ createTestTable(getUrl(),
+ "CREATE TABLE " + generateRandomString() +
+ "(TENANT_ID VARCHAR NOT NULL PRIMARY KEY, ID VARCHAR, A INTEGER) MULTI_TENANT=true");
fail();
}
catch (SQLException expected) {
@@ -218,25 +219,13 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
@Test
public void testAddDropColumn() throws Exception {
- Properties props = new Properties();
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(nextTimestamp()));
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
conn.setAutoCommit(true);
try {
- conn.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " (id, tenant_col) values (1, 'Viva Las Vegas')");
-
+ conn.createStatement().execute("upsert into " + TENANT_TABLE_NAME + " (id, tenant_col) values (1, 'Viva Las Vegas')");
conn.createStatement().execute("alter view " + TENANT_TABLE_NAME + " add tenant_col2 char(1) null");
-
- conn.close();
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(nextTimestamp()));
- conn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
- conn.setAutoCommit(true);
-
- conn.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " (id, tenant_col2) values (2, 'a')");
- conn.close();
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(nextTimestamp()));
- conn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
- conn.setAutoCommit(true);
+ conn.createStatement().execute("upsert into " + TENANT_TABLE_NAME + " (id, tenant_col2) values (2, 'a')");
ResultSet rs = conn.createStatement().executeQuery("select count(*) from " + TENANT_TABLE_NAME);
rs.next();
@@ -246,24 +235,13 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
rs.next();
assertEquals(1, rs.getInt(1));
- conn.close();
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(nextTimestamp()));
- conn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
- conn.setAutoCommit(true);
-
conn.createStatement().execute("alter view " + TENANT_TABLE_NAME + " drop column tenant_col");
-
- conn.close();
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(nextTimestamp()));
- conn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
- conn.setAutoCommit(true);
-
rs = conn.createStatement().executeQuery("select count(*) from " + TENANT_TABLE_NAME + "");
rs.next();
assertEquals(2, rs.getInt(1));
try {
- rs = conn.createStatement().executeQuery("select tenant_col from TENANT_TABLE");
+ rs = conn.createStatement().executeQuery("select tenant_col from " + TENANT_TABLE_NAME);
fail();
}
catch (ColumnNotFoundException expected) {}
@@ -275,8 +253,7 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
@Test
public void testDropOfPKInTenantTablesNotAllowed() throws Exception {
- Properties props = new Properties();
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(nextTimestamp()));
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
try {
// try removing a PK col
@@ -295,8 +272,7 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
@Test
public void testColumnMutationInParentTableWithExistingTenantTable() throws Exception {
- Properties props = new Properties();
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(nextTimestamp()));
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(getUrl(), props);
try {
try {
@@ -322,8 +298,7 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
@Test
public void testDisallowDropParentTableWithExistingTenantTable() throws Exception {
- Properties props = new Properties();
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(nextTimestamp()));
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(getUrl(), props);
try {
conn.createStatement().executeUpdate("drop table " + PARENT_TABLE_NAME);
@@ -339,18 +314,13 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
@Test
public void testAllowDropParentTableWithCascadeAndSingleTenantTable() throws Exception {
- long ts = nextTimestamp();
- Properties props = new Properties();
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts));
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(getUrl(), props);
Connection connTenant = null;
try {
// Drop Parent Table
conn.createStatement().executeUpdate("DROP TABLE " + PARENT_TABLE_NAME + " CASCADE");
- conn.close();
-
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 10));
connTenant = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
validateTenantViewIsDropped(conn);
@@ -368,56 +338,58 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
@Test
public void testAllDropParentTableWithCascadeWithMultipleTenantTablesAndIndexes() throws Exception {
// Create a second tenant table
- createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL2, TENANT_TABLE_DDL, null, nextTimestamp());
+ String tenantTable2 = "V_" + generateRandomString();
+ createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL2, TENANT_TABLE_DDL.replace(TENANT_TABLE_NAME, tenantTable2));
//TODO Create some tenant specific table indexes
- long ts = nextTimestamp();
- Properties props = new Properties();
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts));
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = null;
Connection connTenant1 = null;
Connection connTenant2 = null;
try {
+ List<String> sortedCatalogs = Arrays.asList(TENANT_ID, TENANT_ID2);
+ Collections.sort(sortedCatalogs);
conn = DriverManager.getConnection(getUrl(), props);
DatabaseMetaData meta = conn.getMetaData();
- ResultSet rs = meta.getSuperTables(null, null, StringUtil.escapeLike(TENANT_TABLE_NAME) + "%");
- assertTrue(rs.next());
- assertEquals(TENANT_ID2, rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
- assertEquals(TENANT_TABLE_NAME, rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
- assertEquals(PARENT_TABLE_NAME, rs.getString(PhoenixDatabaseMetaData.SUPERTABLE_NAME));
+ ResultSet rs = meta.getTables(null, "", StringUtil.escapeLike(TENANT_TABLE_NAME), new String[] {PTableType.VIEW.getValue().getString()});
assertTrue(rs.next());
assertEquals(TENANT_ID, rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
- assertEquals(TENANT_TABLE_NAME, rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
- assertEquals(PARENT_TABLE_NAME, rs.getString(PhoenixDatabaseMetaData.SUPERTABLE_NAME));
+ assertTableMetaData(rs, null, TENANT_TABLE_NAME, PTableType.VIEW);
+ assertFalse(rs.next());
+
+ rs = meta.getTables(null, "", StringUtil.escapeLike(tenantTable2), new String[] {PTableType.VIEW.getValue().getString()});
+ assertTrue(rs.next());
+ assertEquals(TENANT_ID2, rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
+ assertTableMetaData(rs, null, tenantTable2, PTableType.VIEW);
+ assertFalse(rs.next());
+
+ rs = meta.getTables(null, "", StringUtil.escapeLike(TENANT_TABLE_NAME_NO_TENANT_TYPE_ID), new String[] {PTableType.VIEW.getValue().getString()});
assertTrue(rs.next());
assertEquals(TENANT_ID, rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
- assertEquals(TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
- assertEquals(PARENT_TABLE_NAME_NO_TENANT_TYPE_ID, rs.getString(PhoenixDatabaseMetaData.SUPERTABLE_NAME));
+ assertTableMetaData(rs, null, TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, PTableType.VIEW);
assertFalse(rs.next());
- rs.close();
- conn.close();
// Drop Parent Table
conn.createStatement().executeUpdate("DROP TABLE " + PARENT_TABLE_NAME + " CASCADE");
// Validate Tenant Views are dropped
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 10));
connTenant1 = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
validateTenantViewIsDropped(connTenant1);
connTenant2 = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL2, props);
validateTenantViewIsDropped(connTenant2);
// Validate Tenant Metadata is gone for the Tenant Table TENANT_TABLE_NAME
- conn = DriverManager.getConnection(getUrl(), props);
- meta = conn.getMetaData();
- rs = meta.getSuperTables(null, null, StringUtil.escapeLike(TENANT_TABLE_NAME) + "%");
+ rs = meta.getTables(null, "", StringUtil.escapeLike(TENANT_TABLE_NAME), new String[] {PTableType.VIEW.getValue().getString()});
+ assertFalse(rs.next());
+ rs = meta.getTables(null, "", StringUtil.escapeLike(tenantTable2), new String[] {PTableType.VIEW.getValue().getString()});
+ assertFalse(rs.next());
+
+ rs = meta.getTables(null, "", StringUtil.escapeLike(TENANT_TABLE_NAME_NO_TENANT_TYPE_ID), new String[] {PTableType.VIEW.getValue().getString()});
assertTrue(rs.next());
assertEquals(TENANT_ID, rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
- assertEquals(TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
- assertEquals(PARENT_TABLE_NAME_NO_TENANT_TYPE_ID, rs.getString(PhoenixDatabaseMetaData.SUPERTABLE_NAME));
+ assertTableMetaData(rs, null, TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, PTableType.VIEW);
assertFalse(rs.next());
- rs.close();
} finally {
if (conn != null) {
@@ -446,29 +418,23 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
@Test
public void testTableMetadataScan() throws Exception {
// create a tenant table with same name for a different tenant to make sure we are not picking it up in metadata scans for TENANT_ID
- String tenantId2 = "tenant2";
+ String tenantId2 = "T_" + generateRandomString();
String secondTenatConnectionURL = PHOENIX_JDBC_TENANT_SPECIFIC_URL.replace(TENANT_ID, tenantId2);
- String tenantTable2 = TENANT_TABLE_NAME+"2";
- createTestTable(secondTenatConnectionURL, TENANT_TABLE_DDL.replace(TENANT_TABLE_NAME, tenantTable2), null, nextTimestamp(), false);
+ String tenantTable2 = "V_" + generateRandomString();
+ createTestTable(secondTenatConnectionURL, TENANT_TABLE_DDL.replace(TENANT_TABLE_NAME, tenantTable2));
- Properties props = new Properties();
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(nextTimestamp()));
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(getUrl(), props);
try {
// empty string means global tenant id
// make sure connections w/o tenant id only see non-tenant-specific tables, both SYSTEM and USER
DatabaseMetaData meta = conn.getMetaData();
- ResultSet rs = meta.getTables("", null, null, null);
- assertTrue(rs.next());
- assertTableMetaData(rs, SYSTEM_CATALOG_SCHEMA, SYSTEM_CATALOG_TABLE, SYSTEM);
- assertTrue(rs.next());
- assertTableMetaData(rs, SYSTEM_CATALOG_SCHEMA, SYSTEM_FUNCTION_TABLE, SYSTEM);
- assertTrue(rs.next());
- assertTableMetaData(rs, SYSTEM_CATALOG_SCHEMA, TYPE_SEQUENCE, SYSTEM);
- assertTrue(rs.next());
- assertTableMetaData(rs, SYSTEM_CATALOG_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE, SYSTEM);
+ ResultSet rs = meta.getTables("", "", StringUtil.escapeLike(PARENT_TABLE_NAME), new String[] {TABLE.getValue().getString()});
assertTrue(rs.next());
assertTableMetaData(rs, null, PARENT_TABLE_NAME, TABLE);
+ assertFalse(rs.next());
+
+ rs = meta.getTables("", "", StringUtil.escapeLike(PARENT_TABLE_NAME_NO_TENANT_TYPE_ID), new String[] {TABLE.getValue().getString()});
assertTrue(rs.next());
assertTableMetaData(rs, null, PARENT_TABLE_NAME_NO_TENANT_TYPE_ID, TABLE);
assertFalse(rs.next());
@@ -480,16 +446,26 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
assertNotEquals(tenantTable2, rs.getString("TABLE_NAME"));
}
- // null catalog means across all tenant_ids
- rs = meta.getSuperTables(null, null, StringUtil.escapeLike(TENANT_TABLE_NAME) + "%");
+ List<String> sortedTableNames = Arrays.asList(TENANT_TABLE_NAME, TENANT_TABLE_NAME_NO_TENANT_TYPE_ID);
+ Collections.sort(sortedTableNames);
+ List<String> sortedParentNames;
+ if (sortedTableNames.get(0).equals(TENANT_TABLE_NAME)) {
+ sortedParentNames = Arrays.asList(PARENT_TABLE_NAME, PARENT_TABLE_NAME_NO_TENANT_TYPE_ID);
+ } else {
+ sortedParentNames = Arrays.asList(PARENT_TABLE_NAME_NO_TENANT_TYPE_ID, PARENT_TABLE_NAME);
+ }
+ rs = meta.getSuperTables(TENANT_ID, null, null);
assertTrue(rs.next());
assertEquals(TENANT_ID, rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
- assertEquals(TENANT_TABLE_NAME, rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
- assertEquals(PARENT_TABLE_NAME, rs.getString(PhoenixDatabaseMetaData.SUPERTABLE_NAME));
+ assertEquals(sortedTableNames.get(0), rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
+ assertEquals(sortedParentNames.get(0), rs.getString(PhoenixDatabaseMetaData.SUPERTABLE_NAME));
assertTrue(rs.next());
assertEquals(TENANT_ID, rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
- assertEquals(TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
- assertEquals(PARENT_TABLE_NAME_NO_TENANT_TYPE_ID, rs.getString(PhoenixDatabaseMetaData.SUPERTABLE_NAME));
+ assertEquals(sortedTableNames.get(1), rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
+ assertEquals(sortedParentNames.get(1), rs.getString(PhoenixDatabaseMetaData.SUPERTABLE_NAME));
+ assertFalse(rs.next());
+
+ rs = meta.getSuperTables(tenantId2, null, null);
assertTrue(rs.next());
assertEquals(tenantId2, rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
assertEquals(tenantTable2, rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
@@ -497,36 +473,22 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
assertFalse(rs.next());
conn.close();
- // Global connection sees all tenant tables
- conn = DriverManager.getConnection(getUrl(), props);
- rs = conn.getMetaData().getSuperTables(TENANT_ID, null, null);
- assertTrue(rs.next());
- assertEquals(TENANT_ID, rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
- assertEquals(TENANT_TABLE_NAME, rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
- assertEquals(PARENT_TABLE_NAME, rs.getString(PhoenixDatabaseMetaData.SUPERTABLE_NAME));
- assertTrue(rs.next());
- assertEquals(TENANT_ID, rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
- assertEquals(TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
- assertEquals(PARENT_TABLE_NAME_NO_TENANT_TYPE_ID, rs.getString(PhoenixDatabaseMetaData.SUPERTABLE_NAME));
- assertFalse(rs.next());
-
+ Set<String> sortedCatalogs = new HashSet<>(Arrays.asList(TENANT_ID, tenantId2));
rs = conn.getMetaData().getCatalogs();
- assertTrue(rs.next());
- assertEquals(TENANT_ID, rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
- assertTrue(rs.next());
- assertEquals(tenantId2, rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
- assertFalse(rs.next());
+ while (rs.next()) {
+ sortedCatalogs.remove(rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
+ }
+ assertTrue("Should have found both tenant IDs", sortedCatalogs.isEmpty());
} finally {
props.clear();
conn.close();
}
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(nextTimestamp()));
conn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
try {
// make sure tenant-specific connections only see their own tables and the global tables
DatabaseMetaData meta = conn.getMetaData();
- ResultSet rs = meta.getTables(null, null, null, null);
+ ResultSet rs = meta.getTables("", SYSTEM_CATALOG_SCHEMA, null, new String[] {PTableType.SYSTEM.getValue().getString()});
assertTrue(rs.next());
assertTableMetaData(rs, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE, PTableType.SYSTEM);
assertTrue(rs.next());
@@ -535,18 +497,33 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
assertTableMetaData(rs, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, PhoenixDatabaseMetaData.TYPE_SEQUENCE, PTableType.SYSTEM);
assertTrue(rs.next());
assertTableMetaData(rs, SYSTEM_CATALOG_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE, PTableType.SYSTEM);
+ assertFalse(rs.next());
+
+ rs = meta.getTables(null, "", StringUtil.escapeLike(tenantTable2), new String[] {TABLE.getValue().getString()});
+ assertFalse(rs.next());
+
+ rs = meta.getTables(null, "", StringUtil.escapeLike(PARENT_TABLE_NAME), new String[] {TABLE.getValue().getString()});
assertTrue(rs.next());
- assertTableMetaData(rs, null, PARENT_TABLE_NAME, PTableType.TABLE);
+ assertTableMetaData(rs, null, PARENT_TABLE_NAME, TABLE);
+ assertFalse(rs.next());
+
+ rs = meta.getTables(null, "", StringUtil.escapeLike(PARENT_TABLE_NAME_NO_TENANT_TYPE_ID), new String[] {TABLE.getValue().getString()});
assertTrue(rs.next());
- assertTableMetaData(rs, null, PARENT_TABLE_NAME_NO_TENANT_TYPE_ID, PTableType.TABLE);
+ assertTableMetaData(rs, null, PARENT_TABLE_NAME_NO_TENANT_TYPE_ID, TABLE);
+ assertFalse(rs.next());
+
+ rs = meta.getTables(null, "", StringUtil.escapeLike(TENANT_TABLE_NAME), new String[] {PTableType.VIEW.getValue().getString()});
assertTrue(rs.next());
assertTableMetaData(rs, null, TENANT_TABLE_NAME, PTableType.VIEW);
+ assertFalse(rs.next());
+
+ rs = meta.getTables(null, "", StringUtil.escapeLike(TENANT_TABLE_NAME_NO_TENANT_TYPE_ID), new String[] {PTableType.VIEW.getValue().getString()});
assertTrue(rs.next());
assertTableMetaData(rs, null, TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, PTableType.VIEW);
assertFalse(rs.next());
// make sure tenants see parent table's columns and their own
- rs = meta.getColumns(null, null, StringUtil.escapeLike(TENANT_TABLE_NAME) + "%", null);
+ rs = meta.getColumns(null, null, StringUtil.escapeLike(TENANT_TABLE_NAME), null);
assertTrue(rs.next());
assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "user", 1);
assertTrue(rs.next());
@@ -557,6 +534,9 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "id", 3);
assertTrue(rs.next());
assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "tenant_col", 4);
+ assertFalse(rs.next());
+
+ rs = meta.getColumns(null, null, StringUtil.escapeLike(TENANT_TABLE_NAME_NO_TENANT_TYPE_ID), null);
assertTrue(rs.next());
assertColumnMetaData(rs, null, TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, "user", 1);
assertTrue(rs.next());
[4/4] phoenix git commit: PHOENIX-3290 Move and/or combine as many
NeedsOwnCluster tests to bring down test run time
Posted by ja...@apache.org.
PHOENIX-3290 Move and/or combine as many NeedsOwnCluster tests to bring down test run time
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2d27179b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2d27179b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2d27179b
Branch: refs/heads/master
Commit: 2d27179b3fcfc7a8d2c8f7bad4a37b3071c81961
Parents: 7601d59
Author: James Taylor <ja...@apache.org>
Authored: Tue Sep 20 18:39:05 2016 -0700
Committer: James Taylor <ja...@apache.org>
Committed: Tue Sep 20 18:39:05 2016 -0700
----------------------------------------------------------------------
.../StatisticsCollectionRunTrackerIT.java | 25 +-
.../BaseOwnClusterClientManagedTimeIT.java | 29 --
.../BaseOwnClusterHBaseManagedTimeIT.java | 29 --
.../phoenix/end2end/BaseOwnClusterIT.java | 7 +
.../end2end/BaseTenantSpecificTablesIT.java | 87 +++---
.../org/apache/phoenix/end2end/BaseViewIT.java | 81 ++---
.../end2end/CountDistinctCompressionIT.java | 2 +-
.../phoenix/end2end/CsvBulkLoadToolIT.java | 2 +-
.../apache/phoenix/end2end/IndexExtendedIT.java | 2 +-
.../org/apache/phoenix/end2end/KeyOnlyIT.java | 127 +++-----
.../phoenix/end2end/MultiCfQueryExecIT.java | 306 ++++++++-----------
.../phoenix/end2end/ParallelIteratorsIT.java | 112 +++----
.../phoenix/end2end/ParallelStatsEnabledIT.java | 16 +-
.../apache/phoenix/end2end/QueryTimeoutIT.java | 2 +-
.../phoenix/end2end/QueryWithLimitIT.java | 10 +-
.../phoenix/end2end/QueryWithOffsetIT.java | 78 ++---
.../apache/phoenix/end2end/RenewLeaseIT.java | 2 +-
.../phoenix/end2end/SpillableGroupByIT.java | 127 ++++----
.../end2end/StatsCollectionDisabledIT.java | 79 -----
.../end2end/StatsCollectorAbstractIT.java | 77 -----
.../phoenix/end2end/StatsCollectorIT.java | 161 +++++++++-
.../StatsCollectorWithSplitsAndMultiCFIT.java | 186 -----------
.../end2end/TenantSpecificTablesDDLIT.java | 272 ++++++++---------
.../end2end/TenantSpecificTablesDMLIT.java | 269 ++++++----------
.../phoenix/end2end/TransactionalViewIT.java | 48 ++-
.../org/apache/phoenix/end2end/UpgradeIT.java | 104 ++++---
.../phoenix/end2end/UserDefinedFunctionsIT.java | 7 +-
.../java/org/apache/phoenix/end2end/ViewIT.java | 249 ++++++++-------
.../index/ImmutableIndexWithStatsIT.java | 26 +-
.../end2end/index/MutableIndexFailureIT.java | 4 +-
.../end2end/index/ReadOnlyIndexFailureIT.java | 4 +-
.../end2end/index/txn/TxWriteFailureIT.java | 4 +-
.../apache/phoenix/execute/PartialCommitIT.java | 5 +
.../RoundRobinResultIteratorWithStatsIT.java | 4 +-
.../phoenix/monitoring/PhoenixMetricsIT.java | 4 +-
.../apache/phoenix/rpc/PhoenixClientRpcIT.java | 4 +-
.../apache/phoenix/rpc/PhoenixServerRpcIT.java | 4 +-
.../org/apache/phoenix/util/MetaDataUtil.java | 4 +
.../java/org/apache/phoenix/query/BaseTest.java | 4 -
.../java/org/apache/phoenix/util/TestUtil.java | 17 +-
pom.xml | 3 -
41 files changed, 1105 insertions(+), 1478 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
index bf567f0..bd88922 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
@@ -33,20 +33,26 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
+import org.apache.phoenix.end2end.ParallelStatsEnabledIT;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.schema.stats.StatisticsCollectionRunTracker;
import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
import org.junit.Assert;
+import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import com.google.common.collect.Maps;
-public class StatisticsCollectionRunTrackerIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class StatisticsCollectionRunTrackerIT extends ParallelStatsEnabledIT {
private static final StatisticsCollectionRunTracker tracker = StatisticsCollectionRunTracker
.getInstance(new Configuration());
+
+ private String fullTableName;
+
@BeforeClass
public static void doSetup() throws Exception {
Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
@@ -56,9 +62,16 @@ public class StatisticsCollectionRunTrackerIT extends BaseOwnClusterHBaseManaged
setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
+ @Before
+ public void generateTableNames() {
+ String schemaName = TestUtil.DEFAULT_SCHEMA_NAME;
+ String tableName = "T_" + generateRandomString();
+ fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+ }
+
@Test
public void testStateBeforeAndAfterUpdateStatsCommand() throws Exception {
- String tableName = "testStateBeforeAndAfterUpdateStatsCommand".toUpperCase();
+ String tableName = fullTableName;
HRegionInfo regionInfo = createTableAndGetRegion(tableName);
StatisticsCollectionRunTracker tracker =
StatisticsCollectionRunTracker.getInstance(new Configuration());
@@ -73,7 +86,7 @@ public class StatisticsCollectionRunTrackerIT extends BaseOwnClusterHBaseManaged
@Test
public void testStateBeforeAndAfterMajorCompaction() throws Exception {
- String tableName = "testStateBeforeAndAfterMajorCompaction".toUpperCase();
+ String tableName = fullTableName;
HRegionInfo regionInfo = createTableAndGetRegion(tableName);
StatisticsCollectionRunTracker tracker =
StatisticsCollectionRunTracker.getInstance(new Configuration());
@@ -101,7 +114,7 @@ public class StatisticsCollectionRunTrackerIT extends BaseOwnClusterHBaseManaged
@Test
public void testMajorCompactionPreventsUpdateStatsFromRunning() throws Exception {
- String tableName = "testMajorCompactionPreventsUpdateStatsFromRunning".toUpperCase();
+ String tableName = fullTableName;
HRegionInfo regionInfo = createTableAndGetRegion(tableName);
// simulate stats collection via major compaction by marking the region as compacting in the tracker
markRegionAsCompacting(regionInfo);
@@ -114,7 +127,7 @@ public class StatisticsCollectionRunTrackerIT extends BaseOwnClusterHBaseManaged
@Test
public void testUpdateStatsPreventsAnotherUpdateStatsFromRunning() throws Exception {
- String tableName = "testUpdateStatsPreventsAnotherUpdateStatsFromRunning".toUpperCase();
+ String tableName = fullTableName;
HRegionInfo regionInfo = createTableAndGetRegion(tableName);
markRunningUpdateStats(regionInfo);
Assert.assertEquals("Row count didn't match", CONCURRENT_UPDATE_STATS_ROW_COUNT,
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterClientManagedTimeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterClientManagedTimeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterClientManagedTimeIT.java
deleted file mode 100644
index 6ece674..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterClientManagedTimeIT.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you maynot use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicablelaw or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import org.junit.After;
-
-
-public class BaseOwnClusterClientManagedTimeIT extends BaseOwnClusterIT {
- @After
- public void cleanUpAfterTest() throws Exception {
- long ts = nextTimestamp();
- deletePriorMetaData(ts - 1, getUrl());
- }
-}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterHBaseManagedTimeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterHBaseManagedTimeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterHBaseManagedTimeIT.java
deleted file mode 100644
index 63a4300..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterHBaseManagedTimeIT.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you maynot use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicablelaw or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import org.apache.hadoop.hbase.HConstants;
-import org.junit.After;
-
-
-public class BaseOwnClusterHBaseManagedTimeIT extends BaseOwnClusterIT {
- @After
- public void cleanUpAfterTest() throws Exception {
- deletePriorMetaData(HConstants.LATEST_TIMESTAMP, getUrl());
- }
-}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterIT.java
index 222efcb..44bd3a1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOwnClusterIT.java
@@ -17,7 +17,9 @@
*/
package org.apache.phoenix.end2end;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.phoenix.query.BaseTest;
+import org.junit.After;
import org.junit.AfterClass;
import org.junit.experimental.categories.Category;
@@ -27,4 +29,9 @@ public class BaseOwnClusterIT extends BaseTest {
public static void doTeardown() throws Exception {
tearDownMiniCluster();
}
+
+ @After
+ public void cleanUpAfterTest() throws Exception {
+ deletePriorMetaData(HConstants.LATEST_TIMESTAMP, getUrl());
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java
index 1e87b8f..17918d6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificTablesIT.java
@@ -20,64 +20,61 @@ package org.apache.phoenix.end2end;
import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
import java.sql.SQLException;
-import java.util.Map;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.query.BaseTest;
import org.junit.Before;
-import org.junit.BeforeClass;
-import com.google.common.collect.Maps;
-
-public abstract class BaseTenantSpecificTablesIT extends BaseOwnClusterClientManagedTimeIT {
- protected static final String TENANT_ID = "ZZTop";
- protected static final String TENANT_TYPE_ID = "abc";
- protected static String PHOENIX_JDBC_TENANT_SPECIFIC_URL;
- protected static final String TENANT_ID2 = "Styx";
- protected static String PHOENIX_JDBC_TENANT_SPECIFIC_URL2;
+public abstract class BaseTenantSpecificTablesIT extends ParallelStatsEnabledIT {
+ protected String TENANT_ID;
+ protected String TENANT_TYPE_ID = "abc";
+ protected String PHOENIX_JDBC_TENANT_SPECIFIC_URL;
+ protected String TENANT_ID2;
+ protected String PHOENIX_JDBC_TENANT_SPECIFIC_URL2;
- protected static final String PARENT_TABLE_NAME = "PARENT_TABLE";
- protected static final String PARENT_TABLE_DDL = "CREATE TABLE " + PARENT_TABLE_NAME + " ( \n" +
- " user VARCHAR ,\n" +
- " tenant_id VARCHAR(5) NOT NULL,\n" +
- " tenant_type_id VARCHAR(3) NOT NULL, \n" +
- " id INTEGER NOT NULL\n" +
- " CONSTRAINT pk PRIMARY KEY (tenant_id, tenant_type_id, id)) MULTI_TENANT=true, IMMUTABLE_ROWS=true";
+ protected String PARENT_TABLE_NAME;
+ protected String PARENT_TABLE_DDL;
- protected static final String TENANT_TABLE_NAME = "TENANT_TABLE";
- protected static final String TENANT_TABLE_DDL = "CREATE VIEW " + TENANT_TABLE_NAME + " ( \n" +
- " tenant_col VARCHAR) AS SELECT *\n" +
- " FROM " + PARENT_TABLE_NAME + " WHERE tenant_type_id= '" + TENANT_TYPE_ID + "'";
+ protected String TENANT_TABLE_NAME;
+ protected String TENANT_TABLE_DDL;
- protected static final String PARENT_TABLE_NAME_NO_TENANT_TYPE_ID = "PARENT_TABLE_NO_TENANT_TYPE_ID";
- protected static final String PARENT_TABLE_DDL_NO_TENANT_TYPE_ID = "CREATE TABLE " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID + " ( \n" +
- " user VARCHAR ,\n" +
- " tenant_id VARCHAR(5) NOT NULL,\n" +
- " id INTEGER NOT NULL,\n" +
- " CONSTRAINT pk PRIMARY KEY (tenant_id, id)) MULTI_TENANT=true, IMMUTABLE_ROWS=true";
+ protected String PARENT_TABLE_NAME_NO_TENANT_TYPE_ID;
+ protected String PARENT_TABLE_DDL_NO_TENANT_TYPE_ID;
- protected static final String TENANT_TABLE_NAME_NO_TENANT_TYPE_ID = "TENANT_TABLE_NO_TENANT_TYPE_ID";
- protected static final String TENANT_TABLE_DDL_NO_TENANT_TYPE_ID = "CREATE VIEW " + TENANT_TABLE_NAME_NO_TENANT_TYPE_ID + " ( \n" +
- " tenant_col VARCHAR) AS SELECT *\n" +
- " FROM " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID;
+ protected String TENANT_TABLE_NAME_NO_TENANT_TYPE_ID;
+ protected String TENANT_TABLE_DDL_NO_TENANT_TYPE_ID;
@Before
public void createTables() throws SQLException {
- createTestTable(getUrl(), PARENT_TABLE_DDL, null, nextTimestamp());
- createTestTable(getUrl(), PARENT_TABLE_DDL_NO_TENANT_TYPE_ID, null, nextTimestamp());
- createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, TENANT_TABLE_DDL, null, nextTimestamp());
- createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, TENANT_TABLE_DDL_NO_TENANT_TYPE_ID, null, nextTimestamp());
- }
-
- @BeforeClass
- public static void doSetup() throws Exception {
- Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
- // Must update config before starting server
- props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
- setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ TENANT_ID = "T_" + BaseTest.generateRandomString();
+ TENANT_ID2 = "T_" + BaseTest.generateRandomString();
PHOENIX_JDBC_TENANT_SPECIFIC_URL = getUrl() + ';' + TENANT_ID_ATTRIB + '=' + TENANT_ID;
PHOENIX_JDBC_TENANT_SPECIFIC_URL2 = getUrl() + ';' + TENANT_ID_ATTRIB + '=' + TENANT_ID2;
+ PARENT_TABLE_NAME = "P_" + BaseTest.generateRandomString();
+ TENANT_TABLE_NAME = "V_" + BaseTest.generateRandomString();
+ PARENT_TABLE_NAME_NO_TENANT_TYPE_ID = "P_" + BaseTest.generateRandomString();
+ TENANT_TABLE_NAME_NO_TENANT_TYPE_ID = "V_" + BaseTest.generateRandomString();
+ PARENT_TABLE_DDL = "CREATE TABLE " + PARENT_TABLE_NAME + " ( \n" +
+ " user VARCHAR ,\n" +
+ " tenant_id VARCHAR NOT NULL,\n" +
+ " tenant_type_id VARCHAR(3) NOT NULL, \n" +
+ " id INTEGER NOT NULL\n" +
+ " CONSTRAINT pk PRIMARY KEY (tenant_id, tenant_type_id, id)) MULTI_TENANT=true, IMMUTABLE_ROWS=true";
+ TENANT_TABLE_DDL = "CREATE VIEW " + TENANT_TABLE_NAME + " ( \n" +
+ " tenant_col VARCHAR) AS SELECT *\n" +
+ " FROM " + PARENT_TABLE_NAME + " WHERE tenant_type_id= '" + TENANT_TYPE_ID + "'";
+ PARENT_TABLE_DDL_NO_TENANT_TYPE_ID = "CREATE TABLE " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID + " ( \n" +
+ " user VARCHAR ,\n" +
+ " tenant_id VARCHAR NOT NULL,\n" +
+ " id INTEGER NOT NULL,\n" +
+ " CONSTRAINT pk PRIMARY KEY (tenant_id, id)) MULTI_TENANT=true, IMMUTABLE_ROWS=true";
+ TENANT_TABLE_DDL_NO_TENANT_TYPE_ID = "CREATE VIEW " + TENANT_TABLE_NAME_NO_TENANT_TYPE_ID + " ( \n" +
+ " tenant_col VARCHAR) AS SELECT *\n" +
+ " FROM " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID;
+ createTestTable(getUrl(), PARENT_TABLE_DDL);
+ createTestTable(getUrl(), PARENT_TABLE_DDL_NO_TENANT_TYPE_ID);
+ createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, TENANT_TABLE_DDL);
+ createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, TENANT_TABLE_DDL_NO_TENANT_TYPE_ID);
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
index a3c36fa..559c000 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
@@ -44,6 +44,7 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.MetaDataUtil;
import org.apache.phoenix.util.QueryUtil;
import org.apache.phoenix.util.ReadOnlyProps;
import org.apache.phoenix.util.ScanUtil;
@@ -57,19 +58,20 @@ import org.junit.runners.Parameterized.Parameters;
import com.google.common.collect.Maps;
@RunWith(Parameterized.class)
-public abstract class BaseViewIT extends BaseOwnClusterHBaseManagedTimeIT {
+public abstract class BaseViewIT extends ParallelStatsEnabledIT {
protected String tableName;
+ protected String schemaName;
protected String fullTableName;
protected String tableDDLOptions;
- protected String tableSuffix;
protected boolean transactional;
@BeforeClass
public static void doSetup() throws Exception {
Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
- props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Integer.toString(20));
props.put(QueryServices.TRANSACTIONS_ENABLED, Boolean.toString(true));
+ // TODO: don't repeat this
+ props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
@@ -79,10 +81,10 @@ public abstract class BaseViewIT extends BaseOwnClusterHBaseManagedTimeIT {
if (transactional) {
optionBuilder.append(" TRANSACTIONAL=true ");
}
+ this.schemaName = TestUtil.DEFAULT_SCHEMA_NAME;
this.tableDDLOptions = optionBuilder.toString();
- tableSuffix = transactional ? "_TXN" : "";
- this.tableName = TestUtil.DEFAULT_DATA_TABLE_NAME + tableSuffix;
- this.fullTableName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, tableName);
+ this.tableName = "T_" + generateRandomString();
+ this.fullTableName = SchemaUtil.getTableName(schemaName, tableName);
}
@Parameters(name="transactional = {0}")
@@ -92,7 +94,7 @@ public abstract class BaseViewIT extends BaseOwnClusterHBaseManagedTimeIT {
protected void testUpdatableViewWithIndex(Integer saltBuckets, boolean localIndex) throws Exception {
String viewName = testUpdatableView(saltBuckets);
- Pair<String,Scan> pair = testUpdatableViewIndex(saltBuckets, localIndex);
+ Pair<String,Scan> pair = testUpdatableViewIndex(saltBuckets, localIndex, viewName);
Scan scan = pair.getSecond();
String tableName = pair.getFirst();
// Confirm that dropping the view also deletes the rows in the index
@@ -124,7 +126,7 @@ public abstract class BaseViewIT extends BaseOwnClusterHBaseManagedTimeIT {
tableDDLOptions+=",";
tableDDLOptions+=(" SALT_BUCKETS="+saltBuckets);
}
- String viewName = "V";
+ String viewName = "V_" + generateRandomString();
String ddl = "CREATE TABLE " + fullTableName + " (k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, k3 DECIMAL, s VARCHAR CONSTRAINT pk PRIMARY KEY (k1, k2, k3))" + tableDDLOptions;
conn.createStatement().execute(ddl);
ddl = "CREATE VIEW " + viewName + " AS SELECT * FROM " + fullTableName + " WHERE k1 = 1";
@@ -138,10 +140,10 @@ public abstract class BaseViewIT extends BaseOwnClusterHBaseManagedTimeIT {
rs = conn.createStatement().executeQuery("SELECT count(*) FROM " + fullTableName);
assertTrue(rs.next());
assertEquals(10, rs.getInt(1));
- rs = conn.createStatement().executeQuery("SELECT count(*) FROM v");
+ rs = conn.createStatement().executeQuery("SELECT count(*) FROM " + viewName);
assertTrue(rs.next());
assertEquals(3, rs.getInt(1));
- rs = conn.createStatement().executeQuery("SELECT k1, k2, k3 FROM v");
+ rs = conn.createStatement().executeQuery("SELECT k1, k2, k3 FROM " + viewName);
assertTrue(rs.next());
assertEquals(1, rs.getInt(1));
assertEquals(101, rs.getInt(2));
@@ -156,10 +158,10 @@ public abstract class BaseViewIT extends BaseOwnClusterHBaseManagedTimeIT {
assertEquals(2, rs.getInt(3));
assertFalse(rs.next());
- conn.createStatement().execute("UPSERT INTO v(k2,S,k3) VALUES(120,'foo',50.0)");
- conn.createStatement().execute("UPSERT INTO v(k2,S,k3) VALUES(121,'bar',51.0)");
+ conn.createStatement().execute("UPSERT INTO " + viewName + "(k2,S,k3) VALUES(120,'foo',50.0)");
+ conn.createStatement().execute("UPSERT INTO " + viewName + "(k2,S,k3) VALUES(121,'bar',51.0)");
conn.commit();
- rs = conn.createStatement().executeQuery("SELECT k1, k2 FROM v WHERE k2 >= 120");
+ rs = conn.createStatement().executeQuery("SELECT k1, k2 FROM " + viewName + " WHERE k2 >= 120");
assertTrue(rs.next());
assertEquals(1, rs.getInt(1));
assertEquals(120, rs.getInt(2));
@@ -171,27 +173,29 @@ public abstract class BaseViewIT extends BaseOwnClusterHBaseManagedTimeIT {
return viewName;
}
- protected Pair<String,Scan> testUpdatableViewIndex(Integer saltBuckets) throws Exception {
- return testUpdatableViewIndex(saltBuckets, false);
+ protected Pair<String,Scan> testUpdatableViewIndex(Integer saltBuckets, String viewName) throws Exception {
+ return testUpdatableViewIndex(saltBuckets, false, viewName);
}
- protected Pair<String,Scan> testUpdatableViewIndex(Integer saltBuckets, boolean localIndex) throws Exception {
+ protected Pair<String,Scan> testUpdatableViewIndex(Integer saltBuckets, boolean localIndex, String viewName) throws Exception {
ResultSet rs;
Connection conn = DriverManager.getConnection(getUrl());
+ String viewIndexName1 = "I_" + generateRandomString();
+ String viewIndexPhysicalName = MetaDataUtil.getViewIndexName(schemaName, tableName);
if (localIndex) {
- conn.createStatement().execute("CREATE LOCAL INDEX i1 on v(k3)");
+ conn.createStatement().execute("CREATE LOCAL INDEX " + viewIndexName1 + " on " + viewName + "(k3)");
} else {
- conn.createStatement().execute("CREATE INDEX i1 on v(k3) include (s)");
+ conn.createStatement().execute("CREATE INDEX " + viewIndexName1 + " on " + viewName + "(k3) include (s)");
}
- conn.createStatement().execute("UPSERT INTO v(k2,S,k3) VALUES(120,'foo',50.0)");
+ conn.createStatement().execute("UPSERT INTO " + viewName + "(k2,S,k3) VALUES(120,'foo',50.0)");
conn.commit();
- analyzeTable(conn, "v");
- List<KeyRange> splits = getAllSplits(conn, "i1");
+ analyzeTable(conn, viewName);
+ List<KeyRange> splits = getAllSplits(conn, viewIndexName1);
// More guideposts with salted, since it's already pre-split at salt buckets
assertEquals(saltBuckets == null ? 6 : 8, splits.size());
- String query = "SELECT k1, k2, k3, s FROM v WHERE k3 = 51.0";
+ String query = "SELECT k1, k2, k3, s FROM " + viewName + " WHERE k3 = 51.0";
rs = conn.createStatement().executeQuery(query);
assertTrue(rs.next());
assertEquals(1, rs.getInt(1));
@@ -202,34 +206,35 @@ public abstract class BaseViewIT extends BaseOwnClusterHBaseManagedTimeIT {
rs = conn.createStatement().executeQuery("EXPLAIN " + query);
String queryPlan = QueryUtil.getExplainPlan(rs);
if (localIndex) {
- assertEquals("CLIENT PARALLEL "+ (saltBuckets == null ? 1 : saltBuckets) +"-WAY RANGE SCAN OVER " + tableName +" [1,51]\n"
+ assertEquals("CLIENT PARALLEL "+ (saltBuckets == null ? 1 : saltBuckets) +"-WAY RANGE SCAN OVER " + fullTableName +" [1,51]\n"
+ " SERVER FILTER BY FIRST KEY ONLY\n"
+ "CLIENT MERGE SORT",
queryPlan);
} else {
assertEquals(saltBuckets == null
- ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_" + tableName +" [" + Short.MIN_VALUE + ",51]"
- : "CLIENT PARALLEL " + saltBuckets + "-WAY RANGE SCAN OVER _IDX_T" + (transactional ? "_TXN" : "") + " [0," + Short.MIN_VALUE + ",51] - ["+(saltBuckets.intValue()-1)+"," + Short.MIN_VALUE + ",51]\nCLIENT MERGE SORT",
+ ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + viewIndexPhysicalName +" [" + Short.MIN_VALUE + ",51]"
+ : "CLIENT PARALLEL " + saltBuckets + "-WAY RANGE SCAN OVER " + viewIndexPhysicalName + " [0," + Short.MIN_VALUE + ",51] - ["+(saltBuckets.intValue()-1)+"," + Short.MIN_VALUE + ",51]\nCLIENT MERGE SORT",
queryPlan);
}
+ String viewIndexName2 = "I_" + generateRandomString();
if (localIndex) {
- conn.createStatement().execute("CREATE LOCAL INDEX i2 on v(s)");
+ conn.createStatement().execute("CREATE LOCAL INDEX " + viewIndexName2 + " on " + viewName + "(s)");
} else {
- conn.createStatement().execute("CREATE INDEX i2 on v(s)");
+ conn.createStatement().execute("CREATE INDEX " + viewIndexName2 + " on " + viewName + "(s)");
}
// new index hasn't been analyzed yet
- splits = getAllSplits(conn, "i2");
+ splits = getAllSplits(conn, viewIndexName2);
assertEquals(saltBuckets == null ? 1 : 3, splits.size());
// analyze table should analyze all view data
- analyzeTable(conn, tableName);
- splits = getAllSplits(conn, "i2");
+ analyzeTable(conn, fullTableName);
+ splits = getAllSplits(conn, viewIndexName2);
assertEquals(saltBuckets == null ? 6 : 8, splits.size());
- query = "SELECT k1, k2, s FROM v WHERE s = 'foo'";
+ query = "SELECT k1, k2, s FROM " + viewName + " WHERE s = 'foo'";
Statement statement = conn.createStatement();
rs = statement.executeQuery(query);
Scan scan = statement.unwrap(PhoenixStatement.class).getQueryPlan().getContext().getScan();
@@ -238,24 +243,24 @@ public abstract class BaseViewIT extends BaseOwnClusterHBaseManagedTimeIT {
assertEquals(120, rs.getInt(2));
assertEquals("foo", rs.getString(3));
assertFalse(rs.next());
- String htableName;
rs = conn.createStatement().executeQuery("EXPLAIN " + query);
+ String physicalTableName;
if (localIndex) {
- htableName = tableName;
- assertEquals("CLIENT PARALLEL "+ (saltBuckets == null ? 1 : saltBuckets) +"-WAY RANGE SCAN OVER " + htableName +" [" + (2) + ",'foo']\n"
+ physicalTableName = tableName;
+ assertEquals("CLIENT PARALLEL "+ (saltBuckets == null ? 1 : saltBuckets) +"-WAY RANGE SCAN OVER " + fullTableName +" [" + (2) + ",'foo']\n"
+ " SERVER FILTER BY FIRST KEY ONLY\n"
+ "CLIENT MERGE SORT",QueryUtil.getExplainPlan(rs));
} else {
- htableName = "_IDX_" + tableName;
+ physicalTableName = viewIndexPhysicalName;
assertEquals(saltBuckets == null
- ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + htableName +" [" + (Short.MIN_VALUE+1) + ",'foo']\n"
+ ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + viewIndexPhysicalName +" [" + (Short.MIN_VALUE+1) + ",'foo']\n"
+ " SERVER FILTER BY FIRST KEY ONLY"
- : "CLIENT PARALLEL " + saltBuckets + "-WAY RANGE SCAN OVER " + htableName + " [0," + (Short.MIN_VALUE+1) + ",'foo'] - ["+(saltBuckets.intValue()-1)+"," + (Short.MIN_VALUE+1) + ",'foo']\n"
+ : "CLIENT PARALLEL " + saltBuckets + "-WAY RANGE SCAN OVER " + viewIndexPhysicalName + " [0," + (Short.MIN_VALUE+1) + ",'foo'] - ["+(saltBuckets.intValue()-1)+"," + (Short.MIN_VALUE+1) + ",'foo']\n"
+ " SERVER FILTER BY FIRST KEY ONLY\n"
+ "CLIENT MERGE SORT",
QueryUtil.getExplainPlan(rs));
}
conn.close();
- return new Pair<>(htableName,scan);
+ return new Pair<>(physicalTableName,scan);
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CountDistinctCompressionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CountDistinctCompressionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CountDistinctCompressionIT.java
index 677d76f..aa7a89c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CountDistinctCompressionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CountDistinctCompressionIT.java
@@ -40,7 +40,7 @@ import org.junit.Test;
import com.google.common.collect.Maps;
-public class CountDistinctCompressionIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class CountDistinctCompressionIT extends BaseOwnClusterIT {
@BeforeClass
public static void doSetup() throws Exception {
Map<String, String> props = Maps.newHashMapWithExpectedSize(3);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
index 4971fc3..6aaaeb1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
@@ -48,7 +48,7 @@ import org.junit.Test;
import com.google.common.collect.Maps;
-public class CsvBulkLoadToolIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class CsvBulkLoadToolIT extends BaseOwnClusterIT {
private static Connection conn;
private static String zkQuorum;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
index b23e342..8314850 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
@@ -64,7 +64,7 @@ import com.google.common.collect.Maps;
* Tests for the {@link IndexTool}
*/
@RunWith(Parameterized.class)
-public class IndexExtendedIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class IndexExtendedIT extends BaseOwnClusterIT {
private final boolean localIndex;
private final boolean transactional;
private final boolean directApi;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
index 50e0709..7ec37ce 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
@@ -17,7 +17,6 @@
*/
package org.apache.phoenix.end2end;
-import static org.apache.phoenix.util.TestUtil.KEYONLY_NAME;
import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
import static org.apache.phoenix.util.TestUtil.analyzeTable;
import static org.apache.phoenix.util.TestUtil.getAllSplits;
@@ -29,48 +28,41 @@ import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
+import java.sql.SQLException;
import java.util.List;
-import java.util.Map;
import java.util.Properties;
import org.apache.phoenix.query.KeyRange;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
import org.apache.phoenix.util.TestUtil;
-import org.junit.BeforeClass;
+import org.junit.Before;
import org.junit.Test;
-import com.google.common.collect.Maps;
-
-public class KeyOnlyIT extends BaseOwnClusterClientManagedTimeIT {
+public class KeyOnlyIT extends ParallelStatsEnabledIT {
+ private String tableName;
- @BeforeClass
- public static void doSetup() throws Exception {
- Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
- // Must update config before starting server
- props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
- setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ @Before
+ public void createTable() throws SQLException {
+ tableName = generateRandomString();
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+ conn.createStatement().execute("create table " + tableName +
+ " (i1 integer not null, i2 integer not null\n" +
+ " CONSTRAINT pk PRIMARY KEY (i1,i2))");
+ }
}
@Test
public void testKeyOnly() throws Exception {
- long ts = nextTimestamp();
- ensureTableCreated(getUrl(),KEYONLY_NAME,KEYONLY_NAME,null, ts);
- initTableValues(ts+1);
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts+30));
- Connection conn3 = DriverManager.getConnection(getUrl(), props);
- analyzeTable(conn3, KEYONLY_NAME);
- conn3.close();
+ Connection conn = DriverManager.getConnection(getUrl(), props);
+ initTableValues(conn);
+ analyzeTable(conn, tableName);
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts+50));
- Connection conn5 = DriverManager.getConnection(getUrl(), props);
- String query = "SELECT i1, i2 FROM KEYONLY";
- PreparedStatement statement = conn5.prepareStatement(query);
+ String query = "SELECT i1, i2 FROM " + tableName;
+ PreparedStatement statement = conn.prepareStatement(query);
ResultSet rs = statement.executeQuery();
assertTrue(rs.next());
assertEquals(1, rs.getInt(1));
@@ -79,36 +71,24 @@ public class KeyOnlyIT extends BaseOwnClusterClientManagedTimeIT {
assertEquals(3, rs.getInt(1));
assertEquals(4, rs.getInt(2));
assertFalse(rs.next());
- List<KeyRange> splits = getAllSplits(conn5, "KEYONLY");
+ List<KeyRange> splits = getAllSplits(conn, tableName);
assertEquals(3, splits.size());
- conn5.close();
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts+60));
- Connection conn6 = DriverManager.getConnection(getUrl(), props);
- conn6.createStatement().execute("ALTER TABLE KEYONLY ADD s1 varchar");
- conn6.close();
+ conn.createStatement().execute("ALTER TABLE " + tableName + " ADD s1 varchar");
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts+70));
- Connection conn7 = DriverManager.getConnection(getUrl(), props);
- PreparedStatement stmt = conn7.prepareStatement(
+ PreparedStatement stmt = conn.prepareStatement(
"upsert into " +
- "KEYONLY VALUES (?, ?, ?)");
+ tableName + " VALUES (?, ?, ?)");
stmt.setInt(1, 5);
stmt.setInt(2, 6);
stmt.setString(3, "foo");
stmt.execute();
- conn7.commit();
- conn7.close();
+ conn.commit();
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts+80));
- Connection conn8 = DriverManager.getConnection(getUrl(), props);
- analyzeTable(conn8, KEYONLY_NAME);
- conn8.close();
+ analyzeTable(conn, tableName);
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts+90));
- Connection conn9 = DriverManager.getConnection(getUrl(), props);
- query = "SELECT i1 FROM KEYONLY";
- statement = conn9.prepareStatement(query);
+ query = "SELECT i1 FROM " + tableName;
+ statement = conn.prepareStatement(query);
rs = statement.executeQuery();
assertTrue(rs.next());
assertEquals(1, rs.getInt(1));
@@ -118,8 +98,8 @@ public class KeyOnlyIT extends BaseOwnClusterClientManagedTimeIT {
assertEquals(5, rs.getInt(1));
assertFalse(rs.next());
- query = "SELECT i1,s1 FROM KEYONLY";
- statement = conn9.prepareStatement(query);
+ query = "SELECT i1,s1 FROM " + tableName;
+ statement = conn.prepareStatement(query);
rs = statement.executeQuery();
assertTrue(rs.next());
assertEquals(1, rs.getInt(1));
@@ -131,67 +111,49 @@ public class KeyOnlyIT extends BaseOwnClusterClientManagedTimeIT {
assertEquals(5, rs.getInt(1));
assertEquals("foo", rs.getString(2));
assertFalse(rs.next());
-
- conn9.close();
}
@Test
public void testOr() throws Exception {
- long ts = nextTimestamp();
- ensureTableCreated(getUrl(),KEYONLY_NAME,KEYONLY_NAME,null, ts);
- initTableValues(ts+1);
Properties props = new Properties();
+ Connection conn = DriverManager.getConnection(getUrl(), props);
+ initTableValues(conn);
+ analyzeTable(conn, tableName);
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts+3));
- Connection conn3 = DriverManager.getConnection(getUrl(), props);
- analyzeTable(conn3, KEYONLY_NAME);
- conn3.close();
-
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts+5));
- Connection conn5 = DriverManager.getConnection(getUrl(), props);
- String query = "SELECT i1 FROM KEYONLY WHERE i1 < 2 or i1 = 3";
- PreparedStatement statement = conn5.prepareStatement(query);
+ String query = "SELECT i1 FROM " + tableName + " WHERE i1 < 2 or i1 = 3";
+ PreparedStatement statement = conn.prepareStatement(query);
ResultSet rs = statement.executeQuery();
assertTrue(rs.next());
assertEquals(1, rs.getInt(1));
assertTrue(rs.next());
assertEquals(3, rs.getInt(1));
assertFalse(rs.next());
- conn5.close();
}
@Test
public void testQueryWithLimitAndStats() throws Exception {
- long ts = nextTimestamp();
- ensureTableCreated(getUrl(),KEYONLY_NAME,KEYONLY_NAME,null, ts);
- initTableValues(ts+1, 100);
-
- TestUtil.analyzeTable(getUrl(), ts+10, KEYONLY_NAME);
Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
+ Connection conn = DriverManager.getConnection(getUrl(), props);
+ initTableValues(conn, 100);
+ analyzeTable(conn, tableName);
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts+50));
- Connection conn = DriverManager.getConnection(getUrl(), props);
- String query = "SELECT i1 FROM KEYONLY LIMIT 1";
+ String query = "SELECT i1 FROM " + tableName + " LIMIT 1";
ResultSet rs = conn.createStatement().executeQuery(query);
assertTrue(rs.next());
assertEquals(0, rs.getInt(1));
assertFalse(rs.next());
rs = conn.createStatement().executeQuery("EXPLAIN " + query);
- assertEquals("CLIENT SERIAL 1-WAY FULL SCAN OVER KEYONLY\n" +
+ assertEquals("CLIENT SERIAL 1-WAY FULL SCAN OVER " + tableName + "\n" +
" SERVER FILTER BY FIRST KEY ONLY\n" +
" SERVER 1 ROW LIMIT\n" +
"CLIENT 1 ROW LIMIT", QueryUtil.getExplainPlan(rs));
- conn.close();
}
- protected static void initTableValues(long ts) throws Exception {
- String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + ts;
- Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- Connection conn = DriverManager.getConnection(url, props);
+ private void initTableValues(Connection conn) throws Exception {
PreparedStatement stmt = conn.prepareStatement(
"upsert into " +
- "KEYONLY VALUES (?, ?)");
+ tableName + " VALUES (?, ?)");
stmt.setInt(1, 1);
stmt.setInt(2, 2);
stmt.execute();
@@ -201,16 +163,12 @@ public class KeyOnlyIT extends BaseOwnClusterClientManagedTimeIT {
stmt.execute();
conn.commit();
- conn.close();
}
- protected static void initTableValues(long ts, int nRows) throws Exception {
- String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + ts;
- Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- Connection conn = DriverManager.getConnection(url, props);
+ private void initTableValues(Connection conn, int nRows) throws Exception {
PreparedStatement stmt = conn.prepareStatement(
"upsert into " +
- "KEYONLY VALUES (?, ?)");
+ tableName + " VALUES (?, ?)");
for (int i = 0; i < nRows; i++) {
stmt.setInt(1, i);
stmt.setInt(2, i+1);
@@ -218,6 +176,5 @@ public class KeyOnlyIT extends BaseOwnClusterClientManagedTimeIT {
}
conn.commit();
- conn.close();
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
index b9d27ca..4a0bb01 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
@@ -29,50 +29,44 @@ import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
+import java.sql.SQLException;
import java.util.List;
-import java.util.Map;
import java.util.Properties;
import org.apache.phoenix.query.KeyRange;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.junit.BeforeClass;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Before;
import org.junit.Test;
-import com.google.common.collect.Maps;
+public class MultiCfQueryExecIT extends ParallelStatsEnabledIT {
+ private String fullTableName;
-public class MultiCfQueryExecIT extends BaseOwnClusterClientManagedTimeIT {
- private static final String MULTI_CF = "MULTI_CF";
-
- @BeforeClass
- public static void doSetup() throws Exception {
- Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
- // Must update config before starting server
- props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
- setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ @Before
+ public void generateTableNames() throws SQLException {
+ String schemaName = TestUtil.DEFAULT_SCHEMA_NAME;
+ String tableName = "T_" + generateRandomString();
+ fullTableName = SchemaUtil.getTableName(schemaName, tableName);
}
-
- protected static void initTableValues(long ts) throws Exception {
- ensureTableCreated(getUrl(),MULTI_CF,MULTI_CF,null, ts-2);
-
- String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + ts;
- Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- Connection conn = DriverManager.getConnection(url, props);
- conn.setAutoCommit(true);
+
+ private void createTable(Connection conn) throws SQLException {
+ conn.createStatement().execute(
+ "create table " + fullTableName + " (id char(15) not null primary key,\n"
+ + " a.unique_user_count integer,\n" + " b.unique_org_count integer,\n"
+ + " c.db_cpu_utilization decimal(31,10),\n" + " d.transaction_count bigint,\n"
+ + " e.cpu_utilization decimal(31,10),\n" + " f.response_time bigint,\n"
+ + " g.response_time bigint)");
+ }
+
+ private void initTableValues(Connection conn) throws Exception {
// Insert all rows
PreparedStatement stmt = conn.prepareStatement(
- "upsert into " +
- "MULTI_CF(" +
- " ID, " +
- " TRANSACTION_COUNT, " +
- " CPU_UTILIZATION, " +
- " DB_CPU_UTILIZATION," +
- " UNIQUE_USER_COUNT," +
- " F.RESPONSE_TIME," +
- " G.RESPONSE_TIME)" +
+"upsert into " + fullTableName + "(" + " ID, "
+ + " TRANSACTION_COUNT, " + " CPU_UTILIZATION, " + " DB_CPU_UTILIZATION,"
+ + " UNIQUE_USER_COUNT," + " F.RESPONSE_TIME," + " G.RESPONSE_TIME)"
+ +
"VALUES (?, ?, ?, ?, ?, ?, ?)");
stmt.setString(1, "000000000000001");
stmt.setInt(2, 100);
@@ -90,18 +84,18 @@ public class MultiCfQueryExecIT extends BaseOwnClusterClientManagedTimeIT {
stmt.setLong(6, 2222);
stmt.setLong(7, 22222);
stmt.execute();
+ conn.commit();
}
@Test
public void testConstantCount() throws Exception {
- long ts = nextTimestamp();
- String query = "SELECT count(1) from multi_cf";
- String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 5); // Run query at timestamp 5
+ String query = "SELECT count(1) from " + fullTableName;
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(url, props);
try {
- initTableValues(ts);
- analyzeTable(getUrl(), ts + 3, "MULTI_CF");
+ createTable(conn);
+ initTableValues(conn);
+ analyzeTable(conn, fullTableName);
PreparedStatement statement = conn.prepareStatement(query);
ResultSet rs = statement.executeQuery();
assertTrue(rs.next());
@@ -111,17 +105,16 @@ public class MultiCfQueryExecIT extends BaseOwnClusterClientManagedTimeIT {
conn.close();
}
}
-
+
@Test
public void testCFToDisambiguateInSelectOnly1() throws Exception {
- long ts = nextTimestamp();
- String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from multi_cf where ID = '000000000000002'";
- String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 5); // Run query at timestamp 5
+ String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from " + fullTableName + " where ID = '000000000000002'";
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(url, props);
try {
- initTableValues(ts);
- analyzeTable(getUrl(), ts + 3, "MULTI_CF");
+ createTable(conn);
+ initTableValues(conn);
+ analyzeTable(conn, fullTableName);
PreparedStatement statement = conn.prepareStatement(query);
ResultSet rs = statement.executeQuery();
assertTrue(rs.next());
@@ -132,17 +125,16 @@ public class MultiCfQueryExecIT extends BaseOwnClusterClientManagedTimeIT {
conn.close();
}
}
-
+
@Test
public void testCFToDisambiguateInSelectOnly2() throws Exception {
- long ts = nextTimestamp();
- String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from multi_cf where TRANSACTION_COUNT = 200";
- String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 5); // Run query at timestamp 5
+ String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from " + fullTableName + " where TRANSACTION_COUNT = 200";
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(url, props);
try {
- initTableValues(ts);
- analyzeTable(getUrl(), ts + 3, "MULTI_CF");
+ createTable(conn);
+ initTableValues(conn);
+ analyzeTable(conn, fullTableName);
PreparedStatement statement = conn.prepareStatement(query);
ResultSet rs = statement.executeQuery();
assertTrue(rs.next());
@@ -153,17 +145,16 @@ public class MultiCfQueryExecIT extends BaseOwnClusterClientManagedTimeIT {
conn.close();
}
}
-
+
@Test
public void testGuidePostsForMultiCFs() throws Exception {
- long ts = nextTimestamp();
- initTableValues(ts);
- String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from multi_cf where F.RESPONSE_TIME = 2222";
- String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 5); // Run query at timestamp 5
+ String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from " + fullTableName + " where F.RESPONSE_TIME = 2222";
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(url, props);
try {
- analyzeTable(getUrl(), ts + 3, "MULTI_CF");
+ createTable(conn);
+ initTableValues(conn);
+ analyzeTable(conn, fullTableName);
PreparedStatement statement = conn.prepareStatement(query);
ResultSet rs = statement.executeQuery();
assertTrue(rs.next());
@@ -172,11 +163,11 @@ public class MultiCfQueryExecIT extends BaseOwnClusterClientManagedTimeIT {
assertFalse(rs.next());
// Use E column family. Since the column family with the empty key value (the first one, A)
// is always added to the scan, we never really use other guideposts (but this may change).
- List<KeyRange> splits = getAllSplits(conn, "MULTI_CF", "e.cpu_utilization IS NOT NULL", "COUNT(*)");
+ List<KeyRange> splits = getAllSplits(conn, fullTableName, "e.cpu_utilization IS NOT NULL", "COUNT(*)");
// Since the E column family is not populated, it won't have as many splits
assertEquals(3, splits.size());
// Same as above for G column family.
- splits = getAllSplits(conn, "MULTI_CF", "g.response_time IS NOT NULL", "COUNT(*)");
+ splits = getAllSplits(conn, fullTableName, "g.response_time IS NOT NULL", "COUNT(*)");
assertEquals(3, splits.size());
} finally {
conn.close();
@@ -185,48 +176,38 @@ public class MultiCfQueryExecIT extends BaseOwnClusterClientManagedTimeIT {
@Test
public void testGuidePostsForMultiCFsOverUnevenDistrib() throws Exception {
- long ts = nextTimestamp();
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 10));
Connection conn = DriverManager.getConnection(getUrl(), props);
-
- conn.createStatement().execute("CREATE TABLE T_6CF (K1 CHAR(1) NOT NULL, "
- + "K2 VARCHAR NOT NULL, "
- + "CF1.A INTEGER, "
- + "CF2.B INTEGER, "
- + "CF3.C INTEGER, "
- + "CF4.D INTEGER, "
- + "CF5.E INTEGER, "
- + "CF6.F INTEGER "
- + "CONSTRAINT PK PRIMARY KEY (K1,K2)) SPLIT ON ('B','C','D')");
- conn.close();
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 20));
- conn = DriverManager.getConnection(getUrl(), props);
+ conn.createStatement().execute(
+ "CREATE TABLE " + fullTableName + " (K1 CHAR(1) NOT NULL, "
+ + "K2 VARCHAR NOT NULL, " + "CF1.A INTEGER, "
+ + "CF2.B INTEGER, " + "CF3.C INTEGER, " + "CF4.D INTEGER, " + "CF5.E INTEGER, "
+ + "CF6.F INTEGER " + "CONSTRAINT PK PRIMARY KEY (K1,K2)) SPLIT ON ('B','C','D')");
+
for (int i = 0; i < 100; i++) {
- String upsert = "UPSERT INTO T_6CF(K1,K2,A) VALUES('" + Character.toString((char)('A'+i%10)) + "','" + (i*10) + "'," + i + ")";
+ String upsert = "UPSERT INTO " + fullTableName + "(K1,K2,A) VALUES('" + Character.toString((char)('A' + i % 10))
+ + "','" + (i * 10) + "'," + i + ")";
conn.createStatement().execute(upsert);
if (i % 10 == 0) {
- conn.createStatement().execute("UPSERT INTO T_6CF(K1,K2,F) VALUES('" + Character.toString((char)('A'+i%10)) + "','" + (i*10) + "'," + (i * 10) + ")");
+ conn.createStatement().execute(
+ "UPSERT INTO " + fullTableName + "(K1,K2,F) VALUES('" + Character.toString((char)('A' + i % 10))
+ + "','" + (i * 10) + "'," + (i * 10) + ")");
}
}
conn.commit();
- conn.close();
-
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 40));
- conn = DriverManager.getConnection(getUrl(), props);
try {
- analyzeTable(getUrl(), ts + 30, "T_6CF");
- PreparedStatement statement = conn.prepareStatement("select count(*) from T_6CF where f < 400");
+ analyzeTable(conn, fullTableName);
+ PreparedStatement statement = conn.prepareStatement("select count(*) from " + fullTableName + " where f < 400");
ResultSet rs = statement.executeQuery();
assertTrue(rs.next());
assertEquals(4, rs.getLong(1));
assertFalse(rs.next());
- List<KeyRange> splits = getAllSplits(conn, "T_6CF", "f < 400", "COUNT(*)");
+ List<KeyRange> splits = getAllSplits(conn, fullTableName, "f < 400", "COUNT(*)");
// Uses less populated column f
assertEquals(14, splits.size());
// Uses more populated column a
- splits = getAllSplits(conn, "T_6CF", "a < 80", "COUNT(*)");
+ splits = getAllSplits(conn, fullTableName, "a < 80", "COUNT(*)");
assertEquals(104, splits.size());
} finally {
conn.close();
@@ -235,77 +216,67 @@ public class MultiCfQueryExecIT extends BaseOwnClusterClientManagedTimeIT {
@Test
public void testGuidePostsRetrievedForMultiCF() throws Exception {
- Connection conn;
- PreparedStatement stmt;
- ResultSet rs;
+ Connection conn;
+ PreparedStatement stmt;
+ ResultSet rs;
- long ts = nextTimestamp();
- Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 10));
- conn = DriverManager.getConnection(getUrl(), props);
- conn.createStatement()
- .execute(
- "CREATE TABLE T ( k INTEGER PRIMARY KEY, A.V1 VARCHAR, B.V2 VARCHAR, C.V3 VARCHAR)");
- conn.close();
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ conn = DriverManager.getConnection(getUrl(), props);
+ conn.createStatement().execute(
+ "CREATE TABLE " + fullTableName + " ( k INTEGER PRIMARY KEY, A.V1 VARCHAR, B.V2 VARCHAR, C.V3 VARCHAR)");
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 30));
- conn = DriverManager.getConnection(getUrl(), props);
- stmt = conn.prepareStatement("UPSERT INTO T VALUES(?,?,?,?)");
- stmt.setInt(1, 1);
- stmt.setString(2, "A");
- stmt.setString(3, "B");
- stmt.setString(4, "C");
- stmt.execute();
- conn.commit();
-
- stmt = conn.prepareStatement("UPSERT INTO T VALUES(?,?,?,?)");
- stmt.setInt(1, 2);
- stmt.setString(2, "D");
- stmt.setString(3, "E");
- stmt.setString(4, "F");
- stmt.execute();
- conn.commit();
-
- stmt = conn.prepareStatement("UPSERT INTO T(k, A.V1, C.V3) VALUES(?,?,?)");
- stmt.setInt(1, 3);
- stmt.setString(2, "E");
- stmt.setString(3, "X");
- stmt.execute();
- conn.commit();
-
- stmt = conn.prepareStatement("UPSERT INTO T(k, A.V1, C.V3) VALUES(?,?,?)");
- stmt.setInt(1, 4);
- stmt.setString(2, "F");
- stmt.setString(3, "F");
- stmt.execute();
- conn.commit();
-
- conn.close();
-
- analyzeTable(getUrl(), ts + 50, "T");
+ stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + " VALUES(?,?,?,?)");
+ stmt.setInt(1, 1);
+ stmt.setString(2, "A");
+ stmt.setString(3, "B");
+ stmt.setString(4, "C");
+ stmt.execute();
+ conn.commit();
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 60));
- conn = DriverManager.getConnection(getUrl(), props);
- rs = conn.createStatement().executeQuery("SELECT B.V2 FROM T WHERE B.V2 = 'B'");
- assertTrue(rs.next());
- assertEquals("B",rs.getString(1));
- List<KeyRange> splits = getAllSplits(conn, "T", "C.V3 = 'X'", "A.V1");
- assertEquals(5, splits.size());
- splits = getAllSplits(conn, "T", "B.V2 = 'B'", "B.V2");
- assertEquals(3, splits.size());
- conn.close();
+ stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + " VALUES(?,?,?,?)");
+ stmt.setInt(1, 2);
+ stmt.setString(2, "D");
+ stmt.setString(3, "E");
+ stmt.setString(4, "F");
+ stmt.execute();
+ conn.commit();
+
+ stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + "(k, A.V1, C.V3) VALUES(?,?,?)");
+ stmt.setInt(1, 3);
+ stmt.setString(2, "E");
+ stmt.setString(3, "X");
+ stmt.execute();
+ conn.commit();
+
+ stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + "(k, A.V1, C.V3) VALUES(?,?,?)");
+ stmt.setInt(1, 4);
+ stmt.setString(2, "F");
+ stmt.setString(3, "F");
+ stmt.execute();
+ conn.commit();
+
+ analyzeTable(conn, fullTableName);
+
+ rs = conn.createStatement().executeQuery("SELECT B.V2 FROM " + fullTableName + " WHERE B.V2 = 'B'");
+ assertTrue(rs.next());
+ assertEquals("B", rs.getString(1));
+ List<KeyRange> splits = getAllSplits(conn, fullTableName, "C.V3 = 'X'", "A.V1");
+ assertEquals(5, splits.size());
+ splits = getAllSplits(conn, fullTableName, "B.V2 = 'B'", "B.V2");
+ assertEquals(3, splits.size());
+ conn.close();
}
@Test
public void testCFToDisambiguate2() throws Exception {
- long ts = nextTimestamp();
- String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from multi_cf where G.RESPONSE_TIME-1 = F.RESPONSE_TIME";
- String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 5); // Run query at timestamp 5
+ String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from " + fullTableName
+ + " where G.RESPONSE_TIME-1 = F.RESPONSE_TIME";
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(url, props);
try {
- initTableValues(ts);
- analyzeTable(getUrl(), ts + 3, "MULTI_CF");
+ createTable(conn);
+ initTableValues(conn);
+ analyzeTable(conn, fullTableName);
PreparedStatement statement = conn.prepareStatement(query);
ResultSet rs = statement.executeQuery();
assertTrue(rs.next());
@@ -316,35 +287,25 @@ public class MultiCfQueryExecIT extends BaseOwnClusterClientManagedTimeIT {
conn.close();
}
}
-
+
@Test
public void testDefaultCFToDisambiguate() throws Exception {
- long ts = nextTimestamp();
- initTableValues(ts);
- String ddl = "ALTER TABLE multi_cf ADD response_time BIGINT";
- String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 10);
- Connection conn = DriverManager.getConnection(url);
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ Connection conn = DriverManager.getConnection(url, props);
+ createTable(conn);
+ initTableValues(conn);
+
+ String ddl = "ALTER TABLE " + fullTableName + " ADD response_time BIGINT";
conn.createStatement().execute(ddl);
- conn.close();
-
- analyzeTable(getUrl(), ts + 15, "MULTI_CF");
-
- String dml = "upsert into " +
- "MULTI_CF(" +
- " ID, " +
- " RESPONSE_TIME)" +
- "VALUES ('000000000000003', 333)";
- url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 20);
- conn = DriverManager.getConnection(url);
+
+ String dml = "upsert into " + fullTableName + "(" + " ID, " + " RESPONSE_TIME)"
+ + "VALUES ('000000000000003', 333)";
conn.createStatement().execute(dml);
conn.commit();
- conn.close();
-
- analyzeTable(getUrl(), ts + 25, "MULTI_CF");
-
- String query = "SELECT ID,RESPONSE_TIME from multi_cf where RESPONSE_TIME = 333";
- url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 30); // Run query at timestamp 5
- conn = DriverManager.getConnection(url);
+
+ analyzeTable(conn, fullTableName);
+
+ String query = "SELECT ID,RESPONSE_TIME from " + fullTableName + " where RESPONSE_TIME = 333";
try {
PreparedStatement statement = conn.prepareStatement(query);
ResultSet rs = statement.executeQuery();
@@ -356,17 +317,16 @@ public class MultiCfQueryExecIT extends BaseOwnClusterClientManagedTimeIT {
conn.close();
}
}
-
+
@Test
public void testEssentialColumnFamilyForRowKeyFilter() throws Exception {
- long ts = nextTimestamp();
- String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from multi_cf where SUBSTR(ID, 15) = '2'";
- String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 5); // Run query at timestamp 5
+ String query = "SELECT F.RESPONSE_TIME,G.RESPONSE_TIME from " + fullTableName + " where SUBSTR(ID, 15) = '2'";
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(url, props);
try {
- initTableValues(ts);
- analyzeTable(getUrl(), ts + 3, "MULTI_CF");
+ createTable(conn);
+ initTableValues(conn);
+ analyzeTable(conn, fullTableName);
PreparedStatement statement = conn.prepareStatement(query);
ResultSet rs = statement.executeQuery();
assertTrue(rs.next());
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java
index 4e1e983..dfcf68c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java
@@ -17,13 +17,12 @@
*/
package org.apache.phoenix.end2end;
-import static org.apache.phoenix.util.TestUtil.STABLE_NAME;
+import static org.apache.phoenix.util.TestUtil.STABLE_PK_NAME;
import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
import static org.apache.phoenix.util.TestUtil.analyzeTable;
import static org.apache.phoenix.util.TestUtil.analyzeTableColumns;
import static org.apache.phoenix.util.TestUtil.analyzeTableIndex;
import static org.apache.phoenix.util.TestUtil.getAllSplits;
-import static org.apache.phoenix.util.TestUtil.getSplits;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
@@ -31,8 +30,9 @@ import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Collections;
import java.util.List;
-import java.util.Map;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Scan;
@@ -40,18 +40,16 @@ import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.query.KeyRange;
-import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.schema.types.PChar;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.junit.BeforeClass;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Before;
import org.junit.Test;
-import com.google.common.collect.Maps;
+import com.google.common.base.Joiner;
-public class ParallelIteratorsIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class ParallelIteratorsIT extends ParallelStatsEnabledIT {
- private static final String STABLE_INDEX = "STABLE_INDEX";
protected static final byte[] KMIN = new byte[] {'!'};
protected static final byte[] KMIN2 = new byte[] {'.'};
protected static final byte[] K1 = new byte[] {'a'};
@@ -66,27 +64,39 @@ public class ParallelIteratorsIT extends BaseOwnClusterHBaseManagedTimeIT {
protected static final byte[] KMAX2 = new byte[] {'z'};
protected static final byte[] KR = new byte[] { 'r' };
protected static final byte[] KP = new byte[] { 'p' };
+
+ private String tableName;
+ private String indexName;
+
+ @Before
+ public void generateTableNames() {
+ tableName = "T_" + generateRandomString();
+ indexName = "I_" + generateRandomString();
+ }
- @BeforeClass
- public static void doSetup() throws Exception {
- Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
- // Must update config before starting server
- props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
- props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
- setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ private List<KeyRange> getSplits(Connection conn, byte[] lowerRange, byte[] upperRange) throws SQLException {
+ return TestUtil.getSplits(conn, tableName, STABLE_PK_NAME, lowerRange, upperRange, null, "COUNT(*)");
}
@Test
public void testGetSplits() throws Exception {
Connection conn = DriverManager.getConnection(getUrl(), TEST_PROPERTIES);
- initTableValues(conn);
-
- PreparedStatement stmt = conn.prepareStatement("UPDATE STATISTICS STABLE");
+ byte[][] splits = new byte[][] {K3,K4,K9,K11};
+ createTable(conn, splits);
+ PreparedStatement stmt = conn.prepareStatement("upsert into " + tableName + " VALUES (?, ?)");
+ stmt.setString(1, new String(KMIN));
+ stmt.setInt(2, 1);
stmt.execute();
+ stmt.setString(1, new String(KMAX));
+ stmt.setInt(2, 2);
+ stmt.execute();
+ conn.commit();
+
+ conn.createStatement().execute("UPDATE STATISTICS " + tableName);
List<KeyRange> keyRanges;
- keyRanges = getAllSplits(conn);
+ keyRanges = getAllSplits(conn, tableName);
assertEquals("Unexpected number of splits: " + keyRanges, 7, keyRanges.size());
assertEquals(newKeyRange(KeyRange.UNBOUND, KMIN), keyRanges.get(0));
assertEquals(newKeyRange(KMIN, K3), keyRanges.get(1));
@@ -116,10 +126,10 @@ public class ParallelIteratorsIT extends BaseOwnClusterHBaseManagedTimeIT {
public void testServerNameOnScan() throws Exception {
Connection conn = DriverManager.getConnection(getUrl(), TEST_PROPERTIES);
byte[][] splits = new byte[][] { K3, K9, KR };
- ensureTableCreated(getUrl(), STABLE_NAME, STABLE_NAME, splits);
+ createTable(conn, splits);
PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
- ResultSet rs = stmt.executeQuery("SELECT * FROM " + STABLE_NAME + " LIMIT 1");
+ ResultSet rs = stmt.executeQuery("SELECT * FROM " + tableName + " LIMIT 1");
rs.next();
QueryPlan plan = stmt.getQueryPlan();
List<List<Scan>> nestedScans = plan.getScans();
@@ -138,56 +148,57 @@ public class ParallelIteratorsIT extends BaseOwnClusterHBaseManagedTimeIT {
public void testGuidePostsLifeCycle() throws Exception {
Connection conn = DriverManager.getConnection(getUrl(), TEST_PROPERTIES);
byte[][] splits = new byte[][] { K3, K9, KR };
- ensureTableCreated(getUrl(), STABLE_NAME, STABLE_NAME, splits);
+ createTable(conn, splits);
+
// create index
- conn.createStatement().execute("CREATE INDEX " + STABLE_INDEX + " ON " + STABLE_NAME + "( \"value\")");
+ conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + tableName + "( \"value\")");
// before upserting
- List<KeyRange> keyRanges = getAllSplits(conn);
+ List<KeyRange> keyRanges = getAllSplits(conn, tableName);
assertEquals(4, keyRanges.size());
upsert(conn, new byte[][] { KMIN, K4, K11 });
// Analyze table alone
- analyzeTableColumns(conn);
- keyRanges = getAllSplits(conn);
+ analyzeTableColumns(conn, tableName);
+ keyRanges = getAllSplits(conn, tableName);
assertEquals(7, keyRanges.size());
// Get all splits on the index table before calling analyze on the index table
- List<KeyRange> indexSplits = getAllSplits(conn, STABLE_INDEX);
+ List<KeyRange> indexSplits = getAllSplits(conn, indexName);
assertEquals(1, indexSplits.size());
// Analyze the index table alone
- analyzeTableIndex(conn, STABLE_NAME);
+ analyzeTableIndex(conn, tableName);
// check the splits of the main table
- keyRanges = getAllSplits(conn);
+ keyRanges = getAllSplits(conn, tableName);
assertEquals(7, keyRanges.size());
// check the splits on the index table
- indexSplits = getAllSplits(conn, STABLE_INDEX);
+ indexSplits = getAllSplits(conn, indexName);
assertEquals(4, indexSplits.size());
upsert(conn, new byte[][] { KMIN2, K5, K12 });
// Update the stats for both the table and the index table
- analyzeTable(conn);
- keyRanges = getAllSplits(conn);
+ analyzeTable(conn, tableName);
+ keyRanges = getAllSplits(conn, tableName);
assertEquals(10, keyRanges.size());
// the above analyze should have updated the index splits also
- indexSplits = getAllSplits(conn, STABLE_INDEX);
+ indexSplits = getAllSplits(conn, indexName);
assertEquals(7, indexSplits.size());
upsert(conn, new byte[][] { K1, K6, KP });
// Update only the table
- analyzeTableColumns(conn);
- keyRanges = getAllSplits(conn);
+ analyzeTableColumns(conn, tableName);
+ keyRanges = getAllSplits(conn, tableName);
assertEquals(13, keyRanges.size());
// No change to the index splits
- indexSplits = getAllSplits(conn, STABLE_INDEX);
+ indexSplits = getAllSplits(conn, indexName);
assertEquals(7, indexSplits.size());
- analyzeTableIndex(conn, STABLE_NAME);
- indexSplits = getAllSplits(conn, STABLE_INDEX);
+ analyzeTableIndex(conn, tableName);
+ indexSplits = getAllSplits(conn, indexName);
// the above analyze should have updated the index splits only
assertEquals(10, indexSplits.size());
// No change in main table splits
- keyRanges = getAllSplits(conn);
+ keyRanges = getAllSplits(conn, tableName);
assertEquals(13, keyRanges.size());
conn.close();
}
- private static void upsert(Connection conn, byte[][] val) throws Exception {
- PreparedStatement stmt = conn.prepareStatement("upsert into " + STABLE_NAME + " VALUES (?, ?)");
+ private void upsert(Connection conn, byte[][] val) throws Exception {
+ PreparedStatement stmt = conn.prepareStatement("upsert into " + tableName + " VALUES (?, ?)");
stmt.setString(1, new String(val[0]));
stmt.setInt(2, 1);
stmt.execute();
@@ -204,16 +215,13 @@ public class ParallelIteratorsIT extends BaseOwnClusterHBaseManagedTimeIT {
return PChar.INSTANCE.getKeyRange(lowerRange, true, upperRange, false);
}
- private static void initTableValues(Connection conn) throws Exception {
- byte[][] splits = new byte[][] {K3,K4,K9,K11};
- ensureTableCreated(getUrl(),STABLE_NAME, STABLE_NAME, splits);
- PreparedStatement stmt = conn.prepareStatement("upsert into " + STABLE_NAME + " VALUES (?, ?)");
- stmt.setString(1, new String(KMIN));
- stmt.setInt(2, 1);
- stmt.execute();
- stmt.setString(1, new String(KMAX));
- stmt.setInt(2, 2);
+ private void createTable (Connection conn, byte[][] splits) throws SQLException {
+ PreparedStatement stmt = conn.prepareStatement("create table " + tableName +
+ " (id char(1) not null primary key,\n" +
+ " \"value\" integer) SPLIT ON (" + Joiner.on(',').join(Collections.nCopies(splits.length, "?")) + ")");
+ for (int i = 0; i < splits.length; i++) {
+ stmt.setBytes(i+1, splits[i]);
+ }
stmt.execute();
- conn.commit();
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelStatsEnabledIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelStatsEnabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelStatsEnabledIT.java
index 8ecb7b4..322cb9e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelStatsEnabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelStatsEnabledIT.java
@@ -18,8 +18,15 @@
package org.apache.phoenix.end2end;
+import java.util.Map;
+
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.BeforeClass;
import org.junit.experimental.categories.Category;
+import com.google.common.collect.Maps;
+
/**
*
* Base class for tests that have statistics enabled.
@@ -27,5 +34,12 @@ import org.junit.experimental.categories.Category;
*/
@Category(ParallelStatsEnabledTest.class)
public abstract class ParallelStatsEnabledIT extends BaseParallelIT {
-
+
+ @BeforeClass
+ @Shadower(classBeingShadowed = BaseParallelIT.class)
+ public static void doSetup() throws Exception {
+ Map<String,String> props = Maps.newHashMapWithExpectedSize(5);
+ props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
+ setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryTimeoutIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryTimeoutIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryTimeoutIT.java
index 7450022..2d58615 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryTimeoutIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryTimeoutIT.java
@@ -48,7 +48,7 @@ import org.junit.Test;
import com.google.common.collect.Maps;
-public class QueryTimeoutIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class QueryTimeoutIT extends BaseOwnClusterIT {
private static final String TEST_TABLE_NAME = "T";
@BeforeClass
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithLimitIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithLimitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithLimitIT.java
index fae7a7c..cf8b0a9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithLimitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithLimitIT.java
@@ -43,7 +43,7 @@ import org.junit.Test;
import com.google.common.collect.Maps;
-public class QueryWithLimitIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class QueryWithLimitIT extends BaseOwnClusterIT {
@BeforeClass
public static void doSetup() throws Exception {
@@ -61,7 +61,9 @@ public class QueryWithLimitIT extends BaseOwnClusterHBaseManagedTimeIT {
Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(getUrl(), props);
try {
- ensureTableCreated(getUrl(), KEYONLY_NAME, KEYONLY_NAME);
+ conn.createStatement().execute("create table " + KEYONLY_NAME + "\n" +
+ " (i1 integer not null, i2 integer not null\n" +
+ " CONSTRAINT pk PRIMARY KEY (i1,i2))");
initTableValues(conn, 100);
String query = "SELECT i1 FROM KEYONLY LIMIT 1";
@@ -85,7 +87,9 @@ public class QueryWithLimitIT extends BaseOwnClusterHBaseManagedTimeIT {
Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(getUrl(), props);
- ensureTableCreated(getUrl(), KEYONLY_NAME, KEYONLY_NAME);
+ conn.createStatement().execute("create table " + KEYONLY_NAME + "\n" +
+ " (i1 integer not null, i2 integer not null\n" +
+ " CONSTRAINT pk PRIMARY KEY (i1,i2))");
initTableValues(conn, 100);
conn.createStatement().execute("UPDATE STATISTICS " + KEYONLY_NAME);
[2/4] phoenix git commit: PHOENIX-3290 Move and/or combine as many
NeedsOwnCluster tests to bring down test run time
Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
index dbe767b..f26e6dd 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
@@ -28,13 +28,11 @@ import static org.junit.Assert.fail;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
-import java.sql.SQLException;
import java.util.List;
import java.util.Properties;
import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.schema.TableNotFoundException;
-import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.PropertiesUtil;
import org.junit.Test;
@@ -43,23 +41,20 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
@Test
public void testSelectWithLimit() throws Exception {
- Connection conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
+ Connection conn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, PropertiesUtil.deepCopy(TEST_PROPERTIES));
ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM " + TENANT_TABLE_NAME + " LIMIT 100");
while(rs.next()) {}
}
@Test
public void testBasicUpsertSelect() throws Exception {
- Connection conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
+ Connection conn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, PropertiesUtil.deepCopy(TEST_PROPERTIES));
try {
conn.setAutoCommit(false);
conn.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " (id, tenant_col) values (1, 'Cheap Sunglasses')");
conn.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " (id, tenant_col) values (2, 'Viva Las Vegas')");
conn.commit();
- conn.close();
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
analyzeTable(conn, TENANT_TABLE_NAME);
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
ResultSet rs = conn.createStatement().executeQuery("select tenant_col from " + TENANT_TABLE_NAME + " where id = 1");
assertTrue("Expected 1 row in result set", rs.next());
assertEquals("Cheap Sunglasses", rs.getString(1));
@@ -72,49 +67,37 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
@Test
public void testBasicUpsertSelect2() throws Exception {
- Connection conn1 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
- createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL2, TENANT_TABLE_DDL, null, nextTimestamp());
- Connection conn2 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL2);
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ Connection conn1 = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
+ createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL2, TENANT_TABLE_DDL);
+ Connection conn2 = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL2, props);
try {
conn1.setAutoCommit(false);
conn1.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " values ('me','" + TENANT_TYPE_ID + "',1,'Cheap Sunglasses')");
conn1.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " values ('you','" + TENANT_TYPE_ID +"',2,'Viva Las Vegas')");
conn1.commit();
- conn1 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
analyzeTable(conn1, TENANT_TABLE_NAME);
- conn1 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
conn2.setAutoCommit(true);
conn2.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " values ('them','" + TENANT_TYPE_ID + "',1,'Long Hair')");
conn2.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " values ('us','" + TENANT_TYPE_ID + "',2,'Black Hat')");
- conn2.close();
- conn1.close();
- conn1 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
ResultSet rs = conn1.createStatement().executeQuery("select * from " + TENANT_TABLE_NAME + " where id = 1");
assertTrue("Expected 1 row in result set", rs.next());
assertEquals(1, rs.getInt(3));
assertEquals("Cheap Sunglasses", rs.getString(4));
assertFalse("Expected 1 row in result set", rs.next());
- conn2 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL2);
analyzeTable(conn2, TENANT_TABLE_NAME);
- conn2 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL2);
rs = conn2.createStatement().executeQuery("select * from " + TENANT_TABLE_NAME + " where id = 2");
assertTrue("Expected 1 row in result set", rs.next());
assertEquals(2, rs.getInt(3));
assertEquals("Black Hat", rs.getString(4));
assertFalse("Expected 1 row in result set", rs.next());
- conn2.close();
- conn1 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
analyzeTable(conn1, TENANT_TABLE_NAME);
- conn1.close();
- conn2 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL2);
conn2.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " select * from " + TENANT_TABLE_NAME );
conn2.commit();
- conn2.close();
- conn2 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL2);
rs = conn2.createStatement().executeQuery("select * from " + TENANT_TABLE_NAME);
assertTrue("Expected row in result set", rs.next());
assertEquals(1, rs.getInt(3));
@@ -123,14 +106,10 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
assertEquals(2, rs.getInt(3));
assertEquals("Black Hat", rs.getString(4));
assertFalse("Expected 2 rows total", rs.next());
- conn2.close();
- conn2 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL2);
conn2.setAutoCommit(true);;
conn2.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " select 'all', tenant_type_id, id, 'Big ' || tenant_col from " + TENANT_TABLE_NAME );
- conn2.close();
- conn2 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL2);
analyzeTable(conn2, TENANT_TABLE_NAME);
rs = conn2.createStatement().executeQuery("select * from " + TENANT_TABLE_NAME);
assertTrue("Expected row in result set", rs.next());
@@ -144,8 +123,6 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
assertEquals(2, rs.getInt(3));
assertEquals("Big Black Hat", rs.getString(4));
assertFalse("Expected 2 rows total", rs.next());
- conn2.close();
- conn1 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
rs = conn1.createStatement().executeQuery("select * from " + TENANT_TABLE_NAME);
assertTrue("Expected row row in result set", rs.next());
assertEquals(1, rs.getInt(3));
@@ -163,34 +140,23 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
}
}
- private Connection nextConnection(String url) throws SQLException {
- Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
- props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(nextTimestamp()));
- return DriverManager.getConnection(url, props);
- }
-
@Test
public void testJoinWithGlobalTable() throws Exception {
- Connection conn = nextConnection(getUrl());
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ Connection conn = DriverManager.getConnection(getUrl(), props);
conn.createStatement().execute("create table foo (k INTEGER NOT NULL PRIMARY KEY)");
- conn.close();
- conn = nextConnection(getUrl());
conn.createStatement().execute("upsert into foo(k) values(1)");
conn.commit();
- conn.close();
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
+ conn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
try {
conn.setAutoCommit(false);
conn.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " (id, tenant_col) values (1, 'Cheap Sunglasses')");
conn.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " (id, tenant_col) values (2, 'Viva Las Vegas')");
conn.commit();
- conn.close();
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
analyzeTable(conn, TENANT_TABLE_NAME);
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
ResultSet rs = conn.createStatement().executeQuery("select tenant_col from " + TENANT_TABLE_NAME + " join foo on k=id");
assertTrue("Expected 1 row in result set", rs.next());
assertEquals("Cheap Sunglasses", rs.getString(1));
@@ -203,20 +169,16 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
@Test
public void testSelectOnlySeesTenantData() throws Exception {
- Connection conn = nextConnection(getUrl());
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ Connection conn = DriverManager.getConnection(getUrl(), props);
try {
conn.setAutoCommit(true);
conn.createStatement().executeUpdate("delete from " + PARENT_TABLE_NAME);
- conn.close();
-
- conn = nextConnection(getUrl());
- conn.setAutoCommit(true);
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('AC/DC', 'abc', 1, 'Bon Scott')");
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', '" + TENANT_TYPE_ID + "', 1, 'Billy Gibbons')");
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', 'def', 1, 'Billy Gibbons')");
- conn.close();
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
+ conn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
ResultSet rs = conn.createStatement().executeQuery("select user from " + TENANT_TABLE_NAME);
assertTrue("Expected 1 row in result set", rs.next());
assertEquals("Billy Gibbons", rs.getString(1));
@@ -235,34 +197,24 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
@Test
public void testDeleteOnlyDeletesTenantData() throws Exception {
- Connection conn = nextConnection(getUrl());
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ Connection conn = DriverManager.getConnection(getUrl(), props);
try {
conn.setAutoCommit(true);
conn.createStatement().executeUpdate("delete from " + PARENT_TABLE_NAME);
- conn.close();
-
- conn = nextConnection(getUrl());
- conn.setAutoCommit(true);
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('AC/DC', 'abc', 1, 'Bon Scott')");
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', '" + TENANT_TYPE_ID + "', 1, 'Billy Gibbons')");
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', 'def', 1, 'Billy Gibbons')");
- conn.close();
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
+ conn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
conn.setAutoCommit(true);
int count = conn.createStatement().executeUpdate("delete from " + TENANT_TABLE_NAME);
assertEquals("Expected 1 row have been deleted", 1, count);
- conn.close();
-
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
- conn.setAutoCommit(true);
ResultSet rs = conn.createStatement().executeQuery("select * from " + TENANT_TABLE_NAME);
assertFalse("Expected no rows in result set", rs.next());
- conn.close();
- conn = nextConnection(getUrl());
+ conn = DriverManager.getConnection(getUrl(), props);
analyzeTable(conn, PARENT_TABLE_NAME);
- conn = nextConnection(getUrl());
rs = conn.createStatement().executeQuery("select count(*) from " + PARENT_TABLE_NAME);
rs.next();
assertEquals(2, rs.getInt(1));
@@ -274,38 +226,25 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
@Test
public void testDeleteWhenImmutableIndex() throws Exception {
- Connection conn = nextConnection(getUrl());
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ Connection conn = DriverManager.getConnection(getUrl(), props);
try {
conn.setAutoCommit(true);
conn.createStatement().executeUpdate("delete from " + PARENT_TABLE_NAME);
- conn.close();
-
- conn = nextConnection(getUrl());
- conn.setAutoCommit(true);
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('AC/DC', 'abc', 1, 'Bon Scott')");
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', '" + TENANT_TYPE_ID + "', 1, 'Billy Gibbons')");
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', 'def', 1, 'Billy Gibbons')");
- conn.close();
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
- conn.createStatement().executeUpdate("create index idx1 on " + TENANT_TABLE_NAME + "(user)");
- conn.close();
-
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
- conn.setAutoCommit(true);
- int count = conn.createStatement().executeUpdate("delete from " + TENANT_TABLE_NAME + " where user='Billy Gibbons'");
+ Connection tsConn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
+ tsConn.setAutoCommit(true);
+ tsConn.createStatement().executeUpdate("create index idx1 on " + TENANT_TABLE_NAME + "(user)");
+ int count = tsConn.createStatement().executeUpdate("delete from " + TENANT_TABLE_NAME + " where user='Billy Gibbons'");
assertEquals("Expected 1 row have been deleted", 1, count);
- conn.close();
-
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
- conn.setAutoCommit(true);
- ResultSet rs = conn.createStatement().executeQuery("select * from " + TENANT_TABLE_NAME);
+ ResultSet rs = tsConn.createStatement().executeQuery("select * from " + TENANT_TABLE_NAME);
assertFalse("Expected no rows in result set", rs.next());
- conn.close();
+ tsConn.close();
- conn = nextConnection(getUrl());
analyzeTable(conn, PARENT_TABLE_NAME);
- conn = nextConnection(getUrl());
rs = conn.createStatement().executeQuery("select count(*) from " + PARENT_TABLE_NAME);
rs.next();
assertEquals(2, rs.getInt(1));
@@ -317,30 +256,22 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
@Test
public void testDeleteOnlyDeletesTenantDataWithNoTenantTypeId() throws Exception {
- Connection conn = nextConnection(getUrl());
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ Connection conn = DriverManager.getConnection(getUrl(), props);
try {
conn.setAutoCommit(true);
conn.createStatement().executeUpdate("delete from " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID);
- conn.close();
-
- conn = nextConnection(getUrl());
- conn.setAutoCommit(true);
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID + " (tenant_id, id, user) values ('AC/DC', 1, 'Bon Scott')");
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID + " (tenant_id, id, user) values ('" + TENANT_ID + "', 1, 'Billy Gibbons')");
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID + " (tenant_id, id, user) values ('" + TENANT_ID + "', 2, 'Billy Gibbons')");
- conn.close();
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
- conn.setAutoCommit(true);
- int count = conn.createStatement().executeUpdate("delete from " + TENANT_TABLE_NAME_NO_TENANT_TYPE_ID);
+ Connection tsConn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
+ tsConn.setAutoCommit(true);
+ int count = tsConn.createStatement().executeUpdate("delete from " + TENANT_TABLE_NAME_NO_TENANT_TYPE_ID);
assertEquals("Expected 2 rows have been deleted", 2, count);
- conn.close();
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
- ResultSet rs = conn.createStatement().executeQuery("select * from " + TENANT_TABLE_NAME_NO_TENANT_TYPE_ID);
+ ResultSet rs = tsConn.createStatement().executeQuery("select * from " + TENANT_TABLE_NAME_NO_TENANT_TYPE_ID);
assertFalse("Expected no rows in result set", rs.next());
- conn.close();
- conn = nextConnection(getUrl());
rs = conn.createStatement().executeQuery("select count(*) from " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID);
rs.next();
assertEquals(1, rs.getInt(1));
@@ -352,146 +283,121 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
@Test
public void testDeleteAllTenantTableData() throws Exception {
- Connection conn = nextConnection(getUrl());
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ Connection conn = DriverManager.getConnection(getUrl(), props);
+ Connection tsConn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
try {
conn.setAutoCommit(true);
conn.createStatement().executeUpdate("delete from " + PARENT_TABLE_NAME);
- conn.close();
-
- conn = nextConnection(getUrl());
- conn.setAutoCommit(true);
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('AC/DC', 'abc', 1, 'Bon Scott')");
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', '" + TENANT_TYPE_ID + "', 1, 'Billy Gibbons')");
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', 'def', 1, 'Billy Gibbons')");
- conn.close();
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
- analyzeTable(conn, PARENT_TABLE_NAME);
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
- conn.createStatement().execute("delete from " + TENANT_TABLE_NAME);
- conn.commit();
- conn.close();
+ analyzeTable(tsConn, PARENT_TABLE_NAME);
+ tsConn.createStatement().execute("delete from " + TENANT_TABLE_NAME);
+ tsConn.commit();
- conn = nextConnection(getUrl());
ResultSet rs = conn.createStatement().executeQuery("select count(*) from " + PARENT_TABLE_NAME);
rs.next();
assertEquals(2, rs.getInt(1));
}
finally {
- conn.close();
+ if (conn != null) conn.close();
+ if (tsConn != null) tsConn.close();
}
}
@Test
public void testDropTenantTableDeletesNoData() throws Exception {
- Connection conn = nextConnection(getUrl());
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ Connection conn = DriverManager.getConnection(getUrl(), props);
+ Connection tsConn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
try {
conn.setAutoCommit(true);
conn.createStatement().executeUpdate("delete from " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID);
- conn.close();
-
- conn = nextConnection(getUrl());
- conn.setAutoCommit(true);
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID + " (tenant_id, id, user) values ('AC/DC', 1, 'Bon Scott')");
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID + " (tenant_id, id, user) values ('" + TENANT_ID + "', 1, 'Billy Gibbons')");
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID + " (tenant_id, id, user) values ('" + TENANT_ID + "', 2, 'Billy Gibbons')");
- conn.close();
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
- conn.createStatement().execute("drop view " + TENANT_TABLE_NAME_NO_TENANT_TYPE_ID);
- conn.close();
+ tsConn.createStatement().execute("drop view " + TENANT_TABLE_NAME_NO_TENANT_TYPE_ID);
- conn = nextConnection(getUrl());
analyzeTable(conn, PARENT_TABLE_NAME_NO_TENANT_TYPE_ID);
- conn = nextConnection(getUrl());
ResultSet rs = conn.createStatement().executeQuery("select count(*) from " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID);
rs.next();
assertEquals(3, rs.getInt(1));
}
finally {
- conn.close();
+ if (conn != null) conn.close();
+ if (tsConn != null) tsConn.close();
}
}
@Test
public void testUpsertSelectOnlyUpsertsTenantData() throws Exception {
- Connection conn = nextConnection(getUrl());
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ Connection conn = DriverManager.getConnection(getUrl(), props);
+ Connection tsConn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
try {
conn.setAutoCommit(true);
conn.createStatement().executeUpdate("delete from " + PARENT_TABLE_NAME);
- conn.close();
-
- conn = nextConnection(getUrl());
- conn.setAutoCommit(true);
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('AC/DC', 'aaa', 1, 'Bon Scott')");
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', '" + TENANT_TYPE_ID + "', 1, 'Billy Gibbons')");
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', 'def', 2, 'Billy Gibbons')");
- conn.close();
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
- analyzeTable(conn, TENANT_TABLE_NAME);
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
- conn.setAutoCommit(true);
- int count = conn.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + "(id, user) select id+100, user from " + TENANT_TABLE_NAME);
+ analyzeTable(tsConn, TENANT_TABLE_NAME);
+ int count = tsConn.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + "(id, user) select id+100, user from " + TENANT_TABLE_NAME);
+ tsConn.commit();
assertEquals("Expected 1 row to have been inserted", 1, count);
- conn.close();
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
- ResultSet rs = conn.createStatement().executeQuery("select count(*) from " + TENANT_TABLE_NAME);
+ ResultSet rs = tsConn.createStatement().executeQuery("select count(*) from " + TENANT_TABLE_NAME);
rs.next();
assertEquals(2, rs.getInt(1));
}
finally {
- conn.close();
+ if (conn != null) conn.close();
+ if (tsConn != null) tsConn.close();
}
}
@Test
public void testUpsertSelectOnlyUpsertsTenantDataWithDifferentTenantTable() throws Exception {
- createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW ANOTHER_TENANT_TABLE ( " +
- "tenant_col VARCHAR) AS SELECT * FROM " + PARENT_TABLE_NAME + " WHERE tenant_type_id = 'def'", null, nextTimestamp(), false);
+ String anotherTableName = "V_" + generateRandomString();
+ createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW " + anotherTableName + " ( " +
+ "tenant_col VARCHAR) AS SELECT * FROM " + PARENT_TABLE_NAME + " WHERE tenant_type_id = 'def'");
- Connection conn = nextConnection(getUrl());
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ Connection conn = DriverManager.getConnection(getUrl(), props);
+ Connection tsConn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
try {
conn.setAutoCommit(true);
conn.createStatement().executeUpdate("delete from " + PARENT_TABLE_NAME);
- conn.close();
-
- conn = nextConnection(getUrl());
- conn.setAutoCommit(true);
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('AC/DC', 'aaa', 1, 'Bon Scott')");
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', '" + TENANT_TYPE_ID + "', 1, 'Billy Gibbons')");
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', 'def', 2, 'Billy Gibbons')");
- conn.close();
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
- analyzeTable(conn, TENANT_TABLE_NAME);
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
- conn.setAutoCommit(true);
- int count = conn.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + "(id, user) select id+100, user from ANOTHER_TENANT_TABLE where id=2");
+ analyzeTable(tsConn, TENANT_TABLE_NAME);
+ tsConn.setAutoCommit(true);
+ int count = tsConn.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + "(id, user)"
+ + "select id+100, user from " + anotherTableName + " where id=2");
assertEquals("Expected 1 row to have been inserted", 1, count);
- conn.close();
-
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
- ResultSet rs = conn.createStatement().executeQuery("select count(*) from " + TENANT_TABLE_NAME);
+ ResultSet rs = tsConn.createStatement().executeQuery("select count(*) from " + TENANT_TABLE_NAME);
rs.next();
assertEquals(2, rs.getInt(1));
}
finally {
- conn.close();
+ if (conn != null) conn.close();
+ if (tsConn != null) tsConn.close();
}
}
@Test
public void testUpsertValuesOnlyUpsertsTenantData() throws Exception {
- Connection conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ Connection conn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
try {
- conn.setAutoCommit(true);
int count = conn.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " (id, user) values (1, 'Bon Scott')");
+ conn.commit();
assertEquals("Expected 1 row to have been inserted", 1, count);
- conn.close();
-
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
ResultSet rs = conn.createStatement().executeQuery("select count(*) from " + TENANT_TABLE_NAME);
rs.next();
assertEquals(1, rs.getInt(1));
@@ -503,19 +409,14 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
@Test
public void testBaseTableCanBeUsedInStatementsInMultitenantConnections() throws Exception {
- Connection conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ Connection conn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
try {
ResultSet rs = conn.createStatement().executeQuery("select * from " + PARENT_TABLE_NAME);
assertFalse(rs.next());
- conn.close();
-
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
- conn.setAutoCommit(true);
conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_type_id, id, user) values ('" + TENANT_TYPE_ID + "', 1, 'Billy Gibbons')");
- conn.close();
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
+ conn.commit();
analyzeTable(conn, PARENT_TABLE_NAME);
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
rs = conn.createStatement().executeQuery("select user from " + PARENT_TABLE_NAME);
assertTrue(rs.next());
assertEquals(rs.getString(1),"Billy Gibbons");
@@ -528,7 +429,8 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
@Test
public void testTenantTableCannotBeUsedInStatementsInNonMultitenantConnections() throws Exception {
- Connection conn = nextConnection(getUrl());
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ Connection conn = DriverManager.getConnection(getUrl(), props);
try {
try {
conn.createStatement().execute("select * from " + TENANT_TABLE_NAME);
@@ -543,16 +445,17 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
@Test
public void testUpsertValuesUsingViewWithNoWhereClause() throws Exception {
- Connection conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
- conn.setAutoCommit(true);
- conn.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME_NO_TENANT_TYPE_ID + " (id) values (0)");
- conn.close();
-
- conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
- ResultSet rs = conn.createStatement().executeQuery("select id from " + TENANT_TABLE_NAME_NO_TENANT_TYPE_ID);
- assertTrue(rs.next());
- assertEquals(0, rs.getInt(1));
- assertFalse(rs.next());
- conn.close();
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ Connection conn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
+ try {
+ conn.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME_NO_TENANT_TYPE_ID + " (id) values (0)");
+ conn.commit();
+ ResultSet rs = conn.createStatement().executeQuery("select id from " + TENANT_TABLE_NAME_NO_TENANT_TYPE_ID);
+ assertTrue(rs.next());
+ assertEquals(0, rs.getInt(1));
+ assertFalse(rs.next());
+ } finally {
+ conn.close();
+ }
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TransactionalViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TransactionalViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TransactionalViewIT.java
index e5cd578..95e7266 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TransactionalViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TransactionalViewIT.java
@@ -30,14 +30,21 @@ import java.util.Map;
import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import com.google.common.collect.Maps;
-public class TransactionalViewIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class TransactionalViewIT extends ParallelStatsEnabledIT {
+
+ private String fullTableName;
+ private String fullViewName;
@BeforeClass
+ @Shadower(classBeingShadowed = ParallelStatsEnabledIT.class)
public static void doSetup() throws Exception {
Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Integer.toString(20));
@@ -45,36 +52,46 @@ public class TransactionalViewIT extends BaseOwnClusterHBaseManagedTimeIT {
setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
+ @Before
+ public void generateTableNames() {
+ String schemaName = TestUtil.DEFAULT_SCHEMA_NAME;
+ String tableName = "T_" + generateRandomString();
+ fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+ String viewName = "V_" + generateRandomString();
+ fullViewName = SchemaUtil.getTableName(schemaName, viewName);
+ }
+
@Test
public void testReadOwnWritesWithStats() throws Exception {
try (Connection conn1 = DriverManager.getConnection(getUrl());
Connection conn2 = DriverManager.getConnection(getUrl())) {
- String ddl = "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 DATE) TRANSACTIONAL=true";
+ String ddl = "CREATE TABLE " + fullTableName
+ + " (k INTEGER NOT NULL PRIMARY KEY, v1 DATE) TRANSACTIONAL=true";
conn1.createStatement().execute(ddl);
- ddl = "CREATE VIEW v (v2 VARCHAR) AS SELECT * FROM t where k>5";
+ ddl = "CREATE VIEW " + fullViewName + " (v2 VARCHAR) AS SELECT * FROM " + fullTableName + " where k>5";
conn1.createStatement().execute(ddl);
for (int i = 0; i < 10; i++) {
- conn1.createStatement().execute("UPSERT INTO t VALUES(" + i + ")");
+ conn1.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES(" + i + ")");
}
// verify you can read your own writes
int count = 0;
- ResultSet rs = conn1.createStatement().executeQuery("SELECT k FROM t");
+ ResultSet rs = conn1.createStatement().executeQuery("SELECT k FROM " + fullTableName);
while (rs.next()) {
assertEquals(count++, rs.getInt(1));
}
assertEquals(10, count);
count = 0;
- rs = conn1.createStatement().executeQuery("SELECT k FROM v");
+ rs = conn1.createStatement().executeQuery("SELECT k FROM " + fullViewName);
while (rs.next()) {
assertEquals(6+count++, rs.getInt(1));
}
assertEquals(4, count);
// verify stats can see the read own writes rows
- analyzeTable(conn2, "v", true);
- List<KeyRange> splits = getAllSplits(conn2, "v");
+ analyzeTable(conn2, fullViewName, true);
+ List<KeyRange> splits = getAllSplits(conn2, fullViewName);
assertEquals(4, splits.size());
}
}
@@ -83,24 +100,25 @@ public class TransactionalViewIT extends BaseOwnClusterHBaseManagedTimeIT {
public void testInvalidRowsWithStats() throws Exception {
try (Connection conn1 = DriverManager.getConnection(getUrl());
Connection conn2 = DriverManager.getConnection(getUrl())) {
- String ddl = "CREATE TABLE t (k INTEGER NOT NULL PRIMARY KEY, v1 DATE) TRANSACTIONAL=true";
+ String ddl = "CREATE TABLE " + fullTableName
+ + " (k INTEGER NOT NULL PRIMARY KEY, v1 DATE) TRANSACTIONAL=true";
conn1.createStatement().execute(ddl);
- ddl = "CREATE VIEW v (v2 VARCHAR) AS SELECT * FROM t where k>5";
+ ddl = "CREATE VIEW " + fullViewName + " (v2 VARCHAR) AS SELECT * FROM " + fullTableName + " where k>5";
conn1.createStatement().execute(ddl);
for (int i = 0; i < 10; i++) {
- conn1.createStatement().execute("UPSERT INTO t VALUES(" + i + ")");
+ conn1.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES(" + i + ")");
}
// verify you can read your own writes
int count = 0;
- ResultSet rs = conn1.createStatement().executeQuery("SELECT k FROM t");
+ ResultSet rs = conn1.createStatement().executeQuery("SELECT k FROM " + fullTableName);
while (rs.next()) {
assertEquals(count++, rs.getInt(1));
}
assertEquals(10, count);
count = 0;
- rs = conn1.createStatement().executeQuery("SELECT k FROM v");
+ rs = conn1.createStatement().executeQuery("SELECT k FROM " + fullViewName);
while (rs.next()) {
assertEquals(6+count++, rs.getInt(1));
}
@@ -110,8 +128,8 @@ public class TransactionalViewIT extends BaseOwnClusterHBaseManagedTimeIT {
// assertEquals("There should be one invalid transaction", 1, txManager.getInvalidSize());
// verify stats can see the rows from the invalid transaction
- analyzeTable(conn2, "v", true);
- List<KeyRange> splits = getAllSplits(conn2, "v");
+ analyzeTable(conn2, fullViewName, true);
+ List<KeyRange> splits = getAllSplits(conn2, fullViewName);
assertEquals(4, splits.size());
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
index bdd94a2..84fa217 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
@@ -34,6 +34,8 @@ import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -62,24 +64,30 @@ import org.apache.phoenix.util.MetaDataUtil;
import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.SchemaUtil;
import org.apache.phoenix.util.UpgradeUtil;
+import org.junit.Before;
import org.junit.Test;
-public class UpgradeIT extends BaseHBaseManagedTimeIT {
+public class UpgradeIT extends ParallelStatsDisabledIT {
- private static String TENANT_ID = "tenantId";
+ private String tenantId;
+
+ @Before
+ public void generateTenantId() {
+ tenantId = "T_" + generateRandomString();
+ }
@Test
public void testUpgradeForTenantViewWithSameColumnsAsBaseTable() throws Exception {
String tableWithViewName = generateRandomString();
String viewTableName = generateRandomString();
- testViewUpgrade(true, TENANT_ID, null, tableWithViewName + "1", null, viewTableName + "1", ColumnDiff.EQUAL);
- testViewUpgrade(true, TENANT_ID, "TABLESCHEMA", tableWithViewName + "", null, viewTableName + "2",
+ testViewUpgrade(true, tenantId, null, tableWithViewName + "1", null, viewTableName + "1", ColumnDiff.EQUAL);
+ testViewUpgrade(true, tenantId, "TABLESCHEMA", tableWithViewName + "", null, viewTableName + "2",
ColumnDiff.EQUAL);
- testViewUpgrade(true, TENANT_ID, null, tableWithViewName + "3", viewTableName + "SCHEMA", viewTableName + "3",
+ testViewUpgrade(true, tenantId, null, tableWithViewName + "3", viewTableName + "SCHEMA", viewTableName + "3",
ColumnDiff.EQUAL);
- testViewUpgrade(true, TENANT_ID, "TABLESCHEMA", tableWithViewName + "4", viewTableName + "SCHEMA", viewTableName + "4",
+ testViewUpgrade(true, tenantId, "TABLESCHEMA", tableWithViewName + "4", viewTableName + "SCHEMA", viewTableName + "4",
ColumnDiff.EQUAL);
- testViewUpgrade(true, TENANT_ID, "SAMESCHEMA", tableWithViewName + "5", "SAMESCHEMA", viewTableName + "5",
+ testViewUpgrade(true, tenantId, "SAMESCHEMA", tableWithViewName + "5", "SAMESCHEMA", viewTableName + "5",
ColumnDiff.EQUAL);
}
@@ -87,14 +95,14 @@ public class UpgradeIT extends BaseHBaseManagedTimeIT {
public void testUpgradeForTenantViewWithMoreColumnsThanBaseTable() throws Exception {
String tableWithViewName = generateRandomString();
String viewTableName = generateRandomString();
- testViewUpgrade(true, TENANT_ID, null, tableWithViewName + "1", null, viewTableName + "1", ColumnDiff.MORE);
- testViewUpgrade(true, TENANT_ID, "TABLESCHEMA", tableWithViewName + "", null, viewTableName + "2",
+ testViewUpgrade(true, tenantId, null, tableWithViewName + "1", null, viewTableName + "1", ColumnDiff.MORE);
+ testViewUpgrade(true, tenantId, "TABLESCHEMA", tableWithViewName + "", null, viewTableName + "2",
ColumnDiff.MORE);
- testViewUpgrade(true, TENANT_ID, null, tableWithViewName + "3", "VIEWSCHEMA", viewTableName + "3",
+ testViewUpgrade(true, tenantId, null, tableWithViewName + "3", "VIEWSCHEMA", viewTableName + "3",
ColumnDiff.MORE);
- testViewUpgrade(true, TENANT_ID, "TABLESCHEMA", tableWithViewName + "4", "VIEWSCHEMA", viewTableName + "4",
+ testViewUpgrade(true, tenantId, "TABLESCHEMA", tableWithViewName + "4", "VIEWSCHEMA", viewTableName + "4",
ColumnDiff.MORE);
- testViewUpgrade(true, TENANT_ID, "SAMESCHEMA", tableWithViewName + "5", "SAMESCHEMA", viewTableName + "5",
+ testViewUpgrade(true, tenantId, "SAMESCHEMA", tableWithViewName + "5", "SAMESCHEMA", viewTableName + "5",
ColumnDiff.MORE);
}
@@ -130,14 +138,14 @@ public class UpgradeIT extends BaseHBaseManagedTimeIT {
public void testSettingBaseColumnCountWhenBaseTableColumnDropped() throws Exception {
String tableWithViewName = generateRandomString();
String viewTableName = generateRandomString();
- testViewUpgrade(true, TENANT_ID, null, tableWithViewName + "1", null, viewTableName + "1", ColumnDiff.MORE);
- testViewUpgrade(true, TENANT_ID, "TABLESCHEMA", tableWithViewName + "", null, viewTableName + "2",
+ testViewUpgrade(true, tenantId, null, tableWithViewName + "1", null, viewTableName + "1", ColumnDiff.MORE);
+ testViewUpgrade(true, tenantId, "TABLESCHEMA", tableWithViewName + "", null, viewTableName + "2",
ColumnDiff.LESS);
- testViewUpgrade(true, TENANT_ID, null, tableWithViewName + "3", "VIEWSCHEMA", viewTableName + "3",
+ testViewUpgrade(true, tenantId, null, tableWithViewName + "3", "VIEWSCHEMA", viewTableName + "3",
ColumnDiff.LESS);
- testViewUpgrade(true, TENANT_ID, "TABLESCHEMA", tableWithViewName + "4", "VIEWSCHEMA", viewTableName + "4",
+ testViewUpgrade(true, tenantId, "TABLESCHEMA", tableWithViewName + "4", "VIEWSCHEMA", viewTableName + "4",
ColumnDiff.LESS);
- testViewUpgrade(true, TENANT_ID, "SAMESCHEMA", tableWithViewName + "5", "SAMESCHEMA", viewTableName + "5",
+ testViewUpgrade(true, tenantId, "SAMESCHEMA", tableWithViewName + "5", "SAMESCHEMA", viewTableName + "5",
ColumnDiff.LESS);
}
@@ -385,19 +393,24 @@ public class UpgradeIT extends BaseHBaseManagedTimeIT {
@Test
public void testSettingBaseColumnCountForMultipleViewsOnTable() throws Exception {
- String baseSchema = "XYZ";
- String baseTable = "BASE_TABLE";
+ String baseSchema = "S_" + generateRandomString();
+ String baseTable = "T_" + generateRandomString();
String fullBaseTableName = SchemaUtil.getTableName(baseSchema, baseTable);
try (Connection conn = DriverManager.getConnection(getUrl())) {
String baseTableDDL = "CREATE TABLE " + fullBaseTableName + " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 INTEGER, V2 INTEGER CONSTRAINT NAME_PK PRIMARY KEY(TENANT_ID, PK1)) MULTI_TENANT = true";
conn.createStatement().execute(baseTableDDL);
- for (int i = 1; i <=2; i++) {
+ String[] tenants = new String[] {"T_" + generateRandomString(), "T_" + generateRandomString()};
+ Collections.sort(Arrays.asList(tenants));
+ String[] tenantViews = new String[] {"V_" + generateRandomString(), "V_" + generateRandomString(), "V_" + generateRandomString()};
+ Collections.sort(Arrays.asList(tenantViews));
+ String[] globalViews = new String[] {"G_" + generateRandomString(), "G_" + generateRandomString(), "G_" + generateRandomString()};
+ Collections.sort(Arrays.asList(globalViews));
+ for (int i = 0; i < 2; i++) {
// Create views for tenants;
- String tenant = "tenant" + i;
+ String tenant = tenants[i];
try (Connection tenantConn = createTenantConnection(tenant)) {
- String view = "TENANT_VIEW1";
-
+ String view = tenantViews[0];
// view with its own column
String viewDDL = "CREATE VIEW " + view + " AS SELECT * FROM " + fullBaseTableName;
tenantConn.createStatement().execute(viewDDL);
@@ -406,7 +419,7 @@ public class UpgradeIT extends BaseHBaseManagedTimeIT {
removeBaseColumnCountKV(tenant, null, view);
// view that has the last base table column removed
- view = "TENANT_VIEW2";
+ view = tenantViews[1];
viewDDL = "CREATE VIEW " + view + " AS SELECT * FROM " + fullBaseTableName;
tenantConn.createStatement().execute(viewDDL);
String droplastBaseCol = "ALTER VIEW " + view + " DROP COLUMN V2";
@@ -414,7 +427,7 @@ public class UpgradeIT extends BaseHBaseManagedTimeIT {
removeBaseColumnCountKV(tenant, null, view);
// view that has the middle base table column removed
- view = "TENANT_VIEW3";
+ view = tenantViews[2];
viewDDL = "CREATE VIEW " + view + " AS SELECT * FROM " + fullBaseTableName;
tenantConn.createStatement().execute(viewDDL);
String dropMiddileBaseCol = "ALTER VIEW " + view + " DROP COLUMN V1";
@@ -425,47 +438,46 @@ public class UpgradeIT extends BaseHBaseManagedTimeIT {
// create global views
try (Connection globalConn = DriverManager.getConnection(getUrl())) {
- String view = "GLOBAL_VIEW1";
-
+ String globalView = globalViews[0];
// view with its own column
- String viewDDL = "CREATE VIEW " + view + " AS SELECT * FROM " + fullBaseTableName;
+ String viewDDL = "CREATE VIEW " + globalView + " AS SELECT * FROM " + fullBaseTableName;
globalConn.createStatement().execute(viewDDL);
- String addCols = "ALTER VIEW " + view + " ADD COL1 VARCHAR ";
+ String addCols = "ALTER VIEW " + globalView + " ADD COL1 VARCHAR ";
globalConn.createStatement().execute(addCols);
- removeBaseColumnCountKV(null, null, view);
+ removeBaseColumnCountKV(null, null, globalView);
// view that has the last base table column removed
- view = "GLOBAL_VIEW2";
- viewDDL = "CREATE VIEW " + view + " AS SELECT * FROM " + fullBaseTableName;
+ globalView = globalViews[1];
+ viewDDL = "CREATE VIEW " + globalView + " AS SELECT * FROM " + fullBaseTableName;
globalConn.createStatement().execute(viewDDL);
- String droplastBaseCol = "ALTER VIEW " + view + " DROP COLUMN V2";
+ String droplastBaseCol = "ALTER VIEW " + globalView + " DROP COLUMN V2";
globalConn.createStatement().execute(droplastBaseCol);
- removeBaseColumnCountKV(null, null, view);
+ removeBaseColumnCountKV(null, null, globalView);
// view that has the middle base table column removed
- view = "GLOBAL_VIEW3";
- viewDDL = "CREATE VIEW " + view + " AS SELECT * FROM " + fullBaseTableName;
+ globalView = globalViews[2];
+ viewDDL = "CREATE VIEW " + globalView + " AS SELECT * FROM " + fullBaseTableName;
globalConn.createStatement().execute(viewDDL);
- String dropMiddileBaseCol = "ALTER VIEW " + view + " DROP COLUMN V1";
+ String dropMiddileBaseCol = "ALTER VIEW " + globalView + " DROP COLUMN V1";
globalConn.createStatement().execute(dropMiddileBaseCol);
- removeBaseColumnCountKV(null, null, view);
+ removeBaseColumnCountKV(null, null, globalView);
}
// run upgrade
UpgradeUtil.upgradeTo4_5_0(conn.unwrap(PhoenixConnection.class));
// Verify base column counts for tenant specific views
- for (int i = 1; i <=2 ; i++) {
- String tenantId = "tenant" + i;
- checkBaseColumnCount(tenantId, null, "TENANT_VIEW1", 4);
- checkBaseColumnCount(tenantId, null, "TENANT_VIEW2", DIVERGED_VIEW_BASE_COLUMN_COUNT);
- checkBaseColumnCount(tenantId, null, "TENANT_VIEW3", DIVERGED_VIEW_BASE_COLUMN_COUNT);
+ for (int i = 0; i < 2 ; i++) {
+ String tenantId = tenants[i];
+ checkBaseColumnCount(tenantId, null, tenantViews[0], 4);
+ checkBaseColumnCount(tenantId, null, tenantViews[1], DIVERGED_VIEW_BASE_COLUMN_COUNT);
+ checkBaseColumnCount(tenantId, null, tenantViews[2], DIVERGED_VIEW_BASE_COLUMN_COUNT);
}
// Verify base column count for global views
- checkBaseColumnCount(null, null, "GLOBAL_VIEW1", 4);
- checkBaseColumnCount(null, null, "GLOBAL_VIEW2", DIVERGED_VIEW_BASE_COLUMN_COUNT);
- checkBaseColumnCount(null, null, "GLOBAL_VIEW3", DIVERGED_VIEW_BASE_COLUMN_COUNT);
+ checkBaseColumnCount(null, null, globalViews[0], 4);
+ checkBaseColumnCount(null, null, globalViews[1], DIVERGED_VIEW_BASE_COLUMN_COUNT);
+ checkBaseColumnCount(null, null, globalViews[2], DIVERGED_VIEW_BASE_COLUMN_COUNT);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
index 2565223..9f53d12 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
@@ -65,12 +65,13 @@ import org.apache.phoenix.schema.ValueRangeExcpetion;
import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.QueryUtil;
import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.After;
import org.junit.BeforeClass;
import org.junit.Test;
import com.google.common.collect.Maps;
-public class UserDefinedFunctionsIT extends BaseOwnClusterIT{
+public class UserDefinedFunctionsIT extends BaseOwnClusterIT {
protected static final String TENANT_ID = "ZZTop";
private static String url;
@@ -190,6 +191,10 @@ public class UserDefinedFunctionsIT extends BaseOwnClusterIT{
private static Properties EMPTY_PROPS = new Properties();
+ @Override
+ @After
+ public void cleanUpAfterTest() throws Exception {}
+
private static String getProgram(String className, String evaluateMethod, String returnType) {
return new StringBuffer()
.append("package org.apache.phoenix.end2end;\n")