You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@phoenix.apache.org by ss...@apache.org on 2017/11/15 06:12:57 UTC
[1/3] phoenix git commit: PHOENIX-4321 Replace deprecated HBaseAdmin
with Admin
Repository: phoenix
Updated Branches:
refs/heads/5.x-HBase-2.0 c85e06581 -> 693fa6598
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
index f6e00cc..b302210 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
@@ -47,8 +47,8 @@ import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.VersionInfo;
+import org.apache.phoenix.hbase.index.IndexTableName;
import org.apache.phoenix.hbase.index.StubAbortable;
-import org.apache.phoenix.hbase.index.TableName;
import org.apache.phoenix.hbase.index.exception.IndexWriteException;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.junit.Rule;
@@ -60,7 +60,7 @@ import org.mockito.stubbing.Answer;
public class TestIndexWriter {
private static final Log LOG = LogFactory.getLog(TestIndexWriter.class);
@Rule
- public TableName testName = new TableName();
+ public IndexTableName testName = new IndexTableName();
private final byte[] row = Bytes.toBytes("row");
@Test
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
index 8573fb1..1fe0342 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
@@ -37,8 +37,8 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.VersionInfo;
+import org.apache.phoenix.hbase.index.IndexTableName;
import org.apache.phoenix.hbase.index.StubAbortable;
-import org.apache.phoenix.hbase.index.TableName;
import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.junit.Rule;
@@ -54,7 +54,7 @@ public class TestParalleIndexWriter {
private static final Log LOG = LogFactory.getLog(TestParalleIndexWriter.class);
@Rule
- public TableName test = new TableName();
+ public IndexTableName test = new IndexTableName();
private final byte[] row = Bytes.toBytes("row");
@Test
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
index 2377ff1..79bc295 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
@@ -37,8 +37,8 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.VersionInfo;
+import org.apache.phoenix.hbase.index.IndexTableName;
import org.apache.phoenix.hbase.index.StubAbortable;
-import org.apache.phoenix.hbase.index.TableName;
import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.junit.Rule;
@@ -54,7 +54,7 @@ public class TestParalleWriterIndexCommitter {
private static final Log LOG = LogFactory.getLog(TestParalleWriterIndexCommitter.class);
@Rule
- public TableName test = new TableName();
+ public IndexTableName test = new IndexTableName();
private final byte[] row = Bytes.toBytes("row");
@Test
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
index 62cb24e..4f1eea6 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
@@ -39,9 +39,9 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@@ -58,9 +58,9 @@ import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
+import org.apache.phoenix.hbase.index.IndexTableName;
import org.apache.phoenix.hbase.index.IndexTestingUtils;
import org.apache.phoenix.hbase.index.Indexer;
-import org.apache.phoenix.hbase.index.TableName;
import org.apache.phoenix.hbase.index.covered.ColumnGroup;
import org.apache.phoenix.hbase.index.covered.CoveredColumn;
import org.apache.phoenix.hbase.index.covered.CoveredColumnIndexSpecifierBuilder;
@@ -90,7 +90,7 @@ public class TestWALRecoveryCaching {
private static final long TIMEOUT = ONE_MIN;
@Rule
- public TableName testTable = new TableName();
+ public IndexTableName testTable = new IndexTableName();
private String getIndexTableName() {
return this.testTable.getTableNameString() + "_index";
@@ -161,7 +161,7 @@ public class TestWALRecoveryCaching {
// start the cluster with 2 rs
util.startMiniCluster(2);
- HBaseAdmin admin = util.getHBaseAdmin();
+ Admin admin = util.getHBaseAdmin();
// setup the index
byte[] family = Bytes.toBytes("family");
byte[] qual = Bytes.toBytes("qualifier");
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 326efa3..1133826 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -115,7 +115,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
import org.apache.hadoop.hbase.util.Bytes;
@@ -1524,7 +1524,7 @@ public abstract class BaseTest {
*/
private static void disableAndDropNonSystemTables() throws Exception {
if (driver == null) return;
- HBaseAdmin admin = driver.getConnectionQueryServices(null, null).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(null, null).getAdmin();
try {
HTableDescriptor[] tables = admin.listTables();
for (HTableDescriptor table : tables) {
@@ -1538,7 +1538,7 @@ public abstract class BaseTest {
}
}
- private static void disableAndDropTable(final HBaseAdmin admin, final TableName tableName)
+ private static void disableAndDropTable(final Admin admin, final TableName tableName)
throws Exception {
Future<Void> future = null;
boolean success = false;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
index 4708ffb..0570826 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
@@ -32,7 +32,7 @@ import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.phoenix.exception.PhoenixIOException;
import org.apache.phoenix.util.ReadOnlyProps;
@@ -52,7 +52,7 @@ public class ConnectionQueryServicesImplTest {
doNothing().when(cqs).createSysMutexTable(any(HBaseAdmin.class), any(ReadOnlyProps.class));
// Spoof out this call so that ensureSystemTablesUpgrade() will return-fast.
- when(cqs.getSystemTableNames(any(HBaseAdmin.class))).thenReturn(Collections.<TableName> emptyList());
+ when(cqs.getSystemTableNames(any(Admin.class))).thenReturn(Collections.<TableName> emptyList());
// Throw a special exception to check on later
doThrow(PHOENIX_IO_EXCEPTION).when(cqs).ensureNamespaceCreated(anyString());
@@ -64,7 +64,7 @@ public class ConnectionQueryServicesImplTest {
// Should be called after upgradeSystemTables()
// Proves that execution proceeded
- verify(cqs).getSystemTableNames(any(HBaseAdmin.class));
+ verify(cqs).getSystemTableNames(any(Admin.class));
try {
// Verifies that the exception is propagated back to the caller
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
index b6f1bef..fcc0261 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
@@ -56,8 +56,9 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
@@ -796,9 +797,9 @@ public class TestUtil {
mutationState.commit();
}
- HBaseAdmin hbaseAdmin = services.getAdmin();
- hbaseAdmin.flush(tableName);
- hbaseAdmin.majorCompact(tableName);
+ Admin hbaseAdmin = services.getAdmin();
+ hbaseAdmin.flush(TableName.valueOf(tableName));
+ hbaseAdmin.majorCompact(TableName.valueOf(tableName));
hbaseAdmin.close();
boolean compactionDone = false;
@@ -821,8 +822,8 @@ public class TestUtil {
// need to run compaction after the next txn snapshot has been written so that compaction can remove deleted rows
if (!compactionDone && table.isTransactional()) {
hbaseAdmin = services.getAdmin();
- hbaseAdmin.flush(tableName);
- hbaseAdmin.majorCompact(tableName);
+ hbaseAdmin.flush(TableName.valueOf(tableName));
+ hbaseAdmin.majorCompact(TableName.valueOf(tableName));
hbaseAdmin.close();
}
}
@@ -972,9 +973,9 @@ public class TestUtil {
}
final int retries = 10;
int numTries = 10;
- try (HBaseAdmin admin = services.getAdmin()) {
- admin.modifyTable(Bytes.toBytes(tableName), descriptor);
- while (!admin.getTableDescriptor(Bytes.toBytes(tableName)).equals(descriptor)
+ try (Admin admin = services.getAdmin()) {
+ admin.modifyTable(TableName.valueOf(tableName), descriptor);
+ while (!admin.getTableDescriptor(TableName.valueOf(tableName)).equals(descriptor)
&& numTries > 0) {
numTries--;
if (numTries == 0) {
@@ -997,9 +998,9 @@ public class TestUtil {
}
final int retries = 10;
int numTries = retries;
- try (HBaseAdmin admin = services.getAdmin()) {
- admin.modifyTable(Bytes.toBytes(tableName), descriptor);
- while (!admin.getTableDescriptor(Bytes.toBytes(tableName)).equals(descriptor)
+ try (Admin admin = services.getAdmin()) {
+ admin.modifyTable(TableName.valueOf(tableName), descriptor);
+ while (!admin.getTableDescriptor(TableName.valueOf(tableName)).equals(descriptor)
&& numTries > 0) {
numTries--;
if (numTries == 0) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-flume/src/it/java/org/apache/phoenix/flume/PhoenixSinkIT.java
----------------------------------------------------------------------
diff --git a/phoenix-flume/src/it/java/org/apache/phoenix/flume/PhoenixSinkIT.java b/phoenix-flume/src/it/java/org/apache/phoenix/flume/PhoenixSinkIT.java
index 867d1ad..01e106f 100644
--- a/phoenix-flume/src/it/java/org/apache/phoenix/flume/PhoenixSinkIT.java
+++ b/phoenix-flume/src/it/java/org/apache/phoenix/flume/PhoenixSinkIT.java
@@ -38,7 +38,8 @@ import org.apache.flume.conf.Configurables;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.lifecycle.LifecycleState;
import org.apache.flume.sink.DefaultSinkFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT;
import org.apache.phoenix.flume.serializer.CustomSerializer;
@@ -186,9 +187,9 @@ public class PhoenixSinkIT extends BaseHBaseManagedTimeIT {
sink.setChannel(channel);
sink.start();
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
try {
- boolean exists = admin.tableExists(fullTableName);
+ boolean exists = admin.tableExists(TableName.valueOf(fullTableName));
Assert.assertTrue(exists);
}finally {
admin.close();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixUtil.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixUtil.java
index 9dcb3ef..d763fae 100644
--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixUtil.java
+++ b/phoenix-hive/src/main/java/org/apache/phoenix/hive/util/PhoenixUtil.java
@@ -24,8 +24,8 @@ import com.google.common.collect.Maps;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
import org.apache.phoenix.hive.constants.PhoenixStorageHandlerConstants;
import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -170,7 +170,7 @@ public class PhoenixUtil {
}
public static void flush(Connection conn, String tableName) throws SQLException {
- try (HBaseAdmin admin = ((PhoenixConnection) conn).getQueryServices().getAdmin()) {
+ try (Admin admin = ((PhoenixConnection) conn).getQueryServices().getAdmin()) {
admin.flush(TableName.valueOf(tableName));
} catch (IOException e) {
throw new SQLException(e);
[2/3] phoenix git commit: PHOENIX-4321 Replace deprecated HBaseAdmin
with Admin
Posted by ss...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/UseSchemaIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UseSchemaIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UseSchemaIT.java
index a578bd3..92871aa 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UseSchemaIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UseSchemaIT.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Put;
@@ -167,7 +168,7 @@ public class UseSchemaIT extends ParallelStatsDisabledIT {
String fullTablename = schema + QueryConstants.NAME_SEPARATOR + tableName;
props.setProperty(QueryServices.SCHEMA_ATTRIB, schema);
Connection conn = DriverManager.getConnection(getUrl(), props);
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
admin.createNamespace(NamespaceDescriptor.create(schema).build());
admin.createTable(new HTableDescriptor(fullTablename)
.addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)));
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index 5c0d100..94f306f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -39,7 +39,7 @@ import java.util.Properties;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -359,7 +359,7 @@ public class ViewIT extends BaseViewIT {
conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName1);
}
String ddl = "CREATE TABLE " + fullTableName1 + " (k INTEGER NOT NULL PRIMARY KEY, v1 DATE)" + tableDDLOptions;
- HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
+ Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
conn.createStatement().execute(ddl);
assertTrue(admin.tableExists(SchemaUtil.getPhysicalTableName(SchemaUtil.normalizeIdentifier(fullTableName1),
conn.unwrap(PhoenixConnection.class).getQueryServices().getProps())));
@@ -723,7 +723,7 @@ public class ViewIT extends BaseViewIT {
props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.TRUE.toString());
try (Connection conn = DriverManager.getConnection(getUrl(), props);
- HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
conn.createStatement().execute("CREATE SCHEMA " + NS);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
index c1f0628..7b060e3 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
@@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
@@ -1048,7 +1048,7 @@ public abstract class BaseIndexIT extends ParallelStatsDisabledIT {
String indexName = "IND_" + generateUniqueName();
String fullTableName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, tableName);
// Check system tables priorities.
- try (HBaseAdmin admin = driver.getConnectionQueryServices(null, null).getAdmin();
+ try (Admin admin = driver.getConnectionQueryServices(null, null).getAdmin();
Connection c = DriverManager.getConnection(getUrl())) {
ResultSet rs = c.getMetaData().getTables("",
"\""+ PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA + "\"",
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
index b92ed8d..550e9e2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
@@ -25,7 +25,7 @@ import java.util.Properties;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
@@ -60,7 +60,7 @@ public class DropMetadataIT extends ParallelStatsDisabledIT {
@Test
public void testDropViewKeepsHTable() throws Exception {
Connection conn = getConnection();
- HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
+ Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
String hbaseNativeViewName = generateUniqueName();
byte[] hbaseNativeBytes = SchemaUtil.getTableNameAsBytes(HBASE_NATIVE_SCHEMA_NAME, hbaseNativeViewName);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index f97ba22..04f34c6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -45,8 +45,9 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
@@ -175,9 +176,9 @@ public class LocalIndexIT extends BaseLocalIndexIT {
Connection conn2 = getConnection();
conn1.createStatement().execute("CREATE LOCAL INDEX " + indexName + " ON " + tableName + "(v1)");
conn2.createStatement().executeQuery("SELECT * FROM " + tableName).next();
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
HTableDescriptor htd = admin
- .getTableDescriptor(Bytes.toBytes(indexPhysicalTableName));
+ .getTableDescriptor(TableName.valueOf(indexPhysicalTableName));
assertEquals(IndexRegionSplitPolicy.class.getName(), htd.getValue(HTableDescriptor.SPLIT_POLICY));
try(org.apache.hadoop.hbase.client.Connection c = ConnectionFactory.createConnection(admin.getConfiguration())) {
try (RegionLocator userTable= c.getRegionLocator(SchemaUtil.getPhysicalTableName(tableName.getBytes(), isNamespaceMapped))) {
@@ -226,7 +227,7 @@ public class LocalIndexIT extends BaseLocalIndexIT {
ResultSet rs = conn1.createStatement().executeQuery("SELECT COUNT(*) FROM " + indexTableName);
assertTrue(rs.next());
assertEquals(4, rs.getInt(1));
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
Table indexTable =
admin.getConnection().getTable(TableName.valueOf(indexPhysicalTableName));
Pair<byte[][], byte[][]> startEndKeys =
@@ -271,7 +272,7 @@ public class LocalIndexIT extends BaseLocalIndexIT {
ResultSet rs = conn1.createStatement().executeQuery("SELECT COUNT(*) FROM " + indexTableName);
assertTrue(rs.next());
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
int numRegions = admin.getTableRegions(physicalTableName).size();
String query = "SELECT t_id, k1, k2, k3, V1 FROM " + tableName +" where v1='a'";
@@ -429,7 +430,7 @@ public class LocalIndexIT extends BaseLocalIndexIT {
conn1.commit();
conn1.createStatement().execute("CREATE LOCAL INDEX " + indexName + " ON " + tableName + "(v1)");
conn1.createStatement().execute("DROP INDEX " + indexName + " ON " + tableName);
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
Table table =
admin.getConnection().getTable(TableName.valueOf(tableName));
Pair<byte[][], byte[][]> startEndKeys =
@@ -590,7 +591,7 @@ public class LocalIndexIT extends BaseLocalIndexIT {
if (isNamespaceMapped) { return; }
PhoenixConnection conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class);
try (Table metaTable = conn.getQueryServices().getTable(TableName.META_TABLE_NAME.getName());
- HBaseAdmin admin = conn.getQueryServices().getAdmin();) {
+ Admin admin = conn.getQueryServices().getAdmin();) {
Statement statement = conn.createStatement();
final String tableName = "T_AUTO_MATIC_REPAIR";
String indexName = "IDX_T_AUTO_MATIC_REPAIR";
@@ -607,10 +608,10 @@ public class LocalIndexIT extends BaseLocalIndexIT {
assertTrue(rs.next());
assertEquals(2000, rs.getLong(1));
List<HRegionInfo> tableRegions = admin.getTableRegions(TableName.valueOf(tableName));
- admin.disableTable(tableName);
+ admin.disableTable(TableName.valueOf(tableName));
copyLocalIndexHFiles(config, tableRegions.get(0), tableRegions.get(1), false);
copyLocalIndexHFiles(config, tableRegions.get(3), tableRegions.get(0), false);
- admin.enableTable(tableName);
+ admin.enableTable(TableName.valueOf(tableName));
int count=getCount(conn, tableName, "L#0");
assertTrue(count > 4000);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index e1d0b31..66fe338 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -40,7 +40,7 @@ import jline.internal.Log;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.util.Bytes;
@@ -638,7 +638,7 @@ public class MutableIndexIT extends ParallelStatsDisabledIT {
public void testIndexHalfStoreFileReader() throws Exception {
Connection conn1 = getConnection();
ConnectionQueryServices connectionQueryServices = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES);
- HBaseAdmin admin = connectionQueryServices.getAdmin();
+ Admin admin = connectionQueryServices.getAdmin();
String tableName = "TBL_" + generateUniqueName();
String indexName = "IDX_" + generateUniqueName();
createBaseTable(conn1, tableName, "('e')");
@@ -699,7 +699,7 @@ public class MutableIndexIT extends ParallelStatsDisabledIT {
}
- private List<HRegionInfo> splitDuringScan(Connection conn1, String tableName, String indexName, String[] strings, HBaseAdmin admin, boolean isReverse)
+ private List<HRegionInfo> splitDuringScan(Connection conn1, String tableName, String indexName, String[] strings, Admin admin, boolean isReverse)
throws SQLException, IOException, InterruptedException {
ResultSet rs;
@@ -726,10 +726,10 @@ public class MutableIndexIT extends ParallelStatsDisabledIT {
for(int i = 0; i <=1; i++) {
Threads.sleep(10000);
if(localIndex) {
- admin.split(Bytes.toBytes(tableName),
+ admin.split(TableName.valueOf(tableName),
ByteUtil.concat(Bytes.toBytes(splitKeys[i])));
} else {
- admin.split(Bytes.toBytes(indexName), ByteUtil.concat(Bytes.toBytes(splitInts[i])));
+ admin.split(TableName.valueOf(indexName), ByteUtil.concat(Bytes.toBytes(splitInts[i])));
}
Thread.sleep(100);
regionsOfUserTable =
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
index 9c6923c..b8b96ac 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
@@ -42,8 +42,8 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
@@ -59,7 +59,6 @@ import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.ReadOnlyProps;
import org.apache.phoenix.util.SchemaUtil;
-import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -194,8 +193,8 @@ public class MutableIndexReplicationIT extends BaseTest {
assertFalse(rs.next());
// make sure the data tables are created on the remote cluster
- HBaseAdmin admin = utility1.getHBaseAdmin();
- HBaseAdmin admin2 = utility2.getHBaseAdmin();
+ Admin admin = utility1.getHBaseAdmin();
+ Admin admin2 = utility2.getHBaseAdmin();
List<String> dataTables = new ArrayList<String>();
dataTables.add(DATA_TABLE_FULL_NAME);
@@ -215,7 +214,7 @@ public class MutableIndexReplicationIT extends BaseTest {
desc.addFamily(col);
//disable/modify/enable table so it has replication enabled
admin.disableTable(desc.getTableName());
- admin.modifyTable(tableName, desc);
+ admin.modifyTable(TableName.valueOf(tableName), desc);
admin.enableTable(desc.getTableName());
LOG.info("Replication enabled on source table: "+tableName);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitIT.java
index b05c9b7..902a83e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitIT.java
@@ -35,7 +35,7 @@ import java.util.Properties;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
@@ -77,7 +77,7 @@ public abstract class MutableIndexSplitIT extends ParallelStatsDisabledIT {
Connection conn1 = getConnection(props);
String tableName = "TBL_" + generateUniqueName();
String indexName = "IDX_" + generateUniqueName();
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
try{
String[] strings = {"a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"};
createTableAndLoadData(conn1, tableName, indexName, strings, isReverse);
@@ -103,7 +103,7 @@ public abstract class MutableIndexSplitIT extends ParallelStatsDisabledIT {
"CREATE " + (localIndex ? "LOCAL" : "")+" INDEX " + indexName + " ON " + tableName + "(v1"+(isReverse?" DESC":"")+") include (k3)");
}
- private List<HRegionInfo> splitDuringScan(Connection conn1, String tableName, String indexName, String[] strings, HBaseAdmin admin, boolean isReverse)
+ private List<HRegionInfo> splitDuringScan(Connection conn1, String tableName, String indexName, String[] strings, Admin admin, boolean isReverse)
throws SQLException, IOException, InterruptedException {
ResultSet rs;
@@ -130,10 +130,10 @@ public abstract class MutableIndexSplitIT extends ParallelStatsDisabledIT {
for(int i = 0; i <=1; i++) {
Threads.sleep(10000);
if(localIndex) {
- admin.split(Bytes.toBytes(tableName),
+ admin.split(TableName.valueOf(tableName),
ByteUtil.concat(Bytes.toBytes(splitKeys[i])));
} else {
- admin.split(Bytes.toBytes(indexName), ByteUtil.concat(Bytes.toBytes(splitInts[i])));
+ admin.split(TableName.valueOf(indexName), ByteUtil.concat(Bytes.toBytes(splitInts[i])));
}
Thread.sleep(100);
regionsOfUserTable =
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
index 61cca0b..c2ae7f2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
@@ -36,7 +36,8 @@ import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -1017,10 +1018,10 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
Configuration conf = conn.unwrap(PhoenixConnection.class).getQueryServices().getConfiguration();
PTable table = metaCache.getTableRef(key).getTable();
assertTrue(MetaDataUtil.tableRegionsOnline(conf, table));
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- admin.disableTable(fullTableName);
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ admin.disableTable(TableName.valueOf(fullTableName));
assertFalse(MetaDataUtil.tableRegionsOnline(conf, table));
- admin.enableTable(fullTableName);
+ admin.enableTable(TableName.valueOf(fullTableName));
}
assertTrue(MetaDataUtil.tableRegionsOnline(conf, table));
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/MutableRollbackIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/MutableRollbackIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/MutableRollbackIT.java
index cbfe9a5..523e0d0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/MutableRollbackIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/MutableRollbackIT.java
@@ -33,7 +33,7 @@ import java.util.Collection;
import java.util.Properties;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.QueryServices;
@@ -508,9 +508,9 @@ public class MutableRollbackIT extends ParallelStatsDisabledIT {
}
}
- private void dropTable(HBaseAdmin admin, Connection conn, String tableName) throws SQLException, IOException {
+ private void dropTable(Admin admin, Connection conn, String tableName) throws SQLException, IOException {
conn.createStatement().execute("DROP TABLE IF EXISTS "+ tableName);
- if(admin.tableExists(tableName)) {
+ if(admin.tableExists(TableName.valueOf(tableName))) {
admin.disableTable(TableName.valueOf(tableName));
admin.deleteTable(TableName.valueOf(tableName));
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
index dc9de81..59ed0d0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -205,7 +205,7 @@ public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterI
// keep trying to split the region
final HBaseTestingUtility utility = getUtility();
- final HBaseAdmin admin = utility.getHBaseAdmin();
+ final Admin admin = utility.getHBaseAdmin();
final TableName dataTN = TableName.valueOf(dataTable);
assertEquals(1, utility.getHBaseCluster().getRegions(dataTN).size());
utility.waitFor(60000L, 1000, new Waiter.Predicate<Exception>() {
@@ -260,7 +260,7 @@ public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterI
final HBaseTestingUtility utility = getUtility();
// try to close the region while UPSERT SELECTs are happening,
final HRegionServer dataRs = utility.getHBaseCluster().getRegionServer(0);
- final HBaseAdmin admin = utility.getHBaseAdmin();
+ final Admin admin = utility.getHBaseAdmin();
final HRegionInfo dataRegion =
admin.getTableRegions(TableName.valueOf(dataTable)).get(0);
logger.info("Closing data table region");
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
index 5916c43..7c6de68 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.VersionInfo;
@@ -140,7 +140,7 @@ public class FailForUnsupportedHBaseVersionsIT {
HRegionServer server = util.getMiniHBaseCluster().getRegionServer(0);
// create the primary table
- HBaseAdmin admin = util.getHBaseAdmin();
+ Admin admin = util.getHBaseAdmin();
if (supported) {
admin.createTable(desc);
assertFalse("Hosting regeion server failed, even the HBase version (" + version
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/iterate/RoundRobinResultIteratorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/iterate/RoundRobinResultIteratorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/iterate/RoundRobinResultIteratorIT.java
index c1f7c88..b0c2cb4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/iterate/RoundRobinResultIteratorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/iterate/RoundRobinResultIteratorIT.java
@@ -38,7 +38,8 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
@@ -77,11 +78,11 @@ public class RoundRobinResultIteratorIT extends ParallelStatsDisabledIT {
ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
int nRegions = services.getAllTableRegions(tableNameBytes).size();
int nRegionsBeforeSplit = nRegions;
- HBaseAdmin admin = services.getAdmin();
+ Admin admin = services.getAdmin();
try {
// Split is an async operation. So hoping 10 seconds is long enough time.
// If the test tends to flap, then you might want to increase the wait time
- admin.split(tableName);
+ admin.split(TableName.valueOf(tableName));
CountDownLatch latch = new CountDownLatch(1);
int nTries = 0;
long waitTimeMillis = 2000;
@@ -257,9 +258,9 @@ public class RoundRobinResultIteratorIT extends ParallelStatsDisabledIT {
}
conn.commit();
ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
- HBaseAdmin admin = services.getAdmin();
+ Admin admin = services.getAdmin();
try {
- admin.flush(tableName);
+ admin.flush(TableName.valueOf(tableName));
} finally {
admin.close();
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
index 1c18667..bde8aeb 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
@@ -37,7 +37,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.ipc.CallRunner;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.HMaster;
@@ -208,19 +209,19 @@ public class PhoenixServerRpcIT extends BaseUniqueNamesOwnClusterIT {
private void ensureTablesOnDifferentRegionServers(String tableName1, String tableName2) throws Exception {
byte[] table1 = Bytes.toBytes(tableName1);
byte[] table2 = Bytes.toBytes(tableName2);
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES).getAdmin();
HBaseTestingUtility util = getUtility();
MiniHBaseCluster cluster = util.getHBaseCluster();
HMaster master = cluster.getMaster();
AssignmentManager am = master.getAssignmentManager();
// verify there is only a single region for data table
- List<HRegionInfo> tableRegions = admin.getTableRegions(table1);
+ List<HRegionInfo> tableRegions = admin.getTableRegions(TableName.valueOf(table1));
assertEquals("Expected single region for " + table1, tableRegions.size(), 1);
HRegionInfo hri1 = tableRegions.get(0);
// verify there is only a single region for index table
- tableRegions = admin.getTableRegions(table2);
+ tableRegions = admin.getTableRegions(TableName.valueOf(table2));
HRegionInfo hri2 = tableRegions.get(0);
assertEquals("Expected single region for " + table2, tableRegions.size(), 1);
@@ -251,9 +252,9 @@ public class PhoenixServerRpcIT extends BaseUniqueNamesOwnClusterIT {
}
}
- hri1 = admin.getTableRegions(table1).get(0);
+ hri1 = admin.getTableRegions(TableName.valueOf(table1)).get(0);
serverName1 = am.getRegionStates().getRegionServerOfRegion(hri1);
- hri2 = admin.getTableRegions(table2).get(0);
+ hri2 = admin.getTableRegions(TableName.valueOf(table2)).get(0);
serverName2 = am.getRegionStates().getRegionServerOfRegion(hri2);
// verify index and data tables are on different servers
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
index 042d915..cf08d63 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
@@ -38,7 +38,7 @@ import java.util.Properties;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
@@ -336,8 +336,8 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
htable.put(put);
- HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
- admin.disableTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
+ Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
+ admin.disableTable(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
try {
// This will succeed initially in updating the HBase metadata, but then will fail when
// the SYSTEM.CATALOG table is attempted to be updated, exercising the code to restore
@@ -347,7 +347,7 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
} catch (SQLException e) {
assertTrue(e.getMessage().contains(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + " is disabled"));
} finally {
- admin.enableTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
+ admin.enableTable(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
admin.close();
}
@@ -385,7 +385,7 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
assertEquals(SQLExceptionCode.TX_MAY_NOT_SWITCH_TO_NON_TX.getErrorCode(), e.getErrorCode());
}
- HBaseAdmin admin = pconn.getQueryServices().getAdmin();
+ Admin admin = pconn.getQueryServices().getAdmin();
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(t2));
desc.addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES));
admin.createTable(desc);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
index e340784..f79b8f0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
@@ -27,8 +27,10 @@ import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.util.Pair;
@@ -49,7 +51,7 @@ public class RecoveryIndexWriter extends IndexWriter {
private static final Log LOG = LogFactory.getLog(RecoveryIndexWriter.class);
private Set<HTableInterfaceReference> nonExistingTablesList = new HashSet<HTableInterfaceReference>();
- private HBaseAdmin admin;
+ private Admin admin;
/**
* Directly specify the {@link IndexCommitter} and {@link IndexFailurePolicy}. Both are expected to be fully setup
@@ -65,7 +67,7 @@ public class RecoveryIndexWriter extends IndexWriter {
public RecoveryIndexWriter(IndexFailurePolicy policy, RegionCoprocessorEnvironment env, String name)
throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
super(new TrackingParallelWriterIndexCommitter(), policy, env, name);
- this.admin = new HBaseAdmin(env.getConfiguration());
+ this.admin = ConnectionFactory.createConnection(env.getConfiguration()).getAdmin();
}
@Override
@@ -74,7 +76,7 @@ public class RecoveryIndexWriter extends IndexWriter {
write(resolveTableReferences(toWrite), allowLocalUpdates);
} catch (MultiIndexWriteFailureException e) {
for (HTableInterfaceReference table : e.getFailedTables()) {
- if (!admin.tableExists(table.getTableName())) {
+ if (!admin.tableExists(TableName.valueOf(table.getTableName()))) {
LOG.warn("Failure due to non existing table: " + table.getTableName());
nonExistingTablesList.add(table);
} else {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
index f3ff39e..cae89ff 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.io.Text;
@@ -285,7 +285,7 @@ public class IndexScrutinyTool extends Configured implements Tool {
PhoenixMapReduceUtil.setInput(job, PhoenixIndexDBWritable.class, qDataTable,
selectQuery);
} else { // TODO check if using a snapshot works
- HBaseAdmin admin = null;
+ Admin admin = null;
String snapshotName;
try {
final PhoenixConnection pConnection =
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
index cf13075..2aa3d3e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
@@ -387,7 +387,7 @@ public class IndexTool extends Configured implements Tool {
PhoenixMapReduceUtil.setInput(job, PhoenixIndexDBWritable.class, qDataTable,
selectQuery);
} else {
- HBaseAdmin admin = null;
+ Admin admin = null;
String snapshotName;
try {
admin = pConnection.getQueryServices().getAdmin();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
index 558df85..1cac944 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -92,7 +92,7 @@ public interface ConnectionQueryServices extends QueryServices, MetaDataMutated
public void init(String url, Properties props) throws SQLException;
public int getLowestClusterHBaseVersion();
- public HBaseAdmin getAdmin() throws SQLException;
+ public Admin getAdmin() throws SQLException;
void clearTableRegionCache(TableName name) throws SQLException;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 6e9b40e..8ab42d6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -105,10 +105,10 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
@@ -921,7 +921,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
String getOperationName();
}
- private void pollForUpdatedTableDescriptor(final HBaseAdmin admin, final HTableDescriptor newTableDescriptor,
+ private void pollForUpdatedTableDescriptor(final Admin admin, final HTableDescriptor newTableDescriptor,
final byte[] tableName) throws InterruptedException, TimeoutException {
checkAndRetry(new RetriableOperation() {
@@ -932,7 +932,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
@Override
public boolean checkForCompletion() throws TimeoutException, IOException {
- HTableDescriptor tableDesc = admin.getTableDescriptor(tableName);
+ HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(tableName));
return newTableDescriptor.equals(tableDesc);
}
});
@@ -993,7 +993,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
void ensureNamespaceCreated(String schemaName) throws SQLException {
SQLException sqlE = null;
- try (HBaseAdmin admin = getAdmin()) {
+ try (Admin admin = getAdmin()) {
NamespaceDescriptor namespaceDescriptor = null;
try {
namespaceDescriptor = admin.getNamespaceDescriptor(schemaName);
@@ -1027,12 +1027,12 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
HTableDescriptor existingDesc = null;
boolean isMetaTable = SchemaUtil.isMetaTable(physicalTableName);
boolean tableExist = true;
- try (HBaseAdmin admin = getAdmin()) {
+ try (Admin admin = getAdmin()) {
final String quorum = ZKConfig.getZKQuorumServersString(config);
final String znode = this.props.get(HConstants.ZOOKEEPER_ZNODE_PARENT);
logger.debug("Found quorum: " + quorum + ":" + znode);
try {
- existingDesc = admin.getTableDescriptor(physicalTableName);
+ existingDesc = admin.getTableDescriptor(TableName.valueOf(physicalTableName));
} catch (org.apache.hadoop.hbase.TableNotFoundException e) {
tableExist = false;
if (tableType == PTableType.VIEW) {
@@ -1138,13 +1138,14 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
private void modifyTable(byte[] tableName, HTableDescriptor newDesc, boolean shouldPoll) throws IOException,
InterruptedException, TimeoutException, SQLException {
- try (HBaseAdmin admin = getAdmin()) {
+ TableName tn = TableName.valueOf(tableName);
+ try (Admin admin = getAdmin()) {
if (!allowOnlineTableSchemaUpdate()) {
- admin.disableTable(tableName);
- admin.modifyTable(tableName, newDesc);
- admin.enableTable(tableName);
+ admin.disableTable(tn);
+ admin.modifyTable(tn, newDesc); // TODO: Update to TableDescriptor
+ admin.enableTable(tn);
} else {
- admin.modifyTable(tableName, newDesc);
+ admin.modifyTable(tn, newDesc); // TODO: Update to TableDescriptor
if (shouldPoll) {
pollForUpdatedTableDescriptor(admin, newDesc, tableName);
}
@@ -1323,7 +1324,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
private boolean ensureViewIndexTableDropped(byte[] physicalTableName, long timestamp) throws SQLException {
byte[] physicalIndexName = MetaDataUtil.getViewIndexPhysicalName(physicalTableName);
boolean wasDeleted = false;
- try (HBaseAdmin admin = getAdmin()) {
+ try (Admin admin = getAdmin()) {
try {
TableName physicalIndexTableName = TableName.valueOf(physicalIndexName);
HTableDescriptor desc = admin.getTableDescriptor(physicalIndexTableName);
@@ -1351,9 +1352,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
private boolean ensureLocalIndexTableDropped(byte[] physicalTableName, long timestamp) throws SQLException {
HTableDescriptor desc = null;
boolean wasDeleted = false;
- try (HBaseAdmin admin = getAdmin()) {
+ try (Admin admin = getAdmin()) {
try {
- desc = admin.getTableDescriptor(physicalTableName);
+ desc = admin.getTableDescriptor(TableName.valueOf(physicalTableName));
for (byte[] fam : desc.getFamiliesKeys()) {
this.tableStatsCache.invalidate(new GuidePostsKey(physicalTableName, fam));
}
@@ -1367,7 +1368,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
}
for(String cf: columnFamiles) {
- admin.deleteColumn(physicalTableName, cf);
+ admin.deleteColumnFamily(TableName.valueOf(physicalTableName), Bytes.toBytes(cf));
}
clearTableRegionCache(TableName.valueOf(physicalTableName));
wasDeleted = true;
@@ -1621,13 +1622,14 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
private void dropTables(final List<byte[]> tableNamesToDelete) throws SQLException {
SQLException sqlE = null;
- try (HBaseAdmin admin = getAdmin()) {
+ try (Admin admin = getAdmin()) {
if (tableNamesToDelete != null){
for ( byte[] tableName : tableNamesToDelete ) {
try {
+ TableName tn = TableName.valueOf(tableName);
HTableDescriptor htableDesc = this.getTableDescriptor(tableName);
- admin.disableTable(tableName);
- admin.deleteTable(tableName);
+ admin.disableTable(tn);
+ admin.deleteTable(tn);
tableStatsCache.invalidateAll(htableDesc);
clearTableRegionCache(TableName.valueOf(tableName));
} catch (TableNotFoundException ignore) {
@@ -1835,7 +1837,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
private void updateDescriptorForTx(PTable table, Map<String, Object> tableProps, HTableDescriptor tableDescriptor,
String txValue, Set<HTableDescriptor> descriptorsToUpdate, Set<HTableDescriptor> origDescriptors) throws SQLException {
byte[] physicalTableName = table.getPhysicalName().getBytes();
- try (HBaseAdmin admin = getAdmin()) {
+ try (Admin admin = getAdmin()) {
setTransactional(tableDescriptor, table.getType(), txValue, tableProps);
Map<String, Object> indexTableProps;
if (txValue == null) {
@@ -1845,7 +1847,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
indexTableProps.put(PhoenixTransactionContext.READ_NON_TX_DATA, Boolean.valueOf(txValue));
}
for (PTable index : table.getIndexes()) {
- HTableDescriptor indexDescriptor = admin.getTableDescriptor(index.getPhysicalName().getBytes());
+ HTableDescriptor indexDescriptor = admin.getTableDescriptor(TableName.valueOf(index.getPhysicalName().getBytes()));
origDescriptors.add(indexDescriptor);
indexDescriptor = new HTableDescriptor(indexDescriptor);
descriptorsToUpdate.add(indexDescriptor);
@@ -1869,7 +1871,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
setTransactional(indexDescriptor, index.getType(), txValue, indexTableProps);
}
try {
- HTableDescriptor indexDescriptor = admin.getTableDescriptor(MetaDataUtil.getViewIndexPhysicalName(physicalTableName));
+ HTableDescriptor indexDescriptor = admin.getTableDescriptor(TableName.valueOf(MetaDataUtil.getViewIndexPhysicalName(physicalTableName)));
origDescriptors.add(indexDescriptor);
indexDescriptor = new HTableDescriptor(indexDescriptor);
descriptorsToUpdate.add(indexDescriptor);
@@ -1879,7 +1881,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
// Ignore, as we may never have created a view index table
}
try {
- HTableDescriptor indexDescriptor = admin.getTableDescriptor(MetaDataUtil.getLocalIndexPhysicalName(physicalTableName));
+ HTableDescriptor indexDescriptor = admin.getTableDescriptor(TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(physicalTableName)));
origDescriptors.add(indexDescriptor);
indexDescriptor = new HTableDescriptor(indexDescriptor);
descriptorsToUpdate.add(indexDescriptor);
@@ -2411,12 +2413,12 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
openConnection();
hConnectionEstablished = true;
boolean isDoNotUpgradePropSet = UpgradeUtil.isNoUpgradeSet(props);
- try (HBaseAdmin admin = getAdmin()) {
+ try (Admin admin = getAdmin()) {
boolean mappedSystemCatalogExists = admin
.tableExists(SchemaUtil.getPhysicalTableName(SYSTEM_CATALOG_NAME_BYTES, true));
if (SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM,
ConnectionQueryServicesImpl.this.getProps())) {
- if (admin.tableExists(SYSTEM_CATALOG_NAME_BYTES)) {
+ if (admin.tableExists(TableName.valueOf(SYSTEM_CATALOG_NAME_BYTES))) {
//check if the server is already updated and have namespace config properly set.
checkClientServerCompatibility(SYSTEM_CATALOG_NAME_BYTES);
}
@@ -2440,7 +2442,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
Long.toString(getSystemTableVersion()));
scnProps.remove(PhoenixRuntime.TENANT_ID_ATTRIB);
String globalUrl = JDBCUtil.removeProperty(url, PhoenixRuntime.TENANT_ID_ATTRIB);
- try (HBaseAdmin hBaseAdmin = getAdmin();
+ try (Admin hBaseAdmin = getAdmin();
PhoenixConnection metaConnection = new PhoenixConnection(ConnectionQueryServicesImpl.this, globalUrl,
scnProps, newEmptyMetaData())) {
try {
@@ -2526,7 +2528,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
}
- void createSysMutexTable(HBaseAdmin admin, ReadOnlyProps props) throws IOException, SQLException {
+ void createSysMutexTable(Admin admin, ReadOnlyProps props) throws IOException, SQLException {
try {
final TableName mutexTableName = SchemaUtil.getPhysicalTableName(
PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME, props);
@@ -2553,11 +2555,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
}
- List<TableName> getSystemTableNames(HBaseAdmin admin) throws IOException {
- return Lists.newArrayList(admin.listTableNames(QueryConstants.SYSTEM_SCHEMA_NAME + "\\..*"));
+ List<TableName> getSystemTableNames(Admin admin) throws IOException {
+ return Lists.newArrayList(admin.listTableNames(QueryConstants.SYSTEM_SCHEMA_NAME + "\\..*")); // TODO: replace to pattern
}
- private void createOtherSystemTables(PhoenixConnection metaConnection, HBaseAdmin hbaseAdmin) throws SQLException, IOException {
+ private void createOtherSystemTables(PhoenixConnection metaConnection, Admin hbaseAdmin) throws SQLException, IOException {
try {
metaConnection.createStatement().execute(QueryConstants.CREATE_SEQUENCE_METADATA);
} catch (TableAlreadyExistsException e) {
@@ -2628,7 +2630,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
// We know that we always need to add the STORE_NULLS column for 4.3 release
columnsToAdd = addColumn(columnsToAdd, PhoenixDatabaseMetaData.STORE_NULLS
+ " " + PBoolean.INSTANCE.getSqlTypeName());
- try (HBaseAdmin admin = getAdmin()) {
+ try (Admin admin = getAdmin()) {
HTableDescriptor[] localIndexTables = admin
.listTables(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + ".*");
for (HTableDescriptor table : localIndexTables) {
@@ -3040,11 +3042,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
private void createSnapshot(String snapshotName, String tableName)
throws SQLException {
- HBaseAdmin admin = null;
+ Admin admin = null;
SQLException sqlE = null;
try {
admin = getAdmin();
- admin.snapshot(snapshotName, tableName);
+ admin.snapshot(snapshotName, TableName.valueOf(tableName));
logger.info("Successfully created snapshot " + snapshotName + " for "
+ tableName);
} catch (Exception e) {
@@ -3075,12 +3077,12 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
boolean tableDisabled = false;
if (!success && snapshotName != null) {
SQLException sqlE = null;
- HBaseAdmin admin = null;
+ Admin admin = null;
try {
logger.warn("Starting restore of " + tableName + " using snapshot "
+ snapshotName + " because upgrade failed");
admin = getAdmin();
- admin.disableTable(tableName);
+ admin.disableTable(TableName.valueOf(tableName));
tableDisabled = true;
admin.restoreSnapshot(snapshotName);
snapshotRestored = true;
@@ -3091,7 +3093,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
} finally {
if (admin != null && tableDisabled) {
try {
- admin.enableTable(tableName);
+ admin.enableTable(TableName.valueOf(tableName));
if (snapshotRestored) {
logger.warn("Successfully restored and enabled " + tableName + " using snapshot "
+ snapshotName);
@@ -3142,7 +3144,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE);
Table metatable = null;
- try (HBaseAdmin admin = getAdmin()) {
+ try (Admin admin = getAdmin()) {
// SYSTEM namespace needs to be created via HBase API's because "CREATE SCHEMA" statement tries to write its metadata
// in SYSTEM:CATALOG table. Without SYSTEM namespace, SYSTEM:CATALOG table cannot be created.
try {
@@ -3183,7 +3185,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, props).getName();
metatable = getTable(mappedSystemTable);
if (tableNames.contains(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)) {
- if (!admin.tableExists(mappedSystemTable)) {
+ if (!admin.tableExists(TableName.valueOf(mappedSystemTable))) {
logger.info("Migrating SYSTEM.CATALOG table to SYSTEM namespace.");
// Actual migration of SYSCAT table
UpgradeUtil.mapTableToNamespace(admin, metatable,
@@ -3287,7 +3289,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
private byte[] getSysMutexPhysicalTableNameBytes() throws IOException, SQLException {
byte[] sysMutexPhysicalTableNameBytes = null;
- try(HBaseAdmin admin = getAdmin()) {
+ try(Admin admin = getAdmin()) {
if(admin.tableExists(PhoenixDatabaseMetaData.SYSTEM_MUTEX_HBASE_TABLE_NAME)) {
sysMutexPhysicalTableNameBytes = PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES;
} else if (admin.tableExists(TableName.valueOf(
@@ -3524,25 +3526,25 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
private void flushTable(byte[] tableName) throws SQLException {
- HBaseAdmin admin = getAdmin();
+ Admin admin = getAdmin();
try {
- admin.flush(tableName);
+ admin.flush(TableName.valueOf(tableName));
} catch (IOException e) {
throw new PhoenixIOException(e);
- } catch (InterruptedException e) {
- // restore the interrupt status
- Thread.currentThread().interrupt();
- throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build()
- .buildException();
+// } catch (InterruptedException e) {
+// // restore the interrupt status
+// Thread.currentThread().interrupt();
+// throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build()
+// .buildException();
} finally {
Closeables.closeQuietly(admin);
}
}
@Override
- public HBaseAdmin getAdmin() throws SQLException {
+ public Admin getAdmin() throws SQLException {
try {
- return new HBaseAdmin(connection);
+ return connection.getAdmin();
} catch (IOException e) {
throw new PhoenixIOException(e);
}
@@ -4366,7 +4368,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
private void ensureNamespaceDropped(String schemaName, long mutationTime) throws SQLException {
SQLException sqlE = null;
- try (HBaseAdmin admin = getAdmin()) {
+ try (Admin admin = getAdmin()) {
final String quorum = ZKConfig.getZKQuorumServersString(config);
final String znode = this.props.get(HConstants.ZOOKEEPER_ZNODE_PARENT);
logger.debug("Found quorum: " + quorum + ":" + znode);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index 410bb71..b748568 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -358,7 +358,7 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
}
@Override
- public HBaseAdmin getAdmin() throws SQLException {
+ public Admin getAdmin() throws SQLException {
throw new UnsupportedOperationException();
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
index e57dadd..bb24602 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -153,7 +153,7 @@ public class DelegateConnectionQueryServices extends DelegateQueryServices imple
}
@Override
- public HBaseAdmin getAdmin() throws SQLException {
+ public Admin getAdmin() throws SQLException {
return getDelegate().getAdmin();
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 338b325..d618183 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -131,7 +131,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
@@ -2885,8 +2885,8 @@ public class MetaDataClient {
.getViewIndexPhysicalName(table.getPhysicalName().getBytes());
if (!dropMetaData) {
// we need to drop rows only when actually view index exists
- try (HBaseAdmin admin = connection.getQueryServices().getAdmin()) {
- hasViewIndexTable = admin.tableExists(viewIndexPhysicalName);
+ try (Admin admin = connection.getQueryServices().getAdmin()) {
+ hasViewIndexTable = admin.tableExists(org.apache.hadoop.hbase.TableName.valueOf(viewIndexPhysicalName));
} catch (IOException e1) {
// absorbing as it is not critical check
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index 4b264e3..e913d39 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -80,9 +80,9 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@@ -188,7 +188,7 @@ public class UpgradeUtil {
return Bytes.toBytes("_BAK_" + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME);
}
- private static void createSequenceSnapshot(HBaseAdmin admin, PhoenixConnection conn) throws SQLException {
+ private static void createSequenceSnapshot(Admin admin, PhoenixConnection conn) throws SQLException {
byte[] tableName = getSequenceSnapshotName();
HColumnDescriptor columnDesc = new HColumnDescriptor(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES);
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
@@ -201,16 +201,16 @@ public class UpgradeUtil {
}
}
- private static void restoreSequenceSnapshot(HBaseAdmin admin, PhoenixConnection conn) throws SQLException {
+ private static void restoreSequenceSnapshot(Admin admin, PhoenixConnection conn) throws SQLException {
byte[] tableName = getSequenceSnapshotName();
copyTable(conn, tableName, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
}
- private static void deleteSequenceSnapshot(HBaseAdmin admin) throws SQLException {
- byte[] tableName = getSequenceSnapshotName();
+ private static void deleteSequenceSnapshot(Admin admin) throws SQLException {
+ TableName tableName = TableName.valueOf(getSequenceSnapshotName());
try {
- admin.disableTable(TableName.valueOf(tableName));
- admin.deleteTable(TableName.valueOf(tableName));
+ admin.disableTable(tableName);
+ admin.deleteTable(tableName);
} catch (IOException e) {
throw ServerUtil.parseServerException(e);
}
@@ -286,7 +286,7 @@ public class UpgradeUtil {
}
private static void preSplitSequenceTable(PhoenixConnection conn, int nSaltBuckets) throws SQLException {
- HBaseAdmin admin = conn.getQueryServices().getAdmin();
+ Admin admin = conn.getQueryServices().getAdmin();
boolean snapshotCreated = false;
boolean success = false;
try {
@@ -338,7 +338,7 @@ public class UpgradeUtil {
PhoenixConnection toReturn = null;
globalConnection = new PhoenixConnection(metaConnection, metaConnection.getQueryServices(), props);
SQLException sqlEx = null;
- try (HBaseAdmin admin = globalConnection.getQueryServices().getAdmin()) {
+ try (Admin admin = globalConnection.getQueryServices().getAdmin()) {
ResultSet rs = globalConnection.createStatement().executeQuery("SELECT TABLE_SCHEM, TABLE_NAME, DATA_TABLE_NAME, TENANT_ID, MULTI_TENANT, SALT_BUCKETS FROM SYSTEM.CATALOG "
+ " WHERE COLUMN_NAME IS NULL"
+ " AND COLUMN_FAMILY IS NULL"
@@ -499,7 +499,7 @@ public class UpgradeUtil {
try {
globalConnection = new PhoenixConnection(connParam, connParam.getQueryServices(), props);
String tenantId = null;
- try (HBaseAdmin admin = globalConnection.getQueryServices().getAdmin()) {
+ try (Admin admin = globalConnection.getQueryServices().getAdmin()) {
String fetchViewIndexes = "SELECT " + TENANT_ID + ", " + TABLE_SCHEM + ", " + TABLE_NAME +
", " + DATA_TABLE_NAME + " FROM " + SYSTEM_CATALOG_NAME + " WHERE " + VIEW_INDEX_ID
+ " IS NOT NULL";
@@ -1428,10 +1428,10 @@ public class UpgradeUtil {
}
private static void upgradeDescVarLengthRowKeys(PhoenixConnection upgradeConn, PhoenixConnection globalConn, String schemaName, String tableName, boolean isTable, boolean bypassUpgrade) throws SQLException {
- String physicalName = SchemaUtil.getTableName(schemaName, tableName);
+ TableName physicalName = TableName.valueOf(SchemaUtil.getTableName(schemaName, tableName));
long currentTime = System.currentTimeMillis();
String snapshotName = physicalName + "_" + currentTime;
- HBaseAdmin admin = null;
+ Admin admin = null;
if (isTable && !bypassUpgrade) {
admin = globalConn.getQueryServices().getAdmin();
}
@@ -1442,9 +1442,9 @@ public class UpgradeUtil {
String msg = "Taking snapshot of physical table " + physicalName + " prior to upgrade...";
System.out.println(msg);
logger.info(msg);
- admin.disableTable(TableName.valueOf(physicalName));
- admin.snapshot(snapshotName, TableName.valueOf(physicalName));
- admin.enableTable(TableName.valueOf(physicalName));
+ admin.disableTable(physicalName);
+ admin.snapshot(snapshotName, physicalName);
+ admin.enableTable(physicalName);
restoreSnapshot = true;
}
String escapedTableName = SchemaUtil.getEscapedTableName(schemaName, tableName);
@@ -1517,9 +1517,9 @@ public class UpgradeUtil {
boolean restored = false;
try {
if (!success && restoreSnapshot) {
- admin.disableTable(TableName.valueOf(physicalName));
+ admin.disableTable(physicalName);
admin.restoreSnapshot(snapshotName, false);
- admin.enableTable(TableName.valueOf(physicalName));
+ admin.enableTable(physicalName);
String msg = "Restored snapshot of " + physicalName + " due to failure of upgrade";
System.out.println(msg);
logger.info(msg);
@@ -1727,11 +1727,10 @@ public class UpgradeUtil {
return false;
}
- private static void mapTableToNamespace(HBaseAdmin admin, Table metatable, String srcTableName,
+ private static void mapTableToNamespace(Admin admin, Table metatable, String srcTableName,
String destTableName, ReadOnlyProps props, Long ts, String phoenixTableName, PTableType pTableType,PName tenantId)
throws SnapshotCreationException, IllegalArgumentException, IOException, InterruptedException,
SQLException {
- srcTableName = SchemaUtil.normalizeIdentifier(srcTableName);
if (!SchemaUtil.isNamespaceMappingEnabled(pTableType,
props)) { throw new IllegalArgumentException(SchemaUtil.isSystemTable(srcTableName.getBytes())
? "For system table " + QueryServices.IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE
@@ -1761,23 +1760,25 @@ public class UpgradeUtil {
}
}
- public static void mapTableToNamespace(HBaseAdmin admin, String srcTableName, String destTableName, PTableType pTableType) throws IOException {
- boolean srcTableExists=admin.tableExists(srcTableName);
+ public static void mapTableToNamespace(Admin admin, String srcTableName, String destTableName, PTableType pTableType) throws IOException {
+ TableName srcTable = TableName.valueOf(SchemaUtil.normalizeIdentifier(srcTableName));
+ TableName dstTable = TableName.valueOf(destTableName);
+ boolean srcTableExists=admin.tableExists(srcTable);
// we need to move physical table in actual namespace for TABLE and Index
if (srcTableExists && (PTableType.TABLE.equals(pTableType)
|| PTableType.INDEX.equals(pTableType) || PTableType.SYSTEM.equals(pTableType))) {
- boolean destTableExists=admin.tableExists(destTableName);
+ boolean destTableExists=admin.tableExists(dstTable);
if (!destTableExists) {
String snapshotName = QueryConstants.UPGRADE_TABLE_SNAPSHOT_PREFIX + srcTableName;
logger.info("Disabling table " + srcTableName + " ..");
- admin.disableTable(srcTableName);
+ admin.disableTable(srcTable);
logger.info(String.format("Taking snapshot %s of table %s..", snapshotName, srcTableName));
- admin.snapshot(snapshotName, srcTableName);
+ admin.snapshot(snapshotName, srcTable);
logger.info(
String.format("Restoring snapshot %s in destination table %s..", snapshotName, destTableName));
- admin.cloneSnapshot(Bytes.toBytes(snapshotName), Bytes.toBytes(destTableName));
+ admin.cloneSnapshot(snapshotName, dstTable);
logger.info(String.format("deleting old table %s..", srcTableName));
- admin.deleteTable(srcTableName);
+ admin.deleteTable(srcTable);
logger.info(String.format("deleting snapshot %s..", snapshotName));
admin.deleteSnapshot(snapshotName);
}
@@ -1788,7 +1789,7 @@ public class UpgradeUtil {
* Method to map existing phoenix table to a namespace. Should not be use if tables has views and indexes ,instead
* use map table utility in psql.py
*/
- public static void mapTableToNamespace(HBaseAdmin admin, Table metatable, String tableName,
+ public static void mapTableToNamespace(Admin admin, Table metatable, String tableName,
ReadOnlyProps props, Long ts, PTableType pTableType, PName tenantId) throws SnapshotCreationException,
IllegalArgumentException, IOException, InterruptedException, SQLException {
String destTablename = SchemaUtil
@@ -1804,7 +1805,7 @@ public class UpgradeUtil {
if (!SchemaUtil.isNamespaceMappingEnabled(PTableType.TABLE,
readOnlyProps)) { throw new IllegalArgumentException(
QueryServices.IS_NAMESPACE_MAPPING_ENABLED + " is not enabled!!"); }
- try (HBaseAdmin admin = conn.getQueryServices().getAdmin();
+ try (Admin admin = conn.getQueryServices().getAdmin();
Table metatable = conn.getQueryServices()
.getTable(SchemaUtil
.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, readOnlyProps)
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTableName.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTableName.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTableName.java
new file mode 100644
index 0000000..689a5ee
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/IndexTableName.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.hbase.index;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.rules.TestWatcher;
+import org.junit.runner.Description;
+
+/**
+ * Returns a {@code byte[]} containing the name of the currently running test method.
+ */
+public class IndexTableName extends TestWatcher {
+ private String tableName;
+
+ /**
+ * Invoked when a test is about to start
+ */
+ @Override
+ protected void starting(Description description) {
+ tableName = description.getMethodName();
+ }
+
+ public byte[] getTableName() {
+ return Bytes.toBytes(tableName);
+ }
+
+ public String getTableNameString() {
+ return this.tableName;
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/TableName.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/TableName.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/TableName.java
deleted file mode 100644
index 835e12c..0000000
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/TableName.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.rules.TestWatcher;
-import org.junit.runner.Description;
-
-/**
- * Returns a {@code byte[]} containing the name of the currently running test method.
- */
-public class TableName extends TestWatcher {
- private String tableName;
-
- /**
- * Invoked when a test is about to start
- */
- @Override
- protected void starting(Description description) {
- tableName = description.getMethodName();
- }
-
- public byte[] getTableName() {
- return Bytes.toBytes(tableName);
- }
-
- public String getTableNameString() {
- return this.tableName;
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestThreadPoolBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestThreadPoolBuilder.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestThreadPoolBuilder.java
index 5ff7b8b..f3a7201 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestThreadPoolBuilder.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestThreadPoolBuilder.java
@@ -20,16 +20,14 @@ package org.apache.phoenix.hbase.index.parallel;
import static org.junit.Assert.*;
import org.apache.hadoop.conf.Configuration;
+import org.apache.phoenix.hbase.index.IndexTableName;
import org.junit.Rule;
import org.junit.Test;
-import org.apache.phoenix.hbase.index.TableName;
-import org.apache.phoenix.hbase.index.parallel.ThreadPoolBuilder;
-
public class TestThreadPoolBuilder {
@Rule
- public TableName name = new TableName();
+ public IndexTableName name = new IndexTableName();
@Test
public void testCoreThreadTimeoutNonZero() {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestThreadPoolManager.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestThreadPoolManager.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestThreadPoolManager.java
index 24c30ac..8060246 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestThreadPoolManager.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/parallel/TestThreadPoolManager.java
@@ -27,17 +27,14 @@ import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
+import org.apache.phoenix.hbase.index.IndexTableName;
import org.junit.Rule;
import org.junit.Test;
-import org.apache.phoenix.hbase.index.TableName;
-import org.apache.phoenix.hbase.index.parallel.ThreadPoolBuilder;
-import org.apache.phoenix.hbase.index.parallel.ThreadPoolManager;
-
public class TestThreadPoolManager {
@Rule
- public TableName name = new TableName();
+ public IndexTableName name = new IndexTableName();
@Test
public void testShutdownGetsNewThreadPool() throws Exception{
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java
index ae3efc6..317d07a 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.regionserver.wal.IndexedHLogReader;
import org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec;
import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
@@ -72,11 +72,11 @@ public class TestIndexManagementUtil {
/**
* Create the specified index table with the necessary columns
- * @param admin {@link HBaseAdmin} to use when creating the table
+ * @param admin {@link Admin} to use when creating the table
* @param indexTable name of the index table.
* @throws IOException
*/
- public static void createIndexTable(HBaseAdmin admin, String indexTable) throws IOException {
+ public static void createIndexTable(Admin admin, String indexTable) throws IOException {
createIndexTable(admin, new HTableDescriptor(indexTable));
}
@@ -84,7 +84,7 @@ public class TestIndexManagementUtil {
* @param admin to create the table
* @param index descriptor to update before creating table
*/
- public static void createIndexTable(HBaseAdmin admin, HTableDescriptor index) throws IOException {
+ public static void createIndexTable(Admin admin, HTableDescriptor index) throws IOException {
HColumnDescriptor col =
new HColumnDescriptor(CoveredColumnIndexCodec.INDEX_ROW_COLUMN_FAMILY);
// ensure that we can 'see past' delete markers when doing scans
[3/3] phoenix git commit: PHOENIX-4321 Replace deprecated HBaseAdmin
with Admin
Posted by ss...@apache.org.
PHOENIX-4321 Replace deprecated HBaseAdmin with Admin
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/693fa659
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/693fa659
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/693fa659
Branch: refs/heads/5.x-HBase-2.0
Commit: 693fa6598df41c2cbd8111e465cf98d1b3ba1ec4
Parents: c85e065
Author: Sergey Soldatov <ss...@apache.org>
Authored: Thu Nov 9 13:29:50 2017 -0800
Committer: Sergey Soldatov <ss...@apache.org>
Committed: Tue Nov 14 22:12:35 2017 -0800
----------------------------------------------------------------------
...ReplayWithIndexWritesAndCompressedWALIT.java | 13 +--
.../StatisticsCollectionRunTrackerIT.java | 11 +--
.../phoenix/end2end/AggregateQueryIT.java | 8 +-
.../apache/phoenix/end2end/AlterTableIT.java | 11 ++-
.../end2end/ColumnProjectionOptimizationIT.java | 9 +-
.../apache/phoenix/end2end/CreateSchemaIT.java | 4 +-
.../apache/phoenix/end2end/CreateTableIT.java | 45 ++++-----
.../phoenix/end2end/DisableLocalIndexIT.java | 7 +-
.../apache/phoenix/end2end/DropSchemaIT.java | 4 +-
.../apache/phoenix/end2end/DynamicColumnIT.java | 4 +-
.../phoenix/end2end/FlappingAlterTableIT.java | 12 +--
.../phoenix/end2end/FlappingLocalIndexIT.java | 8 +-
.../phoenix/end2end/LocalIndexSplitMergeIT.java | 6 +-
.../phoenix/end2end/MappingTableDataTypeIT.java | 6 +-
.../end2end/NamespaceSchemaMappingIT.java | 8 +-
.../phoenix/end2end/NativeHBaseTypesIT.java | 5 +-
.../phoenix/end2end/ProductMetricsIT.java | 7 +-
.../end2end/QueryDatabaseMetaDataIT.java | 21 +++--
.../apache/phoenix/end2end/ReverseScanIT.java | 8 --
.../apache/phoenix/end2end/SetPropertyIT.java | 96 +++++++++----------
.../end2end/SkipScanAfterManualSplitIT.java | 11 ++-
.../apache/phoenix/end2end/SkipScanQueryIT.java | 7 +-
.../end2end/TableSnapshotReadsMapReduceIT.java | 8 +-
.../end2end/TenantSpecificTablesDDLIT.java | 4 +-
.../org/apache/phoenix/end2end/UpgradeIT.java | 20 ++--
.../org/apache/phoenix/end2end/UseSchemaIT.java | 3 +-
.../java/org/apache/phoenix/end2end/ViewIT.java | 6 +-
.../phoenix/end2end/index/BaseIndexIT.java | 4 +-
.../phoenix/end2end/index/DropMetadataIT.java | 4 +-
.../phoenix/end2end/index/LocalIndexIT.java | 19 ++--
.../phoenix/end2end/index/MutableIndexIT.java | 10 +-
.../index/MutableIndexReplicationIT.java | 9 +-
.../end2end/index/MutableIndexSplitIT.java | 10 +-
.../end2end/index/PartialIndexRebuilderIT.java | 9 +-
.../end2end/index/txn/MutableRollbackIT.java | 6 +-
.../UpsertSelectOverlappingBatchesIT.java | 6 +-
.../FailForUnsupportedHBaseVersionsIT.java | 4 +-
.../iterate/RoundRobinResultIteratorIT.java | 11 ++-
.../apache/phoenix/rpc/PhoenixServerRpcIT.java | 13 +--
.../phoenix/tx/ParameterizedTransactionIT.java | 10 +-
.../hbase/index/write/RecoveryIndexWriter.java | 10 +-
.../mapreduce/index/IndexScrutinyTool.java | 4 +-
.../phoenix/mapreduce/index/IndexTool.java | 4 +-
.../phoenix/query/ConnectionQueryServices.java | 4 +-
.../query/ConnectionQueryServicesImpl.java | 98 ++++++++++----------
.../query/ConnectionlessQueryServicesImpl.java | 4 +-
.../query/DelegateConnectionQueryServices.java | 4 +-
.../apache/phoenix/schema/MetaDataClient.java | 6 +-
.../org/apache/phoenix/util/UpgradeUtil.java | 57 ++++++------
.../phoenix/hbase/index/IndexTableName.java | 45 +++++++++
.../apache/phoenix/hbase/index/TableName.java | 45 ---------
.../index/parallel/TestThreadPoolBuilder.java | 6 +-
.../index/parallel/TestThreadPoolManager.java | 7 +-
.../index/util/TestIndexManagementUtil.java | 8 +-
.../hbase/index/write/TestIndexWriter.java | 4 +-
.../index/write/TestParalleIndexWriter.java | 4 +-
.../write/TestParalleWriterIndexCommitter.java | 4 +-
.../index/write/TestWALRecoveryCaching.java | 8 +-
.../java/org/apache/phoenix/query/BaseTest.java | 6 +-
.../query/ConnectionQueryServicesImplTest.java | 6 +-
.../java/org/apache/phoenix/util/TestUtil.java | 25 ++---
.../org/apache/phoenix/flume/PhoenixSinkIT.java | 7 +-
.../apache/phoenix/hive/util/PhoenixUtil.java | 4 +-
63 files changed, 422 insertions(+), 415 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index 9566e48..67b7df3 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -38,9 +38,10 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
@@ -55,8 +56,8 @@ import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALSplitter;
import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
+import org.apache.phoenix.hbase.index.IndexTableName;
import org.apache.phoenix.hbase.index.IndexTestingUtils;
-import org.apache.phoenix.hbase.index.TableName;
import org.apache.phoenix.hbase.index.covered.ColumnGroup;
import org.apache.phoenix.hbase.index.covered.CoveredColumn;
import org.apache.phoenix.hbase.index.covered.CoveredColumnIndexSpecifierBuilder;
@@ -86,7 +87,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
public static final Log LOG = LogFactory.getLog(WALReplayWithIndexWritesAndCompressedWALIT.class);
@Rule
- public TableName table = new TableName();
+ public IndexTableName table = new IndexTableName();
private String INDEX_TABLE_NAME = table.getTableNameString() + "_INDEX";
final HBaseTestingUtility UTIL = new HBaseTestingUtility();
@@ -236,9 +237,9 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
assertEquals("Primary region wasn't updated from WAL replay!", 1, result.size());
// cleanup the index table
- HBaseAdmin admin = UTIL.getHBaseAdmin();
- admin.disableTable(INDEX_TABLE_NAME);
- admin.deleteTable(INDEX_TABLE_NAME);
+ Admin admin = UTIL.getHBaseAdmin();
+ admin.disableTable(TableName.valueOf(INDEX_TABLE_NAME));
+ admin.deleteTable(TableName.valueOf(INDEX_TABLE_NAME));
admin.close();
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
index cf475f9..27ebec0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
@@ -30,8 +30,7 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.phoenix.end2end.ParallelStatsEnabledIT;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.stats.StatisticsCollectionRunTracker;
@@ -137,13 +136,13 @@ public class StatisticsCollectionRunTrackerIT extends ParallelStatsEnabledIT {
}
private HRegionInfo createTableAndGetRegion(String tableName) throws Exception {
- byte[] tableNameBytes = Bytes.toBytes(tableName);
+ TableName tn = TableName.valueOf(tableName);
String ddl = "CREATE TABLE " + tableName + " (PK1 VARCHAR PRIMARY KEY, KV1 VARCHAR)";
try (Connection conn = DriverManager.getConnection(getUrl())) {
conn.createStatement().execute(ddl);
PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
- try (HBaseAdmin admin = phxConn.getQueryServices().getAdmin()) {
- List<HRegionInfo> tableRegions = admin.getTableRegions(tableNameBytes);
+ try (Admin admin = phxConn.getQueryServices().getAdmin()) {
+ List<HRegionInfo> tableRegions = admin.getTableRegions(tn);
return tableRegions.get(0);
}
}
@@ -157,7 +156,7 @@ public class StatisticsCollectionRunTrackerIT extends ParallelStatsEnabledIT {
private void runMajorCompaction(String tableName) throws Exception {
try (PhoenixConnection conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class)) {
- try (HBaseAdmin admin = conn.getQueryServices().getAdmin()) {
+ try (Admin admin = conn.getQueryServices().getAdmin()) {
TableName t = TableName.valueOf(tableName);
admin.flush(t);
admin.majorCompact(t);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java
index 437ee4f..cb892c6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java
@@ -37,9 +37,9 @@ import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
@@ -83,7 +83,7 @@ public class AggregateQueryIT extends BaseQueryIT {
String query = "SELECT a_string, b_string, count(1) FROM " + tableName + " WHERE organization_id=? and entity_id<=? GROUP BY a_string,b_string";
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(getUrl(), props);
- HBaseAdmin admin = null;
+ Admin admin = null;
try {
PreparedStatement statement = conn.prepareStatement(query);
statement.setString(1, tenantId);
@@ -103,7 +103,7 @@ public class AggregateQueryIT extends BaseQueryIT {
assertEquals(1, rs.getLong(3));
assertFalse(rs.next());
- byte[] tableNameBytes = Bytes.toBytes(tableName);
+ TableName tn = TableName.valueOf(tableName);
admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
- Table htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(tableNameBytes);
+ Table htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(tn.getName());
Configuration configuration = conn.unwrap(PhoenixConnection.class).getQueryServices().getConfiguration();
@@ -111,7 +111,7 @@ public class AggregateQueryIT extends BaseQueryIT {
((ClusterConnection)hbaseConn).clearRegionCache(TableName.valueOf(tableName));
RegionLocator regionLocator = hbaseConn.getRegionLocator(TableName.valueOf(tableName));
int nRegions = regionLocator.getAllRegionLocations().size();
- admin.split(tableNameBytes, ByteUtil.concat(Bytes.toBytes(tenantId), Bytes.toBytes("00A3")));
+ admin.split(tn, ByteUtil.concat(Bytes.toBytes(tenantId), Bytes.toBytes("00A3")));
int retryCount = 0;
do {
Thread.sleep(2000);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index 5265b09..903fddc 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -48,7 +48,8 @@ import java.util.Properties;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -231,8 +232,8 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
try {
conn.createStatement().execute(ddl);
conn.createStatement().execute("ALTER TABLE " + dataTableFullName + " ADD CF.col2 integer CF.IN_MEMORY=true");
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName)).getColumnFamilies();
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(TableName.valueOf(dataTableFullName)).getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
assertFalse(columnFamilies[0].isInMemory());
@@ -936,8 +937,8 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
assertEquals(3, rs.getShort("KEY_SEQ"));
assertFalse(rs.next());
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
index 08ecee6..56947bb 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
@@ -44,7 +44,8 @@ import java.util.Properties;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
@@ -216,7 +217,7 @@ public class ColumnProjectionOptimizationIT extends ParallelStatsDisabledIT {
byte[][] familyNames = new byte[][] { cfB, cfC };
String table = generateUniqueName();
byte[] htableName = SchemaUtil.getTableNameAsBytes("", table);
- HBaseAdmin admin = pconn.getQueryServices().getAdmin();
+ Admin admin = pconn.getQueryServices().getAdmin();
@SuppressWarnings("deprecation")
HTableDescriptor descriptor = new HTableDescriptor(htableName);
@@ -298,8 +299,8 @@ public class ColumnProjectionOptimizationIT extends ParallelStatsDisabledIT {
assertFalse(rs.next());
} finally {
if (htable != null) htable.close();
- admin.disableTable(htableName);
- admin.deleteTable(htableName);
+ admin.disableTable(TableName.valueOf(htableName));
+ admin.deleteTable(TableName.valueOf(htableName));
admin.close();
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateSchemaIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateSchemaIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateSchemaIT.java
index fe09dcd..a05d702 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateSchemaIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateSchemaIT.java
@@ -26,7 +26,7 @@ import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.QueryServices;
@@ -45,7 +45,7 @@ public class CreateSchemaIT extends ParallelStatsDisabledIT {
String schemaName = generateUniqueName();
String ddl = "CREATE SCHEMA " + schemaName;
try (Connection conn = DriverManager.getConnection(getUrl(), props);
- HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();) {
+ Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();) {
conn.createStatement().execute(ddl);
assertNotNull(admin.getNamespaceDescriptor(schemaName));
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
index 1abc653..866bd85 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
@@ -35,8 +35,9 @@ import java.util.List;
import java.util.Properties;
import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.GlobalPermissionOrBuilder;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.exception.SQLExceptionCode;
@@ -114,10 +115,10 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
try (Connection conn = DriverManager.getConnection(getUrl(), props);) {
conn.createStatement().execute(ddl);
}
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
- assertNotNull(admin.getTableDescriptor(Bytes.toBytes(tableName)));
+ Admin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
+ assertNotNull(admin.getTableDescriptor(TableName.valueOf(tableName)));
HColumnDescriptor[] columnFamilies =
- admin.getTableDescriptor(Bytes.toBytes(tableName)).getColumnFamilies();
+ admin.getTableDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
assertEquals(BloomType.NONE, columnFamilies[0].getBloomFilterType());
try (Connection conn = DriverManager.getConnection(getUrl(), props);) {
@@ -136,8 +137,8 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
}
try (Connection conn = DriverManager.getConnection(getUrl(), props);) {
conn.createStatement().execute(ddl);
- assertNotEquals(null, admin.getTableDescriptor(
- SchemaUtil.getPhysicalTableName(tableName.getBytes(), true).getName()));
+ assertNotEquals(null, admin.getTableDescriptor(TableName.valueOf(
+ SchemaUtil.getPhysicalTableName(tableName.getBytes(), true).getName())));
} finally {
admin.close();
}
@@ -185,9 +186,9 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
Properties props = new Properties();
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.createStatement().execute(ddl);
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
HColumnDescriptor[] columnFamilies =
- admin.getTableDescriptor(Bytes.toBytes(tableName)).getColumnFamilies();
+ admin.getTableDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals(86400, columnFamilies[0].getTimeToLive());
}
@@ -238,9 +239,9 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
Properties props = new Properties();
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.createStatement().execute(ddl);
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
HColumnDescriptor[] columnFamilies =
- admin.getTableDescriptor(Bytes.toBytes(tableName)).getColumnFamilies();
+ admin.getTableDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals(86400, columnFamilies[0].getTimeToLive());
assertEquals("B", columnFamilies[0].getNameAsString());
@@ -264,9 +265,9 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
Properties props = new Properties();
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.createStatement().execute(ddl);
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
HColumnDescriptor[] columnFamilies =
- admin.getTableDescriptor(Bytes.toBytes(tableName)).getColumnFamilies();
+ admin.getTableDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
assertEquals(86400, columnFamilies[0].getTimeToLive());
@@ -292,9 +293,9 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
Properties props = new Properties();
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.createStatement().execute(ddl);
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
HColumnDescriptor[] columnFamilies =
- admin.getTableDescriptor(Bytes.toBytes(tableName)).getColumnFamilies();
+ admin.getTableDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
assertEquals(DEFAULT_REPLICATION_SCOPE, columnFamilies[0].getScope());
@@ -319,9 +320,9 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
Properties props = new Properties();
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.createStatement().execute(ddl);
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
HColumnDescriptor[] columnFamilies =
- admin.getTableDescriptor(Bytes.toBytes(tableName)).getColumnFamilies();
+ admin.getTableDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("B", columnFamilies[0].getNameAsString());
assertEquals(0, columnFamilies[0].getScope());
@@ -344,9 +345,9 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
Properties props = new Properties();
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.createStatement().execute(ddl);
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
HColumnDescriptor[] columnFamilies =
- admin.getTableDescriptor(Bytes.toBytes(tableName)).getColumnFamilies();
+ admin.getTableDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals("a", columnFamilies[0].getNameAsString());
assertEquals(10000, columnFamilies[0].getTimeToLive());
@@ -366,9 +367,9 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
Properties props = new Properties();
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.createStatement().execute(ddl);
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
HColumnDescriptor[] columnFamilies =
- admin.getTableDescriptor(Bytes.toBytes(tableName)).getColumnFamilies();
+ admin.getTableDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals("a", columnFamilies[0].getNameAsString());
assertEquals(10000, columnFamilies[0].getTimeToLive());
@@ -385,9 +386,9 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
Properties props = new Properties();
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.createStatement().execute(ddl);
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
HColumnDescriptor[] columnFamilies =
- admin.getTableDescriptor(Bytes.toBytes(tableName)).getColumnFamilies();
+ admin.getTableDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
assertEquals(BloomType.ROW, columnFamilies[0].getBloomFilterType());
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/DisableLocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DisableLocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DisableLocalIndexIT.java
index 01fc24c..e28c3a7 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DisableLocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DisableLocalIndexIT.java
@@ -26,7 +26,8 @@ import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.exception.SQLExceptionCode;
@@ -54,8 +55,8 @@ public class DisableLocalIndexIT extends ParallelStatsDisabledIT {
conn.createStatement().execute("CREATE TABLE " + tableName + " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) MULTI_TENANT=true");
conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('t1','x')");
conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('t2','y')");
- HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
- assertFalse(admin.tableExists(Bytes.toBytes(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + tableName)));
+ Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
+ assertFalse(admin.tableExists(TableName.valueOf(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + tableName)));
admin.close();
try {
Table t = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + tableName));
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropSchemaIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropSchemaIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropSchemaIT.java
index 5c5420c..97ab29a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropSchemaIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropSchemaIT.java
@@ -31,7 +31,7 @@ import java.util.Properties;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.NamespaceNotFoundException;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.QueryServices;
@@ -77,7 +77,7 @@ public class DropSchemaIT extends BaseUniqueNamesOwnClusterIT {
String normalizeSchemaIdentifier = SchemaUtil.normalizeIdentifier(schema);
String ddl = "DROP SCHEMA " + schema;
try (Connection conn = DriverManager.getConnection(getUrl(), props);
- HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
try {
conn.createStatement().execute(ddl);
fail();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java
index 714f80a..6a53906 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java
@@ -34,7 +34,7 @@ import java.util.Properties;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
@@ -68,7 +68,7 @@ public class DynamicColumnIT extends ParallelStatsDisabledIT {
tableName = generateUniqueName();
try (PhoenixConnection pconn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class)) {
ConnectionQueryServices services = pconn.getQueryServices();
- try (HBaseAdmin admin = services.getAdmin()) {
+ try (Admin admin = services.getAdmin()) {
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
htd.addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES));
htd.addFamily(new HColumnDescriptor(FAMILY_NAME_A));
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingAlterTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingAlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingAlterTableIT.java
index e090b98..0e0e555 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingAlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingAlterTableIT.java
@@ -26,8 +26,8 @@ import java.util.Properties;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.SchemaUtil;
@@ -58,8 +58,8 @@ public class FlappingAlterTableIT extends ParallelStatsDisabledIT {
conn1.createStatement().execute(ddl);
ddl = "ALTER TABLE " + dataTableFullName + " ADD CF.STRING VARCHAR";
conn1.createStatement().execute(ddl);
- try (HBaseAdmin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName)).getColumnFamilies();
+ try (Admin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(TableName.valueOf(dataTableFullName)).getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
assertEquals(HColumnDescriptor.DEFAULT_TTL, columnFamilies[0].getTimeToLive());
@@ -82,8 +82,8 @@ public class FlappingAlterTableIT extends ParallelStatsDisabledIT {
conn1.createStatement().execute(ddl);
ddl = "ALTER TABLE " + dataTableFullName + " ADD CF.STRING VARCHAR";
conn1.createStatement().execute(ddl);
- try (HBaseAdmin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
+ try (Admin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java
index 0d64be0..517cd6a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java
@@ -31,7 +31,7 @@ import java.util.concurrent.CountDownLatch;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
@@ -151,7 +151,7 @@ public class FlappingLocalIndexIT extends BaseLocalIndexIT {
ResultSet rs = conn1.createStatement().executeQuery("SELECT COUNT(*) FROM " + indexTableName);
assertTrue(rs.next());
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
int numRegions = admin.getTableRegions(physicalTableName).size();
String query = "SELECT * FROM " + tableName +" where v1 like 'a%'";
@@ -285,7 +285,7 @@ public class FlappingLocalIndexIT extends BaseLocalIndexIT {
ResultSet rs = conn1.createStatement().executeQuery("SELECT COUNT(*) FROM " + indexTableName);
assertTrue(rs.next());
assertEquals(4, rs.getInt(1));
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
org.apache.hadoop.hbase.client.Connection hbaseConn = admin.getConnection();
Table indexTable = hbaseConn.getTable(TableName.valueOf(indexPhysicalTableName));
Pair<byte[][], byte[][]> startEndKeys = hbaseConn.getRegionLocator(TableName.valueOf(indexPhysicalTableName)).getStartEndKeys();
@@ -330,7 +330,7 @@ public class FlappingLocalIndexIT extends BaseLocalIndexIT {
conn1.createStatement().execute("UPSERT INTO "+tableName+" values('j',2,4,2,'a')");
conn1.createStatement().execute("UPSERT INTO "+tableName+" values('q',3,1,1,'c')");
conn1.commit();
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
HTableDescriptor tableDescriptor = admin.getTableDescriptor(physicalTableName);
tableDescriptor.addCoprocessor(DeleyOpenRegionObserver.class.getName(), null,
QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY - 1, null);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java
index 409e98f..0781097 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java
@@ -32,7 +32,7 @@ import java.util.Properties;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.BaseTest;
@@ -111,7 +111,7 @@ public class LocalIndexSplitMergeIT extends BaseTest {
ResultSet rs = conn1.createStatement().executeQuery("SELECT * FROM " + tableName);
assertTrue(rs.next());
- HBaseAdmin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
+ Admin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
for (int i = 1; i < 5; i++) {
admin.split(physicalTableName, ByteUtil.concat(Bytes.toBytes(strings[3 * i])));
List<HRegionInfo> regionsOfUserTable =
@@ -212,7 +212,7 @@ public class LocalIndexSplitMergeIT extends BaseTest {
ResultSet rs = conn1.createStatement().executeQuery("SELECT * FROM " + tableName);
assertTrue(rs.next());
- HBaseAdmin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
+ Admin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
List<HRegionInfo> regionsOfUserTable =
MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
admin.getConnection(), physicalTableName, false);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
index fb78e1c..e8a4f80 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
@@ -57,7 +57,7 @@ public class MappingTableDataTypeIT extends ParallelStatsDisabledIT {
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
PhoenixConnection conn = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class);
- HBaseAdmin admin = conn.getQueryServices().getAdmin();
+ Admin admin = conn.getQueryServices().getAdmin();
try {
// Create table then get the single region for our new table.
HTableDescriptor descriptor = new HTableDescriptor(tableName);
@@ -104,7 +104,7 @@ public class MappingTableDataTypeIT extends ParallelStatsDisabledIT {
}
}
- private void insertData(final byte[] tableName, HBaseAdmin admin, Table t) throws IOException,
+ private void insertData(final byte[] tableName, Admin admin, Table t) throws IOException,
InterruptedException {
Put p = new Put(Bytes.toBytes("row"));
p.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("q1"), Bytes.toBytes("value1"));
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/NamespaceSchemaMappingIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NamespaceSchemaMappingIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NamespaceSchemaMappingIT.java
index d9a27f5..b0c681e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NamespaceSchemaMappingIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NamespaceSchemaMappingIT.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -61,7 +61,7 @@ public class NamespaceSchemaMappingIT extends ParallelStatsDisabledIT {
String phoenixFullTableName = schemaName + "." + tableName;
String hbaseFullTableName = schemaName + ":" + tableName;
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
admin.createNamespace(NamespaceDescriptor.create(namespace).build());
admin.createTable(new HTableDescriptor(TableName.valueOf(namespace, tableName))
.addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)));
@@ -106,8 +106,8 @@ public class NamespaceSchemaMappingIT extends ParallelStatsDisabledIT {
rs = conn.createStatement().executeQuery(query);
assertTrue(rs.next());
assertEquals(hbaseFullTableName, rs.getString(1));
- admin.disableTable(phoenixFullTableName);
- admin.deleteTable(phoenixFullTableName);
+ admin.disableTable(TableName.valueOf(phoenixFullTableName));
+ admin.deleteTable(TableName.valueOf(phoenixFullTableName));
conn.createStatement().execute("DROP TABLE " + phoenixFullTableName);
admin.close();
conn.close();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java
index 3b17ad1..5ece0bd 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java
@@ -35,9 +35,10 @@ import java.util.Properties;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Row;
@@ -72,7 +73,7 @@ public class NativeHBaseTypesIT extends ParallelStatsDisabledIT {
final byte[] tableBytes = tableName.getBytes();
final byte[] familyName = Bytes.toBytes(SchemaUtil.normalizeIdentifier("1"));
final byte[][] splits = new byte[][] {Bytes.toBytes(20), Bytes.toBytes(30)};
- HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).getAdmin();
+ Admin admin = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).getAdmin();
try {
HTableDescriptor descriptor = new HTableDescriptor(tableBytes);
HColumnDescriptor columnDescriptor = new HColumnDescriptor(familyName);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/ProductMetricsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ProductMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ProductMetricsIT.java
index 858a0fd..e673397 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ProductMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ProductMetricsIT.java
@@ -36,7 +36,8 @@ import java.util.Collections;
import java.util.List;
import java.util.Properties;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.QueryConstants;
@@ -1522,11 +1523,11 @@ public class ProductMetricsIT extends ParallelStatsDisabledIT {
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(getUrl(), props);
- HBaseAdmin admin = null;
+ Admin admin = null;
try {
initTableValues(tablename, tenantId, getSplits(tenantId));
admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
- admin.flush(SchemaUtil.getTableNameAsBytes(PRODUCT_METRICS_SCHEMA_NAME,tablename));
+ admin.flush(TableName.valueOf(SchemaUtil.getTableNameAsBytes(PRODUCT_METRICS_SCHEMA_NAME,tablename)));
String query = "SELECT SUM(TRANSACTIONS) FROM " + tablename + " WHERE FEATURE=?";
PreparedStatement statement = conn.prepareStatement(query);
statement.setString(1, F1);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
index af5a52a..c65ca63 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
@@ -47,7 +47,8 @@ import java.util.Properties;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -707,11 +708,11 @@ public class QueryDatabaseMetaDataIT extends ParallelStatsDisabledIT {
byte[] cfC = Bytes.toBytes("c");
byte[][] familyNames = new byte[][] { cfB, cfC };
byte[] htableName = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
- HBaseAdmin admin = pconn.getQueryServices().getAdmin();
+ Admin admin = pconn.getQueryServices().getAdmin();
try {
- admin.disableTable(htableName);
- admin.deleteTable(htableName);
- admin.enableTable(htableName);
+ admin.disableTable(TableName.valueOf(htableName));
+ admin.deleteTable(TableName.valueOf(htableName));
+ admin.enableTable(TableName.valueOf(htableName));
} catch (org.apache.hadoop.hbase.TableNotFoundException e) {
}
@@ -725,7 +726,7 @@ public class QueryDatabaseMetaDataIT extends ParallelStatsDisabledIT {
createMDTestTable(pconn, tableName,
"a." + HColumnDescriptor.KEEP_DELETED_CELLS + "=" + Boolean.TRUE);
- descriptor = admin.getTableDescriptor(htableName);
+ descriptor = admin.getTableDescriptor(TableName.valueOf(htableName));
assertEquals(3, descriptor.getColumnFamilies().length);
HColumnDescriptor cdA = descriptor.getFamily(cfA);
assertNotEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdA.getKeepDeletedCells());
@@ -786,10 +787,10 @@ public class QueryDatabaseMetaDataIT extends ParallelStatsDisabledIT {
byte[] cfC = Bytes.toBytes("c");
byte[][] familyNames = new byte[][] { cfB, cfC };
byte[] htableName = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
- try (HBaseAdmin admin = pconn.getQueryServices().getAdmin()) {
+ try (Admin admin = pconn.getQueryServices().getAdmin()) {
try {
- admin.disableTable(htableName);
- admin.deleteTable(htableName);
+ admin.disableTable(TableName.valueOf(htableName));
+ admin.deleteTable(TableName.valueOf(htableName));
} catch (org.apache.hadoop.hbase.TableNotFoundException e) {
}
@@ -865,7 +866,7 @@ public class QueryDatabaseMetaDataIT extends ParallelStatsDisabledIT {
Table htable =
pconn.getQueryServices()
- .getTable(SchemaUtil.getTableNameAsBytes(schemaName, tableName));
+ .getTable(TableName.valueOf(SchemaUtil.getTableNameAsBytes(schemaName, tableName)));
Put put = new Put(Bytes.toBytes("0"));
put.addColumn(cfB, Bytes.toBytes("COL1"), PInteger.INSTANCE.toBytes(1));
put.addColumn(cfC, Bytes.toBytes("COL2"), PLong.INSTANCE.toBytes(2));
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
index f172d00..8ea1876 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
@@ -35,21 +35,13 @@ import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.Statement;
-import java.util.Map;
import java.util.Properties;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.apache.phoenix.util.TestUtil;
-import org.junit.BeforeClass;
import org.junit.Test;
-import com.google.common.collect.Maps;
public class ReverseScanIT extends ParallelStatsDisabledIT {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/SetPropertyIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SetPropertyIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SetPropertyIT.java
index 7a7576d..d785063 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SetPropertyIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SetPropertyIT.java
@@ -32,8 +32,8 @@ import java.util.Properties;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeepDeletedCells;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.PTable;
@@ -94,8 +94,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
conn1.createStatement().execute(ddl);
ddl = "ALTER TABLE " + dataTableFullName + " SET REPLICATION_SCOPE=1";
conn1.createStatement().execute(ddl);
- try (HBaseAdmin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName))
+ try (Admin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(TableName.valueOf(dataTableFullName))
.getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
@@ -117,8 +117,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
conn1.createStatement().execute(ddl);
ddl = "ALTER TABLE " + dataTableFullName + " SET COMPACTION_ENABLED=FALSE";
conn1.createStatement().execute(ddl);
- try (HBaseAdmin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
+ try (Admin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
assertEquals(1, tableDesc.getColumnFamilies().length);
assertEquals("0", tableDesc.getColumnFamilies()[0].getNameAsString());
assertEquals(Boolean.toString(false), tableDesc.getValue(HTableDescriptor.COMPACTION_ENABLED));
@@ -139,8 +139,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
conn1.createStatement().execute(ddl);
ddl = "ALTER TABLE " + dataTableFullName + " SET COMPACTION_ENABLED = FALSE, REPLICATION_SCOPE = 1";
conn1.createStatement().execute(ddl);
- try (HBaseAdmin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
+ try (Admin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
@@ -168,8 +168,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
ddl = "ALTER TABLE " + dataTableFullName + " SET COMPACTION_ENABLED = FALSE, CF1.MIN_VERSIONS = 1, CF2.MIN_VERSIONS = 3, MIN_VERSIONS = 8, CF1.KEEP_DELETED_CELLS = true, KEEP_DELETED_CELLS = false";
conn.createStatement().execute(ddl);
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(3, columnFamilies.length);
@@ -387,8 +387,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
+ " SET COMPACTION_ENABLED = FALSE, CF.REPLICATION_SCOPE=1, IMMUTABLE_ROWS = TRUE, TTL=1000";
conn.createStatement().execute(ddl);
assertImmutableRows(conn, dataTableFullName, true);
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("CF", columnFamilies[0].getNameAsString());
@@ -418,8 +418,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
conn.createStatement().execute(ddl);
conn.createStatement().execute(
"ALTER TABLE " + dataTableFullName + " ADD CF.col3 integer CF.IN_MEMORY=true");
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName))
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(TableName.valueOf(dataTableFullName))
.getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
@@ -447,8 +447,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
"ALTER TABLE "
+ dataTableFullName
+ " ADD col4 integer, CF1.col5 integer, CF2.col6 integer IN_MEMORY=true, CF1.REPLICATION_SCOPE=1, CF2.IN_MEMORY=false ");
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName))
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(TableName.valueOf(dataTableFullName))
.getColumnFamilies();
assertEquals(3, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
@@ -481,8 +481,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
"ALTER TABLE "
+ dataTableFullName
+ " ADD col4 integer, CF1.col5 integer, CF2.col6 integer IN_MEMORY=true, CF1.REPLICATION_SCOPE=1, CF2.IN_MEMORY=false, XYZ.REPLICATION_SCOPE=1 ");
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName))
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(TableName.valueOf(dataTableFullName))
.getColumnFamilies();
assertEquals(3, columnFamilies.length);
assertEquals("CF1", columnFamilies[0].getNameAsString());
@@ -538,8 +538,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
"ALTER TABLE "
+ dataTableFullName
+ " ADD col4 integer, CF1.col5 integer, CF2.col6 integer, CF3.col7 integer CF1.REPLICATION_SCOPE=1, CF1.IN_MEMORY=false, IN_MEMORY=true ");
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName))
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(TableName.valueOf(dataTableFullName))
.getColumnFamilies();
assertEquals(4, columnFamilies.length);
assertEquals("CF1", columnFamilies[0].getNameAsString());
@@ -574,8 +574,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
"ALTER TABLE " + dataTableFullName + " ADD col4 integer XYZ.REPLICATION_SCOPE=1 ");
conn.createStatement()
.execute("ALTER TABLE " + dataTableFullName + " ADD XYZ.col5 integer IN_MEMORY=true ");
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName))
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(TableName.valueOf(dataTableFullName))
.getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("CF1", columnFamilies[0].getNameAsString());
@@ -601,8 +601,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
try {
conn.createStatement().execute(ddl);
conn.createStatement().execute("ALTER TABLE " + dataTableFullName + " ADD col2 integer IN_MEMORY=true");
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName))
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(TableName.valueOf(dataTableFullName))
.getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
@@ -631,8 +631,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
String ddl = "Alter table " + dataTableFullName + " add cf3.col5 integer, cf4.col6 integer in_memory=true";
conn.createStatement().execute(ddl);
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
assertTrue(tableDesc.isCompactionEnabled());
HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(5, columnFamilies.length);
@@ -670,8 +670,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
String ddl = "Alter table " + dataTableFullName + " add cf1.col5 integer in_memory=true";
conn.createStatement().execute(ddl);
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
assertTrue(tableDesc.isCompactionEnabled());
HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(3, columnFamilies.length);
@@ -747,8 +747,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
+ " CONSTRAINT NAME_PK PRIMARY KEY (id, col1, col2)"
+ " ) " + generateDDLOptions("TTL=86400, SALT_BUCKETS = 4, DEFAULT_COLUMN_FAMILY='XYZ'");
conn.createStatement().execute(ddl);
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals("XYZ", columnFamilies[0].getNameAsString());
@@ -757,8 +757,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
ddl = "ALTER TABLE " + dataTableFullName + " SET TTL=30";
conn.createStatement().execute(ddl);
conn.commit();
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals(30, columnFamilies[0].getTimeToLive());
@@ -785,8 +785,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
ddl = "ALTER TABLE " + dataTableFullName + " SET IN_MEMORY=true";
conn.createStatement().execute(ddl);
conn.commit();
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals(true, columnFamilies[0].isInMemory());
@@ -813,8 +813,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
ddl = "ALTER TABLE " + dataTableFullName + " SET IN_MEMORY=true";
conn.createStatement().execute(ddl);
conn.commit();
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals(true, columnFamilies[0].isInMemory());
@@ -841,8 +841,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
ddl = "ALTER TABLE " + dataTableFullName + " ADD COL3 INTEGER IN_MEMORY=true";
conn.createStatement().execute(ddl);
conn.commit();
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals(true, columnFamilies[0].isInMemory());
@@ -869,8 +869,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
ddl = "ALTER TABLE " + dataTableFullName + " ADD NEWCF.COL3 INTEGER IN_MEMORY=true";
conn.createStatement().execute(ddl);
conn.commit();
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("NEWCF", columnFamilies[0].getNameAsString());
@@ -899,8 +899,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
ddl = "ALTER TABLE " + dataTableFullName + " ADD NEWCF.COL3 INTEGER IN_MEMORY=true";
conn.createStatement().execute(ddl);
conn.commit();
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("NEWCF", columnFamilies[0].getNameAsString());
@@ -914,8 +914,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
ddl = "ALTER TABLE " + dataTableFullName + " SET TTL=1000";
conn.createStatement().execute(ddl);
conn.commit();
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("NEWCF", columnFamilies[0].getNameAsString());
@@ -932,8 +932,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
ddl = "ALTER TABLE " + dataTableFullName + " ADD COL3 INTEGER";
conn.createStatement().execute(ddl);
conn.commit();
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("NEWCF", columnFamilies[0].getNameAsString());
@@ -969,8 +969,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
}
ddl = "ALTER TABLE " + dataTableFullName + " SET UNKNOWN_PROP='ABC'";
conn.createStatement().execute(ddl);
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes(dataTableFullName));
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
assertEquals("ABC", tableDesc.getValue("UNKNOWN_PROP"));
}
} finally {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
index e4add9a..658ef92 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
@@ -31,7 +31,8 @@ import java.sql.SQLException;
import java.util.Properties;
import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;
@@ -86,9 +87,9 @@ public class SkipScanAfterManualSplitIT extends ParallelStatsDisabledIT {
}
conn.commit();
ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
- HBaseAdmin admin = services.getAdmin();
+ Admin admin = services.getAdmin();
try {
- admin.flush(tableName);
+ admin.flush(TableName.valueOf(tableName));
} finally {
admin.close();
}
@@ -104,9 +105,9 @@ public class SkipScanAfterManualSplitIT extends ParallelStatsDisabledIT {
ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
int nRegions = services.getAllTableRegions(tableNameBytes).size();
int nInitialRegions = nRegions;
- HBaseAdmin admin = services.getAdmin();
+ Admin admin = services.getAdmin();
try {
- admin.split(tableName);
+ admin.split(TableName.valueOf(tableName));
int nTries = 0;
while (nRegions == nInitialRegions && nTries < 10) {
Thread.sleep(1000);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
index d98bbe2..c4e4a9e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
@@ -34,7 +34,8 @@ import java.util.Collections;
import java.util.List;
import java.util.Properties;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.util.TestUtil;
import org.apache.phoenix.util.SchemaUtil;
@@ -529,7 +530,7 @@ public class SkipScanQueryIT extends ParallelStatsDisabledIT {
stmt.setString(3, "T0");
stmt.executeUpdate();
conn.commit();
- try (HBaseAdmin admin =
+ try (Admin admin =
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
/*
* The split key is 27 bytes instead of at least 30 bytes (CHAR(15) + CHAR(15)).
@@ -537,7 +538,7 @@ public class SkipScanQueryIT extends ParallelStatsDisabledIT {
* it ends up padding the split point bytes to 30.
*/
byte[] smallSplitKey = Bytes.toBytes("00Do0000000a8w10D5o000002Rhv");
- admin.split(Bytes.toBytes(tableName), smallSplitKey);
+ admin.split(TableName.valueOf(tableName), smallSplitKey);
}
ResultSet rs =
conn.createStatement().executeQuery("SELECT EXTENSION FROM " + tableName
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
index cae91a3..612bf3b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
@@ -37,7 +37,7 @@ import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.io.NullWritable;
@@ -195,10 +195,10 @@ public class TableSnapshotReadsMapReduceIT extends BaseUniqueNamesOwnClusterIT {
upsertData(tableName);
Connection conn = DriverManager.getConnection(getUrl());
- HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
+ Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
admin.snapshot(SNAPSHOT_NAME, TableName.valueOf(tableName));
// call flush to create new files in the region
- admin.flush(tableName);
+ admin.flush(TableName.valueOf(tableName));
List<HBaseProtos.SnapshotDescription> snapshots = admin.listSnapshots();
Assert.assertEquals(tableName, snapshots.get(0).getTable());
@@ -211,7 +211,7 @@ public class TableSnapshotReadsMapReduceIT extends BaseUniqueNamesOwnClusterIT {
public void deleteSnapshot(String tableName) throws Exception {
try (Connection conn = DriverManager.getConnection(getUrl());
- HBaseAdmin admin =
+ Admin admin =
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();) {
admin.deleteSnapshot(SNAPSHOT_NAME);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
index f8dfd65..078229c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
@@ -45,7 +45,7 @@ import java.util.List;
import java.util.Properties;
import java.util.Set;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -67,7 +67,7 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
public void testCreateTenantSpecificTable() throws Exception {
// ensure we didn't create a physical HBase table for the tenant-specific table
Connection conn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES));
- HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
+ Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
assertEquals(0, admin.listTables(TENANT_TABLE_NAME).length);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/693fa659/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
index 044c5ac..3481169 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
@@ -48,7 +48,8 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.apache.curator.shaded.com.google.common.collect.Sets;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
@@ -222,10 +223,10 @@ public class UpgradeIT extends ParallelStatsDisabledIT {
}
}
- HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
- assertTrue(admin.tableExists(phoenixFullTableName));
- assertTrue(admin.tableExists(schemaName + QueryConstants.NAME_SEPARATOR + indexName));
- assertTrue(admin.tableExists(MetaDataUtil.getViewIndexPhysicalName(Bytes.toBytes(phoenixFullTableName))));
+ Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
+ assertTrue(admin.tableExists(TableName.valueOf(phoenixFullTableName)));
+ assertTrue(admin.tableExists(TableName.valueOf(schemaName + QueryConstants.NAME_SEPARATOR + indexName)));
+ assertTrue(admin.tableExists(TableName.valueOf(MetaDataUtil.getViewIndexPhysicalName(Bytes.toBytes(phoenixFullTableName)))));
Properties props = new Properties();
props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(true));
props.setProperty(QueryServices.IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE, Boolean.toString(false));
@@ -249,10 +250,9 @@ public class UpgradeIT extends ParallelStatsDisabledIT {
admin = phxConn.getQueryServices().getAdmin();
String hbaseTableName = SchemaUtil.getPhysicalTableName(Bytes.toBytes(phoenixFullTableName), true)
.getNameAsString();
- assertTrue(admin.tableExists(hbaseTableName));
- assertTrue(admin.tableExists(Bytes.toBytes(hbaseTableName)));
- assertTrue(admin.tableExists(schemaName + QueryConstants.NAMESPACE_SEPARATOR + indexName));
- assertTrue(admin.tableExists(MetaDataUtil.getViewIndexPhysicalName(Bytes.toBytes(hbaseTableName))));
+ assertTrue(admin.tableExists(TableName.valueOf(hbaseTableName)));
+ assertTrue(admin.tableExists(TableName.valueOf(schemaName + QueryConstants.NAMESPACE_SEPARATOR + indexName)));
+ assertTrue(admin.tableExists(TableName.valueOf(MetaDataUtil.getViewIndexPhysicalName(Bytes.toBytes(hbaseTableName)))));
i = 0;
// validate data
for (String tableName : tableNames) {
@@ -631,7 +631,7 @@ public class UpgradeIT extends ParallelStatsDisabledIT {
(DriverManager.getConnection(getUrl())).unwrap(PhoenixConnection.class)) {
try (Table htable =
conn.getQueryServices().getTable(
- Bytes.toBytes(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME))) {
+ Bytes.toBytes(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME))) {
RowMutations mutations = new RowMutations(rowKey);
mutations.add(viewColumnDefinitionPut);
htable.mutateRow(mutations);