Posted to commits@phoenix.apache.org by ra...@apache.org on 2017/11/08 10:37:31 UTC
[1/3] phoenix git commit: PHOENIX-4303 Replace HTableInterface, HConnection with Table, Connection interfaces respectively (Rajeshbabu)
Repository: phoenix
Updated Branches:
refs/heads/5.x-HBase-2.0 136c7a629 -> 113904275
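For context before the diffs: HBase 2.0 removes the long-deprecated HTableInterface and HConnection client classes, which is what forces this patch. The sketch below is illustrative only (the table, family, and qualifier names are invented) and shows the replacement idiom every hunk converges on: a heavyweight, thread-safe Connection from ConnectionFactory, and cheap short-lived Table handles obtained from it.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableApiSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Connection is heavyweight and thread-safe; Table is a lightweight,
            // non-thread-safe handle that should be closed after use.
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("EXAMPLE"))) {
                Put put = new Put(Bytes.toBytes("row1"));
                put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
                table.put(put);
            }
        }
    }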
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
index c191d8d..ede2896 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
@@ -22,13 +22,13 @@ import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@@ -36,6 +36,8 @@ import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.coprocessor.Batch.Call;
import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
@@ -56,11 +58,11 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
private TephraTransactionContext tephraTransactionContext;
- public TephraTransactionTable(PhoenixTransactionContext ctx, HTableInterface hTable) {
+ public TephraTransactionTable(PhoenixTransactionContext ctx, Table hTable) {
this(ctx, hTable, null);
}
- public TephraTransactionTable(PhoenixTransactionContext ctx, HTableInterface hTable, PTable pTable) {
+ public TephraTransactionTable(PhoenixTransactionContext ctx, Table hTable, PTable pTable) {
assert(ctx instanceof TephraTransactionContext);
@@ -171,32 +173,6 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
transactionAwareHTable.close();
}
- @Override
- public long incrementColumnValue(byte[] row, byte[] family,
- byte[] qualifier, long amount, boolean writeToWAL)
- throws IOException {
- return transactionAwareHTable.incrementColumnValue(row, family, qualifier, amount, writeToWAL);
- }
-
- @Override
- public Boolean[] exists(List<Get> gets) throws IOException {
- return transactionAwareHTable.exists(gets);
- }
-
- @Override
- public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) {
- transactionAwareHTable.setAutoFlush(autoFlush, clearBufferOnFail);
- }
-
- @Override
- public void setAutoFlushTo(boolean autoFlush) {
- transactionAwareHTable.setAutoFlush(autoFlush);
- }
-
- @Override
- public Result getRowOrBefore(byte[] row, byte[] family) throws IOException {
- return transactionAwareHTable.getRowOrBefore(row, family);
- }
@Override
public TableName getName() {
@@ -215,12 +191,6 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
}
@Override
- public Object[] batch(List<? extends Row> actions) throws IOException,
- InterruptedException {
- return transactionAwareHTable.batch(actions);
- }
-
- @Override
public <R> void batchCallback(List<? extends Row> actions,
Object[] results, Callback<R> callback) throws IOException,
InterruptedException {
@@ -228,12 +198,6 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
}
@Override
- public <R> Object[] batchCallback(List<? extends Row> actions,
- Callback<R> callback) throws IOException, InterruptedException {
- return transactionAwareHTable.batchCallback(actions, callback);
- }
-
- @Override
public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
byte[] value, Put put) throws IOException {
return transactionAwareHTable.checkAndPut(row, family, qualifier, value, put);
@@ -329,22 +293,65 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
}
@Override
- public void setOperationTimeout(int i) {
-// transactionAwareHTable.setOperationTimeout(i);
+ public void setOperationTimeout(int operationTimeout) {
+ transactionAwareHTable.setOperationTimeout(operationTimeout);
}
@Override
public int getOperationTimeout() {
- return 0; //transactionAwareHTable.getOperationTimeout();
+ return transactionAwareHTable.getOperationTimeout();
}
@Override
- public void setRpcTimeout(int i) {
-// transactionAwareHTable.setRpcTimeout(i);
+ public void setRpcTimeout(int rpcTimeout) {
+ transactionAwareHTable.setRpcTimeout(rpcTimeout);
}
@Override
public int getRpcTimeout() {
- return 0; //transactionAwareHTable.getRpcTimeout();
+ return transactionAwareHTable.getRpcTimeout();
+ }
+
+ @Override
+ public TableDescriptor getDescriptor() throws IOException {
+ return transactionAwareHTable.getDescriptor();
+ }
+
+ @Override
+ public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
+ byte[] value, Put put) throws IOException {
+ return transactionAwareHTable.checkAndPut(row, family, qualifier, op, value, put);
+ }
+
+ @Override
+ public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
+ byte[] value, Delete delete) throws IOException {
+ return transactionAwareHTable.checkAndDelete(row, family, qualifier, op, value, delete);
+ }
+
+ @Override
+ public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
+ byte[] value, RowMutations mutation) throws IOException {
+ return transactionAwareHTable.checkAndMutate(row, family, qualifier, op, value, mutation);
+ }
+
+ @Override
+ public int getReadRpcTimeout() {
+ return transactionAwareHTable.getReadRpcTimeout();
+ }
+
+ @Override
+ public void setReadRpcTimeout(int readRpcTimeout) {
+ transactionAwareHTable.setReadRpcTimeout(readRpcTimeout);
+ }
+
+ @Override
+ public int getWriteRpcTimeout() {
+ return transactionAwareHTable.getWriteRpcTimeout();
+ }
+
+ @Override
+ public void setWriteRpcTimeout(int writeRpcTimeout) {
+ transactionAwareHTable.setWriteRpcTimeout(writeRpcTimeout);
}
}
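The new checkAndPut/checkAndDelete/checkAndMutate delegates above exist because HBase 2.0 adds Table overloads keyed on CompareOperator, replacing CompareFilter.CompareOp. A minimal sketch of calling one of them, with invented row and column data:

    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    class CheckAndPutSketch {
        // Atomically apply the Put only if the cell currently holds "old".
        static boolean putIfEquals(Table table) throws java.io.IOException {
            byte[] row = Bytes.toBytes("row1");
            byte[] cf = Bytes.toBytes("cf");
            byte[] q = Bytes.toBytes("q");
            Put put = new Put(row).addColumn(cf, q, Bytes.toBytes("new"));
            return table.checkAndPut(row, cf, q, CompareOperator.EQUAL,
                Bytes.toBytes("old"), put);
        }
    }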
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/transaction/TransactionFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TransactionFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TransactionFactory.java
index 8b3fc1d..9ca3d15 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TransactionFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TransactionFactory.java
@@ -19,7 +19,7 @@ package org.apache.phoenix.transaction;
import java.io.IOException;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.phoenix.jdbc.PhoenixConnection;
public class TransactionFactory {
@@ -123,13 +123,13 @@ public class TransactionFactory {
return ctx;
}
- public PhoenixTransactionalTable getTransactionalTable(PhoenixTransactionContext ctx, HTableInterface htable) {
+ public PhoenixTransactionalTable getTransactionalTable(PhoenixTransactionContext ctx, Table htable) {
PhoenixTransactionalTable table = null;
switch(tp) {
case Tephra:
- table = new TephraTransactionTable(ctx, htable);
+ table = new TephraTransactionTable(ctx, htable);
break;
case Omid:
// table = new OmidTransactionContext(contex, connection, subTask);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index 92645c0..c3182c5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -532,8 +533,8 @@ public class IndexUtil {
} else {
TableName dataTable =
TableName.valueOf(MetaDataUtil.getLocalIndexUserTableName(
- environment.getRegion().getTableDesc().getNameAsString()));
- HTableInterface table = null;
+ environment.getRegion().getTableDesc().getTableName().getNameAsString()));
+ Table table = null;
try {
table = environment.getTable(dataTable);
joinResult = table.get(get);
@@ -616,11 +617,6 @@ public class IndexUtil {
return cell.getTypeByte();
}
- @Override
- public long getMvccVersion() {
- return cell.getMvccVersion();
- }
-
@Override public long getSequenceId() {
return cell.getSequenceId();
}
@@ -656,6 +652,11 @@ public class IndexUtil {
}
@Override
+ public long getMvccVersion() {
+ return cell.getMvccVersion();
+ }
+
+ @Override
public byte[] getValue() {
return cell.getValue();
}
@@ -716,13 +717,13 @@ public class IndexUtil {
}
public static MetaDataMutationResult updateIndexState(String indexTableName, long minTimeStamp,
- HTableInterface metaTable, PIndexState newState) throws Throwable {
+ Table metaTable, PIndexState newState) throws Throwable {
byte[] indexTableKey = SchemaUtil.getTableKeyFromFullName(indexTableName);
return updateIndexState(indexTableKey, minTimeStamp, metaTable, newState);
}
public static MetaDataMutationResult updateIndexState(byte[] indexTableKey, long minTimeStamp,
- HTableInterface metaTable, PIndexState newState) throws Throwable {
+ Table metaTable, PIndexState newState) throws Throwable {
// Mimic the Put that gets generated by the client on an update of the index state
Put put = new Put(indexTableKey);
put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_STATE_BYTES,
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index 502ef37..69eb5bc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -34,8 +34,9 @@ import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
@@ -44,6 +45,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.ipc.RemoteException;
@@ -528,11 +530,11 @@ public class MetaDataUtil {
* @throws
*/
public static boolean tableRegionsOnline(Configuration conf, PTable table) {
- HConnection hcon = null;
+ Connection hcon = null;
try {
- hcon = HConnectionManager.getConnection(conf);
- List<HRegionLocation> locations = hcon.locateRegions(
+ hcon = ConnectionFactory.createConnection(conf);
+ List<HRegionLocation> locations = ((ClusterConnection)hcon).locateRegions(
org.apache.hadoop.hbase.TableName.valueOf(table.getPhysicalName().getBytes()));
for (HRegionLocation loc : locations) {
@@ -540,7 +542,7 @@ public class MetaDataUtil {
ServerName sn = loc.getServerName();
if (sn == null) continue;
- AdminService.BlockingInterface admin = hcon.getAdmin(sn);
+ AdminService.BlockingInterface admin = (BlockingInterface) ((ClusterConnection)hcon).getAdmin(sn);
GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
loc.getRegionInfo().getRegionName());
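A note on the casts above: locateRegions() and getAdmin(ServerName) are not on the public Connection interface in HBase 2.0; they live on the internal ClusterConnection, so the connection from ConnectionFactory is downcast to reach them. A hedged sketch of the pattern (table name invented); unlike the old cached HConnectionManager.getConnection(conf), createConnection(conf) builds a fresh connection that the caller must close:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ClusterConnection;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    class RegionLocationSketch {
        static void listRegions() throws IOException {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf)) {
                // ClusterConnection is audience-private API; the cast ties this
                // code to HBase internals, just as the hunk above does.
                List<HRegionLocation> locations =
                    ((ClusterConnection) conn).locateRegions(TableName.valueOf("EXAMPLE"));
                for (HRegionLocation loc : locations) {
                    System.out.println(loc.getRegionInfo().getRegionNameAsString()
                        + " on " + loc.getServerName());
                }
            }
        }
    }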
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index cd23dc5..a3c8787 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -28,9 +28,10 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.HTablePool;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.util.Bytes;
@@ -42,7 +43,6 @@ import org.apache.phoenix.hbase.index.util.VersionUtil;
import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;
-@SuppressWarnings("deprecation")
public class ServerUtil {
private static final int COPROCESSOR_SCAN_WORKS = VersionUtil.encodeVersion("0.98.6");
@@ -157,14 +157,13 @@ public class ServerUtil {
* This code works around HBASE-11837 which causes HTableInterfaces retrieved from
* RegionCoprocessorEnvironment to not read local data.
*/
- private static HTableInterface getTableFromSingletonPool(RegionCoprocessorEnvironment env, byte[] tableName) throws IOException {
+ private static Table getTableFromSingletonPool(RegionCoprocessorEnvironment env, TableName tableName) throws IOException {
// It's ok to not ever do a pool.close() as we're storing a single
// table only. The HTablePool holds no other resources than this table
// which will be closed itself when it's no longer needed.
- @SuppressWarnings("resource")
- HTablePool pool = new HTablePool(env.getConfiguration(),1);
+ Connection conn = ConnectionFactory.createConnection(env.getConfiguration());
try {
- return pool.getTable(tableName);
+ return conn.getTable(tableName);
} catch (RuntimeException t) {
// handle cases that an IOE is wrapped inside a RuntimeException like HTableInterface#createHTableInterface
if(t.getCause() instanceof IOException) {
@@ -175,16 +174,16 @@ public class ServerUtil {
}
}
- public static HTableInterface getHTableForCoprocessorScan (RegionCoprocessorEnvironment env, HTableInterface writerTable) throws IOException {
+ public static Table getHTableForCoprocessorScan (RegionCoprocessorEnvironment env, Table writerTable) throws IOException {
if (coprocessorScanWorks(env)) {
return writerTable;
}
- return getTableFromSingletonPool(env, writerTable.getTableName());
+ return getTableFromSingletonPool(env, writerTable.getName());
}
- public static HTableInterface getHTableForCoprocessorScan (RegionCoprocessorEnvironment env, byte[] tableName) throws IOException {
+ public static Table getHTableForCoprocessorScan (RegionCoprocessorEnvironment env, TableName tableName) throws IOException {
if (coprocessorScanWorks(env)) {
- return env.getTable(TableName.valueOf(tableName));
+ return env.getTable(tableName);
}
return getTableFromSingletonPool(env, tableName);
}
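The comment retained above still talks about an HTablePool, but after this hunk the method creates a Connection per call and never closes it, since the returned Table has to outlive the method. For ordinary call sites that own both resources, the conventional HBase 2.0 scoping looks like the sketch below (illustrative names):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;

    class ScopedTableSketch {
        static Result readRow(Configuration conf, TableName name, byte[] row)
                throws IOException {
            // Closing the Connection also invalidates Tables created from it,
            // so both are scoped together here.
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(name)) {
                return table.get(new Get(row));
            }
        }
    }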
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
index 01b775e..f437087 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/TransactionUtil.java
@@ -22,7 +22,7 @@ import java.sql.SQLException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.exception.SQLExceptionInfo;
@@ -50,7 +50,7 @@ public class TransactionUtil {
return serverTimeStamp / TransactionFactory.getTransactionFactory().getTransactionContext().getMaxTransactionsPerSecond();
}
- public static PhoenixTransactionalTable getPhoenixTransactionTable(PhoenixTransactionContext phoenixTransactionContext, HTableInterface htable, PTable pTable) {
+ public static PhoenixTransactionalTable getPhoenixTransactionTable(PhoenixTransactionContext phoenixTransactionContext, Table htable, PTable pTable) {
return new TephraTransactionTable(phoenixTransactionContext, htable, pTable);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index f5825b4..33ad7e5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -74,6 +74,7 @@ import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
@@ -82,12 +83,12 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.regionserver.LocalIndexSplitter;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
@@ -208,8 +209,8 @@ public class UpgradeUtil {
private static void deleteSequenceSnapshot(HBaseAdmin admin) throws SQLException {
byte[] tableName = getSequenceSnapshotName();
try {
- admin.disableTable(tableName);;
- admin.deleteTable(tableName);
+ admin.disableTable(TableName.valueOf(tableName));
+ admin.deleteTable(TableName.valueOf(tableName));
} catch (IOException e) {
throw ServerUtil.parseServerException(e);
}
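This hunk is an instance of a pattern repeated throughout the patch: the byte[]-name overloads of the admin API are gone in HBase 2.0, so raw names get wrapped in TableName.valueOf() at each call site. A small sketch (method and names invented) that also computes the TableName once instead of twice:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    class DropTableSketch {
        static void dropTable(Admin admin, byte[] rawName) throws IOException {
            TableName name = TableName.valueOf(rawName);  // wrap once, reuse
            if (admin.tableExists(name)) {
                admin.disableTable(name);
                admin.deleteTable(name);
            }
        }
    }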
@@ -225,40 +226,40 @@ public class UpgradeUtil {
scan.setRaw(true);
scan.setMaxVersions(MetaDataProtocol.DEFAULT_MAX_META_DATA_VERSIONS);
ResultScanner scanner = null;
- HTableInterface source = null;
- HTableInterface target = null;
+ Table source = null;
+ Table target = null;
try {
source = conn.getQueryServices().getTable(sourceName);
target = conn.getQueryServices().getTable(targetName);
scanner = source.getScanner(scan);
Result result;
while ((result = scanner.next()) != null) {
- for (KeyValue keyValue : result.raw()) {
- sizeBytes += keyValue.getLength();
- if (KeyValue.Type.codeToType(keyValue.getType()) == KeyValue.Type.Put) {
+ for (Cell keyValue : result.rawCells()) {
+ sizeBytes += CellUtil.estimatedSerializedSizeOf(keyValue);
+ if (KeyValue.Type.codeToType(keyValue.getTypeByte()) == KeyValue.Type.Put) {
// Put new value
- Put put = new Put(keyValue.getRow());
+ Put put = new Put(keyValue.getRowArray(), keyValue.getRowOffset(), keyValue.getRowLength());
put.add(keyValue);
mutations.add(put);
- } else if (KeyValue.Type.codeToType(keyValue.getType()) == KeyValue.Type.Delete){
+ } else if (KeyValue.Type.codeToType(keyValue.getTypeByte()) == KeyValue.Type.Delete){
// Copy delete marker using new key so that it continues
// to delete the key value preceding it that will be updated
// as well.
- Delete delete = new Delete(keyValue.getRow());
+ Delete delete = new Delete(keyValue.getRowArray(), keyValue.getRowOffset(), keyValue.getRowLength());
delete.addDeleteMarker(keyValue);
mutations.add(delete);
}
}
if (sizeBytes >= batchSizeBytes) {
logger.info("Committing bactch of temp rows");
- target.batch(mutations);
+ target.batch(mutations, null);
mutations.clear();
sizeBytes = 0;
}
}
if (!mutations.isEmpty()) {
logger.info("Committing last bactch of temp rows");
- target.batch(mutations);
+ target.batch(mutations, null);
}
logger.info("Successfully completed copy");
} catch (SQLException e) {
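Two HBase 2.0 shifts meet in the hunk above: Result.raw() (a KeyValue[]) is replaced by rawCells() (a Cell[]), with type and row bytes read through Cell accessors, and the one-argument Table.batch(List) is replaced by batch(List, Object[]) with a null results array when results are not needed. A hedged sketch of the Cell-side iteration (scan and table setup assumed to exist elsewhere):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Row;

    class RawCellSketch {
        static List<Row> putsFrom(Result result) throws IOException {
            List<Row> mutations = new ArrayList<>();
            for (Cell cell : result.rawCells()) {
                if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.Put) {
                    // Row bytes now come as (array, offset, length) slices.
                    Put put = new Put(cell.getRowArray(), cell.getRowOffset(),
                        cell.getRowLength());
                    put.add(cell);  // re-attach the existing cell verbatim
                    mutations.add(put);
                }
            }
            return mutations;
        }
    }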
@@ -293,11 +294,11 @@ public class UpgradeUtil {
return;
}
logger.warn("Pre-splitting SYSTEM.SEQUENCE table " + nSaltBuckets + "-ways. This may take some time - please do not close window.");
- HTableDescriptor desc = admin.getTableDescriptor(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
+ HTableDescriptor desc = admin.getTableDescriptor(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES));
createSequenceSnapshot(admin, conn);
snapshotCreated = true;
- admin.disableTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME);
- admin.deleteTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME);
+ admin.disableTable(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME));
+ admin.deleteTable(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME));
byte[][] splitPoints = SaltingUtil.getSalteByteSplitPoints(nSaltBuckets);
admin.createTable(desc, splitPoints);
restoreSequenceSnapshot(admin, conn);
@@ -356,7 +357,7 @@ public class UpgradeUtil {
if(dataTableDesc.getFamily(Bytes.toBytes(localIndexCf))==null){
HColumnDescriptor colDef =
new HColumnDescriptor(localIndexCf);
- for(Entry<ImmutableBytesWritable, ImmutableBytesWritable>keyValue: cf.getValues().entrySet()){
+ for(Entry<ImmutableBytesWritable, ImmutableBytesWritable> keyValue: cf.getValues().entrySet()){
colDef.setValue(keyValue.getKey().copyBytes(), keyValue.getValue().copyBytes());
}
dataTableDesc.addFamily(colDef);
@@ -371,7 +372,7 @@ public class UpgradeUtil {
}
}
if(modifyTable) {
- admin.modifyTable(dataTableDesc.getName(), dataTableDesc);
+ admin.modifyTable(dataTableDesc.getTableName(), dataTableDesc);
}
}
admin.disableTables(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX+".*");
@@ -644,7 +645,7 @@ public class UpgradeUtil {
logger.info("Upgrading SYSTEM.SEQUENCE table");
byte[] seqTableKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE);
- HTableInterface sysTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table sysTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
try {
logger.info("Setting SALT_BUCKETS property of SYSTEM.SEQUENCE to " + SaltingUtil.MAX_BUCKET_NUM);
KeyValue saltKV = KeyValueUtil.newKeyValue(seqTableKey,
@@ -697,7 +698,7 @@ public class UpgradeUtil {
Scan scan = new Scan();
scan.setRaw(true);
scan.setMaxVersions(MetaDataProtocol.DEFAULT_MAX_META_DATA_VERSIONS);
- HTableInterface seqTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
+ Table seqTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
try {
boolean committed = false;
logger.info("Adding salt byte to all SYSTEM.SEQUENCE rows");
@@ -705,14 +706,14 @@ public class UpgradeUtil {
try {
Result result;
while ((result = scanner.next()) != null) {
- for (KeyValue keyValue : result.raw()) {
+ for (Cell keyValue : result.rawCells()) {
KeyValue newKeyValue = addSaltByte(keyValue, nSaltBuckets);
if (newKeyValue != null) {
sizeBytes += newKeyValue.getLength();
- if (KeyValue.Type.codeToType(newKeyValue.getType()) == KeyValue.Type.Put) {
+ if (KeyValue.Type.codeToType(newKeyValue.getTypeByte()) == KeyValue.Type.Put) {
// Delete old value
- byte[] buf = keyValue.getBuffer();
- Delete delete = new Delete(keyValue.getRow());
+ byte[] buf = keyValue.getRowArray();
+ Delete delete = new Delete(keyValue.getRowArray(), keyValue.getRowOffset(), keyValue.getRowLength());
KeyValue deleteKeyValue = new KeyValue(buf, keyValue.getRowOffset(), keyValue.getRowLength(),
buf, keyValue.getFamilyOffset(), keyValue.getFamilyLength(),
buf, keyValue.getQualifierOffset(), keyValue.getQualifierLength(),
@@ -722,21 +723,21 @@ public class UpgradeUtil {
mutations.add(delete);
sizeBytes += deleteKeyValue.getLength();
// Put new value
- Put put = new Put(newKeyValue.getRow());
+ Put put = new Put(newKeyValue.getRowArray(), newKeyValue.getRowOffset(), newKeyValue.getRowLength());
put.add(newKeyValue);
mutations.add(put);
- } else if (KeyValue.Type.codeToType(newKeyValue.getType()) == KeyValue.Type.Delete){
+ } else if (KeyValue.Type.codeToType(newKeyValue.getTypeByte()) == KeyValue.Type.Delete){
// Copy delete marker using new key so that it continues
// to delete the key value preceding it that will be updated
// as well.
- Delete delete = new Delete(newKeyValue.getRow());
+ Delete delete = new Delete(newKeyValue.getRowArray(), newKeyValue.getRowOffset(), newKeyValue.getRowLength());
delete.addDeleteMarker(newKeyValue);
mutations.add(delete);
}
}
if (sizeBytes >= batchSizeBytes) {
logger.info("Committing bactch of SYSTEM.SEQUENCE rows");
- seqTable.batch(mutations);
+ seqTable.batch(mutations, null);
mutations.clear();
sizeBytes = 0;
committed = true;
@@ -745,7 +746,7 @@ public class UpgradeUtil {
}
if (!mutations.isEmpty()) {
logger.info("Committing last bactch of SYSTEM.SEQUENCE rows");
- seqTable.batch(mutations);
+ seqTable.batch(mutations, null);
}
preSplitSequenceTable(conn, nSaltBuckets);
logger.info("Successfully completed upgrade of SYSTEM.SEQUENCE");
@@ -803,8 +804,8 @@ public class UpgradeUtil {
}
@SuppressWarnings("deprecation")
- private static KeyValue addSaltByte(KeyValue keyValue, int nSaltBuckets) {
- byte[] buf = keyValue.getBuffer();
+ private static KeyValue addSaltByte(Cell keyValue, int nSaltBuckets) {
+ byte[] buf = keyValue.getRowArray();
int length = keyValue.getRowLength();
int offset = keyValue.getRowOffset();
boolean isViewSeq = length > SEQ_PREFIX_BYTES.length && Bytes.compareTo(SEQ_PREFIX_BYTES, 0, SEQ_PREFIX_BYTES.length, buf, offset, SEQ_PREFIX_BYTES.length) == 0;
@@ -834,7 +835,7 @@ public class UpgradeUtil {
return new KeyValue(newBuf, 0, newBuf.length,
buf, keyValue.getFamilyOffset(), keyValue.getFamilyLength(),
buf, keyValue.getQualifierOffset(), keyValue.getQualifierLength(),
- keyValue.getTimestamp(), KeyValue.Type.codeToType(keyValue.getType()),
+ keyValue.getTimestamp(), KeyValue.Type.codeToType(keyValue.getTypeByte()),
buf, keyValue.getValueOffset(), keyValue.getValueLength());
}
@@ -1440,9 +1441,9 @@ public class UpgradeUtil {
String msg = "Taking snapshot of physical table " + physicalName + " prior to upgrade...";
System.out.println(msg);
logger.info(msg);
- admin.disableTable(physicalName);
- admin.snapshot(snapshotName, physicalName);
- admin.enableTable(physicalName);
+ admin.disableTable(TableName.valueOf(physicalName));
+ admin.snapshot(snapshotName, TableName.valueOf(physicalName));
+ admin.enableTable(TableName.valueOf(physicalName));
restoreSnapshot = true;
}
String escapedTableName = SchemaUtil.getEscapedTableName(schemaName, tableName);
@@ -1515,9 +1516,9 @@ public class UpgradeUtil {
boolean restored = false;
try {
if (!success && restoreSnapshot) {
- admin.disableTable(physicalName);
+ admin.disableTable(TableName.valueOf(physicalName));
admin.restoreSnapshot(snapshotName, false);
- admin.enableTable(physicalName);
+ admin.enableTable(TableName.valueOf(physicalName));
String msg = "Restored snapshot of " + physicalName + " due to failure of upgrade";
System.out.println(msg);
logger.info(msg);
@@ -1664,7 +1665,7 @@ public class UpgradeUtil {
tableMetadata.add(put);
}
- public static boolean truncateStats(HTableInterface metaTable, HTableInterface statsTable)
+ public static boolean truncateStats(Table metaTable, Table statsTable)
throws IOException, InterruptedException {
byte[] statsTableKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME,
PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE);
@@ -1693,10 +1694,10 @@ public class UpgradeUtil {
int count = 0;
while ((r = statsScanner.next()) != null) {
Delete delete = null;
- for (KeyValue keyValue : r.raw()) {
- if (KeyValue.Type.codeToType(keyValue.getType()) == KeyValue.Type.Put) {
+ for (Cell keyValue : r.rawCells()) {
+ if (KeyValue.Type.codeToType(keyValue.getTypeByte()) == KeyValue.Type.Put) {
if (delete == null) {
- delete = new Delete(keyValue.getRow());
+ delete = new Delete(keyValue.getRowArray(), keyValue.getRowOffset(), keyValue.getRowLength());
}
KeyValue deleteKeyValue = new KeyValue(keyValue.getRowArray(), keyValue.getRowOffset(),
keyValue.getRowLength(), keyValue.getFamilyArray(), keyValue.getFamilyOffset(),
@@ -1709,7 +1710,7 @@ public class UpgradeUtil {
if (delete != null) {
mutations.add(delete);
if (count > 10) {
- statsTable.batch(mutations);
+ statsTable.batch(mutations, null);
mutations.clear();
count = 0;
}
@@ -1717,7 +1718,7 @@ public class UpgradeUtil {
}
}
if (!mutations.isEmpty()) {
- statsTable.batch(mutations);
+ statsTable.batch(mutations, null);
}
return true;
}
@@ -1725,7 +1726,7 @@ public class UpgradeUtil {
return false;
}
- private static void mapTableToNamespace(HBaseAdmin admin, HTableInterface metatable, String srcTableName,
+ private static void mapTableToNamespace(HBaseAdmin admin, Table metatable, String srcTableName,
String destTableName, ReadOnlyProps props, Long ts, String phoenixTableName, PTableType pTableType,PName tenantId)
throws SnapshotCreationException, IllegalArgumentException, IOException, InterruptedException,
SQLException {
@@ -1786,7 +1787,7 @@ public class UpgradeUtil {
* Method to map existing phoenix table to a namespace. Should not be use if tables has views and indexes ,instead
* use map table utility in psql.py
*/
- public static void mapTableToNamespace(HBaseAdmin admin, HTableInterface metatable, String tableName,
+ public static void mapTableToNamespace(HBaseAdmin admin, Table metatable, String tableName,
ReadOnlyProps props, Long ts, PTableType pTableType, PName tenantId) throws SnapshotCreationException,
IllegalArgumentException, IOException, InterruptedException, SQLException {
String destTablename = SchemaUtil
@@ -1803,7 +1804,7 @@ public class UpgradeUtil {
readOnlyProps)) { throw new IllegalArgumentException(
QueryServices.IS_NAMESPACE_MAPPING_ENABLED + " is not enabled!!"); }
try (HBaseAdmin admin = conn.getQueryServices().getAdmin();
- HTableInterface metatable = conn.getQueryServices()
+ Table metatable = conn.getQueryServices()
.getTable(SchemaUtil
.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, readOnlyProps)
.getName());) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/FakeTableFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/FakeTableFactory.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/FakeTableFactory.java
index 4483a7f..a5c0883 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/FakeTableFactory.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/FakeTableFactory.java
@@ -22,25 +22,25 @@ import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ExecutorService;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.phoenix.hbase.index.table.HTableFactory;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
/**
* Simple table factory that just looks up the tables based on name. Useful for mocking up
- * {@link HTableInterface}s without having to mock up the factory too.
+ * {@link Table}s without having to mock up the factory too.
*/
class FakeTableFactory implements HTableFactory {
boolean shutdown = false;
- private Map<ImmutableBytesPtr, HTableInterface> tables;
+ private Map<ImmutableBytesPtr, Table> tables;
- public FakeTableFactory(Map<ImmutableBytesPtr, HTableInterface> tables) {
+ public FakeTableFactory(Map<ImmutableBytesPtr, Table> tables) {
this.tables = tables;
}
@Override
- public HTableInterface getTable(ImmutableBytesPtr tablename) throws IOException {
+ public Table getTable(ImmutableBytesPtr tablename) throws IOException {
return getTable(tablename, null);
}
@@ -50,7 +50,7 @@ class FakeTableFactory implements HTableFactory {
}
@Override
- public HTableInterface getTable(ImmutableBytesPtr tablename, ExecutorService pool) throws IOException {
+ public Table getTable(ImmutableBytesPtr tablename, ExecutorService pool) throws IOException {
return this.tables.get(tablename);
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
index 091bbf7..f6e00cc 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
@@ -39,9 +39,9 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.util.Bytes;
@@ -100,7 +100,7 @@ public class TestIndexWriter {
Mockito.when(e.getConfiguration()).thenReturn(conf);
Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String,Object>());
ExecutorService exec = Executors.newFixedThreadPool(1);
- Map<ImmutableBytesPtr, HTableInterface> tables = new HashMap<ImmutableBytesPtr, HTableInterface>();
+ Map<ImmutableBytesPtr, Table> tables = new HashMap<ImmutableBytesPtr, Table>();
FakeTableFactory factory = new FakeTableFactory(tables);
byte[] tableName = this.testName.getTableName();
@@ -109,7 +109,7 @@ public class TestIndexWriter {
Collection<Pair<Mutation, byte[]>> indexUpdates = Arrays.asList(new Pair<Mutation, byte[]>(m,
tableName));
- HTableInterface table = Mockito.mock(HTableInterface.class);
+ Table table = Mockito.mock(Table.class);
final boolean[] completed = new boolean[] { false };
Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
@@ -150,7 +150,7 @@ public class TestIndexWriter {
Abortable abort = new StubAbortable();
// single thread factory so the older request gets queued
ExecutorService exec = Executors.newFixedThreadPool(1);
- Map<ImmutableBytesPtr, HTableInterface> tables = new HashMap<ImmutableBytesPtr, HTableInterface>();
+ Map<ImmutableBytesPtr, Table> tables = new HashMap<ImmutableBytesPtr, Table>();
RegionCoprocessorEnvironment e =Mockito.mock(RegionCoprocessorEnvironment.class);
Configuration conf =new Configuration();
Mockito.when(e.getConfiguration()).thenReturn(conf);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
index b620cca..8573fb1 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
@@ -31,9 +31,9 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.VersionInfo;
@@ -65,7 +65,7 @@ public class TestParalleIndexWriter {
Mockito.when(e.getConfiguration()).thenReturn(conf);
Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String,Object>());
FakeTableFactory factory = new FakeTableFactory(
- Collections.<ImmutableBytesPtr, HTableInterface> emptyMap());
+ Collections.<ImmutableBytesPtr, Table> emptyMap());
TrackingParallelWriterIndexCommitter writer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
Abortable mockAbort = Mockito.mock(Abortable.class);
Stoppable mockStop = Mockito.mock(Stoppable.class);
@@ -86,8 +86,8 @@ public class TestParalleIndexWriter {
Abortable abort = new StubAbortable();
Stoppable stop = Mockito.mock(Stoppable.class);
ExecutorService exec = Executors.newFixedThreadPool(1);
- Map<ImmutableBytesPtr, HTableInterface> tables =
- new LinkedHashMap<ImmutableBytesPtr, HTableInterface>();
+ Map<ImmutableBytesPtr, Table> tables =
+ new LinkedHashMap<ImmutableBytesPtr, Table>();
FakeTableFactory factory = new FakeTableFactory(tables);
RegionCoprocessorEnvironment e =Mockito.mock(RegionCoprocessorEnvironment.class);
Configuration conf =new Configuration();
@@ -100,7 +100,7 @@ public class TestParalleIndexWriter {
ArrayListMultimap.<HTableInterfaceReference, Mutation> create();
indexUpdates.put(new HTableInterfaceReference(tableName), m);
- HTableInterface table = Mockito.mock(HTableInterface.class);
+ Table table = Mockito.mock(Table.class);
final boolean[] completed = new boolean[] { false };
Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
@@ -111,7 +111,7 @@ public class TestParalleIndexWriter {
return null;
}
});
- Mockito.when(table.getTableName()).thenReturn(test.getTableName());
+ Mockito.when(table.getName()).thenReturn(org.apache.hadoop.hbase.TableName.valueOf(test.getTableName()));
// add the table to the set of tables, so its returned to the writer
tables.put(tableName, table);
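The mock stubbing change above follows directly from the interface change: HTableInterface.getTableName() returned raw bytes, while Table.getName() returns a TableName. A minimal sketch of the new stub (helper name invented):

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Table;

    class MockTableSketch {
        static Table mockTable(byte[] rawName) {
            Table table = mock(Table.class);
            // getName() replaces the removed byte[]-returning getTableName().
            when(table.getName()).thenReturn(TableName.valueOf(rawName));
            return table;
        }
    }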
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
index 2744ee6..2377ff1 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
@@ -31,9 +31,9 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.VersionInfo;
@@ -61,7 +61,7 @@ public class TestParalleWriterIndexCommitter {
public void testCorrectlyCleansUpResources() throws Exception{
ExecutorService exec = Executors.newFixedThreadPool(1);
FakeTableFactory factory = new FakeTableFactory(
- Collections.<ImmutableBytesPtr, HTableInterface> emptyMap());
+ Collections.<ImmutableBytesPtr, Table> emptyMap());
TrackingParallelWriterIndexCommitter writer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
Abortable mockAbort = Mockito.mock(Abortable.class);
Stoppable mockStop = Mockito.mock(Stoppable.class);
@@ -90,8 +90,8 @@ public class TestParalleWriterIndexCommitter {
Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String,Object>());
Stoppable stop = Mockito.mock(Stoppable.class);
ExecutorService exec = Executors.newFixedThreadPool(1);
- Map<ImmutableBytesPtr, HTableInterface> tables =
- new LinkedHashMap<ImmutableBytesPtr, HTableInterface>();
+ Map<ImmutableBytesPtr, Table> tables =
+ new LinkedHashMap<ImmutableBytesPtr, Table>();
FakeTableFactory factory = new FakeTableFactory(tables);
ImmutableBytesPtr tableName = new ImmutableBytesPtr(this.test.getTableName());
@@ -101,7 +101,7 @@ public class TestParalleWriterIndexCommitter {
ArrayListMultimap.<HTableInterfaceReference, Mutation> create();
indexUpdates.put(new HTableInterfaceReference(tableName), m);
- HTableInterface table = Mockito.mock(HTableInterface.class);
+ Table table = Mockito.mock(Table.class);
final boolean[] completed = new boolean[] { false };
Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
@@ -112,7 +112,7 @@ public class TestParalleWriterIndexCommitter {
return null;
}
});
- Mockito.when(table.getTableName()).thenReturn(test.getTableName());
+ Mockito.when(table.getName()).thenReturn(org.apache.hadoop.hbase.TableName.valueOf(test.getTableName()));
// add the table to the set of tables, so its returned to the writer
tables.put(tableName, table);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
index f3fb7df..b6f1bef 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -450,7 +451,7 @@ public class TestUtil {
public static void clearMetaDataCache(Connection conn) throws Throwable {
PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
- HTableInterface htable = pconn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table htable = pconn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
htable.coprocessorService(MetaDataService.class, HConstants.EMPTY_START_ROW,
HConstants.EMPTY_END_ROW, new Batch.Call<MetaDataService, ClearCacheResponse>() {
@Override
@@ -781,7 +782,7 @@ public class TestUtil {
if (table.isTransactional()) {
mutationState.startTransaction();
}
- try (HTableInterface htable = mutationState.getHTable(table)) {
+ try (Table htable = mutationState.getHTable(table)) {
byte[] markerRowKey = Bytes.toBytes("TO_DELETE");
Put put = new Put(markerRowKey);
@@ -808,7 +809,7 @@ public class TestUtil {
scan.setStopRow(Bytes.add(markerRowKey, new byte[] { 0 }));
scan.setRaw(true);
- try (HTableInterface htableForRawScan = services.getTable(Bytes.toBytes(tableName))) {
+ try (Table htableForRawScan = services.getTable(Bytes.toBytes(tableName))) {
ResultScanner scanner = htableForRawScan.getScanner(scan);
List<Result> results = Lists.newArrayList(scanner);
LOG.info("Results: " + results);
@@ -832,7 +833,7 @@ public class TestUtil {
conn.createStatement().execute("create table " + tableName + TestUtil.TEST_TABLE_SCHEMA + "TRANSACTIONAL=true");
}
- public static void dumpTable(HTableInterface table) throws IOException {
+ public static void dumpTable(Table table) throws IOException {
System.out.println("************ dumping " + table + " **************");
Scan s = new Scan();
s.setRaw(true);
@@ -852,7 +853,7 @@ public class TestUtil {
}
public static void dumpIndexStatus(Connection conn, String indexName) throws IOException, SQLException {
- try (HTableInterface table = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES)) {
+ try (Table table = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES)) {
System.out.println("************ dumping index status for " + indexName + " **************");
Scan s = new Scan();
s.setRaw(true);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-hive/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputFormat.java
----------------------------------------------------------------------
diff --git a/phoenix-hive/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputFormat.java b/phoenix-hive/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputFormat.java
index f0a5dd6..f9330ef 100644
--- a/phoenix-hive/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputFormat.java
+++ b/phoenix-hive/src/main/java/org/apache/phoenix/hive/mapreduce/PhoenixInputFormat.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.hive.mapreduce;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
+
import java.io.IOException;
import java.sql.Connection;
import java.sql.Statement;
@@ -26,6 +27,7 @@ import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Properties;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -33,8 +35,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
@@ -150,7 +151,7 @@ public class PhoenixInputFormat<T extends DBWritable> implements InputFormat<Wri
setScanCacheSize(jobConf);
// Adding Localization
- HConnection connection = HConnectionManager.createConnection(PhoenixConnectionUtil.getConfiguration(jobConf));
+ org.apache.hadoop.hbase.client.Connection connection = ConnectionFactory.createConnection(PhoenixConnectionUtil.getConfiguration(jobConf));
RegionLocator regionLocator = connection.getRegionLocator(TableName.valueOf(qplan
.getTableRef().getTable().getPhysicalName().toString()));
RegionSizeCalculator sizeCalculator = new RegionSizeCalculator(regionLocator, connection
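Unlike MetaDataUtil's cast to the internal ClusterConnection, this hunk uses the public replacement for HConnection.locateRegions(): a RegionLocator obtained from the Connection. A hedged sketch of that pattern (class and table names invented):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    class RegionLocatorSketch {
        static void printRegions(String tableName) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 RegionLocator locator =
                     conn.getRegionLocator(TableName.valueOf(tableName))) {
                for (HRegionLocation loc : locator.getAllRegionLocations()) {
                    System.out.println(loc.getRegionInfo().getRegionNameAsString()
                        + " @ " + loc.getServerName());
                }
            }
        }
    }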
[3/3] phoenix git commit: PHOENIX-4303 Replace HTableInterface, HConnection with Table, Connection interfaces respectively (Rajeshbabu)
Posted by ra...@apache.org.
PHOENIX-4303 Replace HTableInterface, HConnection with Table, Connection interfaces respectively (Rajeshbabu)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/11390427
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/11390427
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/11390427
Branch: refs/heads/5.x-HBase-2.0
Commit: 113904275d0689755aea100aaeb43aed9bd9cc9d
Parents: 136c7a6
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Wed Nov 8 16:07:14 2017 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Wed Nov 8 16:07:14 2017 +0530
----------------------------------------------------------------------
.../phoenix/end2end/AlterTableWithViewsIT.java | 4 +-
.../org/apache/phoenix/end2end/BaseViewIT.java | 4 +-
.../end2end/ColumnProjectionOptimizationIT.java | 4 +-
.../phoenix/end2end/DisableLocalIndexIT.java | 4 +-
.../apache/phoenix/end2end/DynamicColumnIT.java | 6 +-
.../apache/phoenix/end2end/DynamicFamilyIT.java | 6 +-
.../phoenix/end2end/MappingTableDataTypeIT.java | 9 +-
.../phoenix/end2end/MetaDataEndPointIT.java | 4 +-
.../phoenix/end2end/NativeHBaseTypesIT.java | 10 +-
.../phoenix/end2end/PhoenixRuntimeIT.java | 16 +-
.../end2end/QueryDatabaseMetaDataIT.java | 4 +-
.../end2end/RebuildIndexConnectionPropsIT.java | 10 +-
.../phoenix/end2end/StatsCollectorIT.java | 8 +-
.../UpdateCacheAcrossDifferentClientsIT.java | 4 +-
.../org/apache/phoenix/end2end/UpgradeIT.java | 7 +-
.../apache/phoenix/end2end/UpsertValuesIT.java | 4 +-
.../phoenix/end2end/index/BaseIndexIT.java | 4 +-
.../phoenix/end2end/index/LocalIndexIT.java | 4 +-
.../phoenix/end2end/index/MutableIndexIT.java | 4 +-
.../end2end/index/PartialIndexRebuilderIT.java | 36 ++--
.../phoenix/tx/FlappingTransactionIT.java | 4 +-
.../phoenix/tx/ParameterizedTransactionIT.java | 8 +-
.../apache/phoenix/cache/ServerCacheClient.java | 9 +-
.../apache/phoenix/compile/FromCompiler.java | 4 +-
.../DelegateRegionCoprocessorEnvironment.java | 15 +-
.../coprocessor/MetaDataEndpointImpl.java | 14 +-
.../coprocessor/MetaDataRegionObserver.java | 6 +-
.../apache/phoenix/execute/DelegateHTable.java | 185 ++++++++-----------
.../apache/phoenix/execute/MutationState.java | 18 +-
.../index/table/CoprocessorHTableFactory.java | 6 +-
.../hbase/index/table/HTableFactory.java | 6 +-
.../hbase/index/write/IndexWriterUtils.java | 19 +-
.../write/ParallelWriterIndexCommitter.java | 4 +-
.../TrackingParallelWriterIndexCommitter.java | 4 +-
.../index/PhoenixIndexFailurePolicy.java | 6 +-
.../index/PhoenixTransactionalIndexer.java | 2 +-
.../phoenix/iterate/BaseResultIterators.java | 2 +-
.../apache/phoenix/iterate/SnapshotScanner.java | 4 +-
.../phoenix/iterate/TableResultIterator.java | 6 +-
.../phoenix/mapreduce/PhoenixRecordReader.java | 3 +-
.../mapreduce/index/DirectHTableWriter.java | 13 +-
.../phoenix/mapreduce/index/IndexTool.java | 8 +-
.../phoenix/query/ConnectionQueryServices.java | 7 +-
.../query/ConnectionQueryServicesImpl.java | 65 +++----
.../query/ConnectionlessQueryServicesImpl.java | 6 +-
.../query/DelegateConnectionQueryServices.java | 7 +-
.../apache/phoenix/query/GuidePostsCache.java | 4 +-
.../phoenix/query/HConnectionFactory.java | 10 +-
.../org/apache/phoenix/query/HTableFactory.java | 11 +-
.../stats/DefaultStatisticsCollector.java | 4 +-
.../phoenix/schema/stats/StatisticsUtil.java | 4 +-
.../phoenix/schema/stats/StatisticsWriter.java | 12 +-
.../transaction/OmidTransactionTable.java | 34 +---
.../transaction/PhoenixTransactionalTable.java | 22 +--
.../transaction/TephraTransactionTable.java | 101 +++++-----
.../phoenix/transaction/TransactionFactory.java | 6 +-
.../java/org/apache/phoenix/util/IndexUtil.java | 19 +-
.../org/apache/phoenix/util/MetaDataUtil.java | 14 +-
.../org/apache/phoenix/util/ServerUtil.java | 21 +--
.../apache/phoenix/util/TransactionUtil.java | 4 +-
.../org/apache/phoenix/util/UpgradeUtil.java | 93 +++++-----
.../hbase/index/write/FakeTableFactory.java | 12 +-
.../hbase/index/write/TestIndexWriter.java | 8 +-
.../index/write/TestParalleIndexWriter.java | 12 +-
.../write/TestParalleWriterIndexCommitter.java | 12 +-
.../java/org/apache/phoenix/util/TestUtil.java | 11 +-
.../hive/mapreduce/PhoenixInputFormat.java | 7 +-
67 files changed, 488 insertions(+), 516 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
index 6b57148..aeb892e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
@@ -34,7 +34,7 @@ import java.util.Arrays;
import java.util.Collection;
import org.apache.commons.lang.ArrayUtils;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.coprocessor.PhoenixTransactionalProcessor;
import org.apache.phoenix.exception.SQLExceptionCode;
@@ -747,7 +747,7 @@ public class AlterTableWithViewsIT extends ParallelStatsDisabledIT {
PName tenantId = isMultiTenant ? PNameFactory.newName("tenant1") : null;
PhoenixConnection phoenixConn = conn.unwrap(PhoenixConnection.class);
- HTableInterface htable = phoenixConn.getQueryServices().getTable(Bytes.toBytes(baseTableName));
+ Table htable = phoenixConn.getQueryServices().getTable(Bytes.toBytes(baseTableName));
assertFalse(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
assertFalse(phoenixConn.getTable(new PTableKey(null, baseTableName)).isTransactional());
assertFalse(viewConn.unwrap(PhoenixConnection.class).getTable(new PTableKey(tenantId, viewOfTable)).isTransactional());
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
index 478b234..b024d03 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
@@ -33,10 +33,10 @@ import java.util.Collection;
import java.util.List;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -85,7 +85,7 @@ public abstract class BaseViewIT extends ParallelStatsEnabledIT {
// Confirm that dropping the view also deletes the rows in the index
if (saltBuckets == null) {
try (Connection conn = DriverManager.getConnection(getUrl())) {
- HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(tableName));
+ Table htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(tableName));
if(ScanUtil.isLocalIndex(scan)) {
ScanUtil.setLocalIndexAttributes(scan, 0, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, scan.getStartRow(), scan.getStopRow());
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
index 43dc302..08ecee6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
@@ -45,8 +45,8 @@ import java.util.Properties;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.types.PInteger;
@@ -238,7 +238,7 @@ public class ColumnProjectionOptimizationIT extends ParallelStatsDisabledIT {
byte[] c1 = Bytes.toBytes("COL1");
byte[] c2 = Bytes.toBytes("COL2");
byte[] c3 = Bytes.toBytes("COL3");
- HTableInterface htable = null;
+ Table htable = null;
try {
htable = conn2.getQueryServices().getTable(htableName);
Put put = new Put(PInteger.INSTANCE.toBytes(1));
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/DisableLocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DisableLocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DisableLocalIndexIT.java
index 8eab9e2..01fc24c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DisableLocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DisableLocalIndexIT.java
@@ -27,7 +27,7 @@ import java.sql.SQLException;
import java.util.Properties;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -58,7 +58,7 @@ public class DisableLocalIndexIT extends ParallelStatsDisabledIT {
assertFalse(admin.tableExists(Bytes.toBytes(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + tableName)));
admin.close();
try {
- HTableInterface t = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + tableName));
+ Table t = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + tableName));
t.getTableDescriptor(); // Exception no longer thrown by getTable, but instead need to force an RPC
fail("Local index table should not have been created");
} catch (org.apache.hadoop.hbase.TableNotFoundException e) {
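
The comment in this hunk captures the behavioral difference behind the test change: in HBase 2.0 a Table handle is handed out lazily, so a missing table is only reported once an RPC is forced. A sketch of that existence check, assuming an already-open Connection (the helper name is invented for illustration):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.TableNotFoundException;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Table;

    public class LazyGetTableSketch {
        // Hypothetical helper: getTable() no longer fails fast for a missing
        // table, so an RPC (here getTableDescriptor()) must force the check.
        static boolean tableReallyExists(Connection connection, String name) throws IOException {
            try (Table table = connection.getTable(TableName.valueOf(name))) {
                table.getTableDescriptor();
                return true;
            } catch (TableNotFoundException e) {
                return false;
            }
        }
    }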
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java
index 70c56a0..714f80a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java
@@ -35,9 +35,9 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;
@@ -76,7 +76,7 @@ public class DynamicColumnIT extends ParallelStatsDisabledIT {
admin.createTable(htd);
}
- try (HTableInterface hTable = services.getTable(Bytes.toBytes(tableName))) {
+ try (Table hTable = services.getTable(Bytes.toBytes(tableName))) {
// Insert rows using standard HBase mechanism with standard HBase "types"
List<Row> mutations = new ArrayList<Row>();
byte[] dv = Bytes.toBytes("DV");
@@ -96,7 +96,7 @@ public class DynamicColumnIT extends ParallelStatsDisabledIT {
put.addColumn(FAMILY_NAME_B, f2v2, Bytes.toBytes("f2value2"));
mutations.add(put);
- hTable.batch(mutations);
+ hTable.batch(mutations, null);
// Create Phoenix table after HBase table was created through the native APIs
// The timestamp of the table creation must be later than the timestamp of the data
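
This hunk also shows the batch() signature change: HBase 2.0's Table only offers the two-argument batch(List, Object[]), so the patch passes null where per-operation results are not inspected. A sketch of the two-argument form, with placeholder row, family, and qualifier names:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Row;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BatchSketch {
        static void batchPuts(Table table) throws IOException, InterruptedException {
            List<Row> mutations = new ArrayList<>();
            Put put = new Put(Bytes.toBytes("row1"));
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
            mutations.add(put);
            // The single-argument batch(List) is gone; pass null when results
            // are not needed, or an Object[] sized to the action list.
            Object[] results = new Object[mutations.size()];
            table.batch(mutations, results);
        }
    }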
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicFamilyIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicFamilyIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicFamilyIT.java
index acae6ee..84c00ba 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicFamilyIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicFamilyIT.java
@@ -33,9 +33,9 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.exception.PhoenixParserException;
@@ -109,7 +109,7 @@ public class DynamicFamilyIT extends ParallelStatsDisabledIT {
@SuppressWarnings("deprecation")
private static void initTableValues() throws Exception {
ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES));
- HTableInterface hTable = services.getTable(SchemaUtil.getTableNameAsBytes(WEB_STATS_SCHEMA_NAME,WEB_STATS));
+ Table hTable = services.getTable(SchemaUtil.getTableNameAsBytes(WEB_STATS_SCHEMA_NAME,WEB_STATS));
try {
// Insert rows using standard HBase mechanism with standard HBase "types"
Put put;
@@ -136,7 +136,7 @@ public class DynamicFamilyIT extends ParallelStatsDisabledIT {
put.addColumn(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID3_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID3_LOGIN_TIME));
mutations.add(put);
- hTable.batch(mutations);
+ hTable.batch(mutations, null);
} finally {
hTable.close();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
index 546c133..5173fe4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
@@ -38,11 +38,11 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.util.PropertiesUtil;
@@ -66,7 +66,7 @@ public class MappingTableDataTypeIT extends ParallelStatsDisabledIT {
descriptor.addFamily(columnDescriptor1);
descriptor.addFamily(columnDescriptor2);
admin.createTable(descriptor);
- HTableInterface t = conn.getQueryServices().getTable(Bytes.toBytes(mtest));
+ Table t = conn.getQueryServices().getTable(Bytes.toBytes(mtest));
insertData(tableName.getName(), admin, t);
t.close();
// create phoenix table that maps to existing HBase table
@@ -104,14 +104,13 @@ public class MappingTableDataTypeIT extends ParallelStatsDisabledIT {
}
}
- private void insertData(final byte[] tableName, HBaseAdmin admin, HTableInterface t) throws IOException,
+ private void insertData(final byte[] tableName, HBaseAdmin admin, Table t) throws IOException,
InterruptedException {
Put p = new Put(Bytes.toBytes("row"));
p.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("q1"), Bytes.toBytes("value1"));
p.addColumn(Bytes.toBytes("cf2"), Bytes.toBytes("q2"), Bytes.toBytes("value2"));
t.put(p);
- t.flushCommits();
- admin.flush(tableName);
+ admin.flush(TableName.valueOf(tableName));
}
/**
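
This hunk drops flushCommits(), which Table does not provide: Table.put() is synchronous, so the explicit client-side flush is unnecessary, and the admin-side flush now takes a TableName. Callers that still want client-side write buffering generally move to BufferedMutator; a sketch under that assumption (the table and column names are placeholders):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.BufferedMutator;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteBufferSketch {
        static void bufferedWrites(Connection connection) throws IOException {
            // BufferedMutator holds the client-side buffer that HTableInterface
            // used to own; flush() is the explicit analogue of flushCommits().
            try (BufferedMutator mutator =
                     connection.getBufferedMutator(TableName.valueOf("MY_TABLE"))) {
                Put p = new Put(Bytes.toBytes("row"));
                p.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("q1"), Bytes.toBytes("value1"));
                mutator.mutate(p);
                mutator.flush();
            }
        }
    }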
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndPointIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndPointIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndPointIT.java
index 08b8cc6..2f39ec8 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndPointIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndPointIT.java
@@ -22,7 +22,7 @@ import static org.junit.Assert.assertEquals;
import java.sql.Connection;
import java.sql.DriverManager;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -44,7 +44,7 @@ public class MetaDataEndPointIT extends ParallelStatsDisabledIT {
conn.createStatement().execute("CREATE TABLE " + fullTableName + "(k INTEGER PRIMARY KEY, v1 INTEGER, v2 INTEGER) COLUMN_ENCODED_BYTES = 0, STORE_NULLS=true, GUIDE_POSTS_WIDTH=1000");
conn.createStatement().execute("CREATE INDEX " + indexName1 + " ON " + fullTableName + " (v1) INCLUDE (v2)");
conn.commit();
- HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
MutationCode code = IndexUtil.updateIndexState(fullIndexName1, 0L, metaTable, PIndexState.DISABLE).getMutationCode();
assertEquals(MutationCode.TABLE_ALREADY_EXISTS, code);
long ts = EnvironmentEdgeManager.currentTimeMillis();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java
index 50563d4..3b17ad1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java
@@ -38,10 +38,10 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Row;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -84,7 +84,7 @@ public class NativeHBaseTypesIT extends ParallelStatsDisabledIT {
}
ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES));
- HTableInterface hTable = services.getTable(tableBytes);
+ Table hTable = services.getTable(tableBytes);
try {
// Insert rows using standard HBase mechanism with standard HBase "types"
List<Row> mutations = new ArrayList<Row>();
@@ -132,7 +132,7 @@ public class NativeHBaseTypesIT extends ParallelStatsDisabledIT {
put.addColumn(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(40000L));
mutations.add(put);
- hTable.batch(mutations);
+ hTable.batch(mutations, null);
Result r = hTable.get(new Get(bKey));
assertFalse(r.isEmpty());
@@ -273,7 +273,7 @@ public class NativeHBaseTypesIT extends ParallelStatsDisabledIT {
String tableName = initTableValues();
String query = "SELECT string_key FROM " + tableName + " WHERE uint_key > 100000";
PhoenixConnection conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class);
- HTableInterface hTable = conn.getQueryServices().getTable(tableName.getBytes());
+ Table hTable = conn.getQueryServices().getTable(tableName.getBytes());
List<Row> mutations = new ArrayList<Row>();
byte[] family = Bytes.toBytes("1");
@@ -290,7 +290,7 @@ public class NativeHBaseTypesIT extends ParallelStatsDisabledIT {
put.addColumn(family, ulongCol, HConstants.LATEST_TIMESTAMP, Bytes.toBytes(100L));
put.addColumn(family, QueryConstants.EMPTY_COLUMN_BYTES, HConstants.LATEST_TIMESTAMP, ByteUtil.EMPTY_BYTE_ARRAY);
mutations.add(put);
- hTable.batch(mutations);
+ hTable.batch(mutations, null);
// Demonstrates weakness of HBase Bytes serialization. Negative numbers
// show up as bigger than positive numbers
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixRuntimeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixRuntimeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixRuntimeIT.java
index 72ff21e..5652b58 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixRuntimeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixRuntimeIT.java
@@ -29,10 +29,10 @@ import java.util.HashSet;
import java.util.Properties;
import java.util.Set;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
@@ -55,7 +55,7 @@ import org.junit.Test;
import com.google.common.collect.Sets;
public class PhoenixRuntimeIT extends ParallelStatsDisabledIT {
- private static void assertTenantIds(Expression e, HTableInterface htable, Filter filter, String[] tenantIds) throws IOException {
+ private static void assertTenantIds(Expression e, Table htable, Filter filter, String[] tenantIds) throws IOException {
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
Scan scan = new Scan();
scan.setFilter(filter);
@@ -109,17 +109,17 @@ public class PhoenixRuntimeIT extends ParallelStatsDisabledIT {
Connection tsconn = DriverManager.getConnection(getUrl(), props);
tsconn.createStatement().execute("CREATE SEQUENCE " + sequenceName);
Expression e1 = PhoenixRuntime.getTenantIdExpression(tsconn, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME);
- HTableInterface htable1 = tsconn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
+ Table htable1 = tsconn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
assertTenantIds(e1, htable1, new FirstKeyOnlyFilter(), new String[] {"", t1} );
String viewName = generateUniqueName();
tsconn.createStatement().execute("CREATE VIEW " + viewName + "(V1 VARCHAR) AS SELECT * FROM " + tableName);
Expression e2 = PhoenixRuntime.getTenantIdExpression(tsconn, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
- HTableInterface htable2 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table htable2 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
assertTenantIds(e2, htable2, getUserTableAndViewsFilter(), new String[] {"", t1} );
Expression e3 = PhoenixRuntime.getTenantIdExpression(conn, tableName);
- HTableInterface htable3 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(tableName));
+ Table htable3 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(tableName));
assertTenantIds(e3, htable3, new FirstKeyOnlyFilter(), new String[] {t1, t2} );
String basTableName = generateUniqueName();
@@ -130,13 +130,13 @@ public class PhoenixRuntimeIT extends ParallelStatsDisabledIT {
String indexName1 = generateUniqueName();
tsconn.createStatement().execute("CREATE INDEX " + indexName1 + " ON " + viewName + "(V1)");
Expression e5 = PhoenixRuntime.getTenantIdExpression(tsconn, indexName1);
- HTableInterface htable5 = tsconn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(MetaDataUtil.VIEW_INDEX_TABLE_PREFIX + tableName));
+ Table htable5 = tsconn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(MetaDataUtil.VIEW_INDEX_TABLE_PREFIX + tableName));
assertTenantIds(e5, htable5, new FirstKeyOnlyFilter(), new String[] {t1} );
String indexName2 = generateUniqueName();
conn.createStatement().execute("CREATE INDEX " + indexName2 + " ON " + tableName + "(k2)");
Expression e6 = PhoenixRuntime.getTenantIdExpression(conn, indexName2);
- HTableInterface htable6 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(indexName2));
+ Table htable6 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(indexName2));
assertTenantIds(e6, htable6, new FirstKeyOnlyFilter(), new String[] {t1, t2} );
tableName = generateUniqueName() + "BAR_" + (isSalted ? "SALTED" : "UNSALTED");
@@ -144,7 +144,7 @@ public class PhoenixRuntimeIT extends ParallelStatsDisabledIT {
conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('" + t1 + "','x')");
conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('" + t2 + "','y')");
Expression e7 = PhoenixRuntime.getFirstPKColumnExpression(conn, tableName);
- HTableInterface htable7 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(tableName));
+ Table htable7 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(tableName));
assertTenantIds(e7, htable7, new FirstKeyOnlyFilter(), new String[] {t1, t2} );
}
}
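
assertTenantIds() above drives a filtered scan through the new Table handle. A sketch of that scan pattern in isolation (the counting helper is invented for illustration):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;

    public class FilteredScanSketch {
        static int countRows(Table table) throws IOException {
            Scan scan = new Scan();
            scan.setFilter(new FirstKeyOnlyFilter()); // one cell per row, as the test does
            int rows = 0;
            try (ResultScanner scanner = table.getScanner(scan)) {
                for (Result r : scanner) {
                    rows++;
                }
            }
            return rows;
        }
    }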
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
index 6d675f5..af5a52a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
@@ -48,8 +48,8 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.coprocessor.GroupedAggregateRegionObserver;
@@ -863,7 +863,7 @@ public class QueryDatabaseMetaDataIT extends ParallelStatsDisabledIT {
// expected to fail b/c table is read-only
}
- HTableInterface htable =
+ Table htable =
pconn.getQueryServices()
.getTable(SchemaUtil.getTableNameAsBytes(schemaName, tableName));
Put put = new Put(Bytes.toBytes("0"));
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/RebuildIndexConnectionPropsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RebuildIndexConnectionPropsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RebuildIndexConnectionPropsIT.java
index 33a891f..363b657 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RebuildIndexConnectionPropsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RebuildIndexConnectionPropsIT.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.Connection;
import org.apache.phoenix.coprocessor.MetaDataRegionObserver;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDriver;
@@ -103,11 +103,11 @@ public class RebuildIndexConnectionPropsIT extends BaseUniqueNamesOwnClusterIT {
Long.toString(NUM_RPC_RETRIES),
rebuildQueryServicesConfig.get(HConstants.HBASE_CLIENT_RETRIES_NUMBER));
ConnectionQueryServices rebuildQueryServices = rebuildIndexConnection.getQueryServices();
- HConnection rebuildIndexHConnection =
- (HConnection) Whitebox.getInternalState(rebuildQueryServices,
+ Connection rebuildIndexHConnection =
+ (Connection) Whitebox.getInternalState(rebuildQueryServices,
"connection");
- HConnection regularHConnection =
- (HConnection) Whitebox.getInternalState(
+ Connection regularHConnection =
+ (Connection) Whitebox.getInternalState(
regularConnection.getQueryServices(), "connection");
// assert that a new HConnection was created
assertFalse(
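
These changes swap HConnection for Connection, which comes from ConnectionFactory rather than HConnectionManager. A sketch of the property the test asserts via reflection, namely that each createConnection() call yields an independent Connection:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DistinctConnectionsSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            // Unlike the old cached HConnections, each createConnection() call
            // returns a distinct Connection instance.
            try (Connection a = ConnectionFactory.createConnection(conf);
                 Connection b = ConnectionFactory.createConnection(conf)) {
                System.out.println("distinct=" + (a != b));
            }
        }
    }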
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
index da8e78d..e18552a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
@@ -41,10 +41,10 @@ import java.util.Random;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -459,7 +459,7 @@ public abstract class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
Scan scan = new Scan();
scan.setRaw(true);
PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
- try (HTableInterface htable = phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
+ try (Table htable = phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
ResultScanner scanner = htable.getScanner(scan);
Result result;
while ((result = scanner.next())!=null) {
@@ -472,7 +472,7 @@ public abstract class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
scan = new Scan();
scan.setRaw(true);
phxConn = conn.unwrap(PhoenixConnection.class);
- try (HTableInterface htable = phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
+ try (Table htable = phxConn.getQueryServices().getTable(Bytes.toBytes(tableName))) {
ResultScanner scanner = htable.getScanner(scan);
Result result;
while ((result = scanner.next())!=null) {
@@ -709,7 +709,7 @@ public abstract class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
conn.createStatement().execute("UPDATE STATISTICS " + tableName);
ConnectionQueryServices queryServices =
conn.unwrap(PhoenixConnection.class).getQueryServices();
- try (HTableInterface statsHTable =
+ try (Table statsHTable =
queryServices.getTable(
SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES,
queryServices.getProps()).getName())) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java
index 25e2367..4c85a0c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java
@@ -21,7 +21,7 @@ import java.sql.ResultSet;
import java.util.Map;
import java.util.Properties;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.jdbc.PhoenixDriver;
@@ -128,7 +128,7 @@ public class UpdateCacheAcrossDifferentClientsIT extends BaseUniqueNamesOwnClust
String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
conn1.createStatement().execute("CREATE TABLE " + fullTableName + "(k INTEGER PRIMARY KEY, v1 INTEGER, v2 INTEGER) COLUMN_ENCODED_BYTES = 0, STORE_NULLS=true");
conn1.createStatement().execute("CREATE INDEX " + indexName + " ON " + fullTableName + " (v1) INCLUDE (v2)");
- HTableInterface metaTable = conn2.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table metaTable = conn2.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
IndexUtil.updateIndexState(fullIndexName, 0, metaTable, PIndexState.DISABLE);
conn2.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES(1,2,3)");
conn2.commit();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
index 551247f..14c5d8a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
@@ -47,9 +47,9 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.coprocessor.MetaDataProtocol;
@@ -626,7 +626,7 @@ public class UpgradeIT extends ParallelStatsDisabledIT {
try (PhoenixConnection conn =
(DriverManager.getConnection(getUrl())).unwrap(PhoenixConnection.class)) {
- try (HTableInterface htable =
+ try (Table htable =
conn.getQueryServices().getTable(
Bytes.toBytes(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME))) {
RowMutations mutations = new RowMutations(rowKey);
@@ -731,13 +731,12 @@ public class UpgradeIT extends ParallelStatsDisabledIT {
private void putUnlockKVInSysMutex(byte[] row) throws Exception {
try (Connection conn = getConnection(false, null)) {
ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
- try (HTableInterface sysMutexTable = services.getTable(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES)) {
+ try (Table sysMutexTable = services.getTable(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES)) {
byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
byte[] qualifier = UPGRADE_MUTEX;
Put put = new Put(row);
put.addColumn(family, qualifier, UPGRADE_MUTEX_UNLOCKED);
sysMutexTable.put(put);
- sysMutexTable.flushCommits();
}
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java
index e89cc58..8365ca0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java
@@ -37,10 +37,10 @@ import java.sql.Time;
import java.sql.Timestamp;
import java.util.Properties;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -662,7 +662,7 @@ public class UpsertValuesIT extends ParallelStatsDisabledIT {
}
// Issue a raw hbase scan and assert that key values have the expected column qualifiers.
try (Connection conn = DriverManager.getConnection(getUrl())) {
- HTableInterface table = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(fullTableName));
+ Table table = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(fullTableName));
ResultScanner scanner = table.getScanner(new Scan());
Result next = scanner.next();
assertTrue(next.containsColumn(Bytes.toBytes("CF1"), PInteger.INSTANCE.toBytes(1)));
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
index 049416c..c1f0628 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
@@ -46,10 +46,10 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
import org.apache.phoenix.compile.ColumnResolver;
import org.apache.phoenix.compile.FromCompiler;
@@ -954,7 +954,7 @@ public abstract class BaseIndexIT extends ParallelStatsDisabledIT {
PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
PTable index = pconn.getTable(new PTableKey(null, fullIndexName));
byte[] physicalIndexTable = index.getPhysicalName().getBytes();
- try (HTableInterface hIndex = pconn.getQueryServices().getTable(physicalIndexTable)) {
+ try (Table hIndex = pconn.getQueryServices().getTable(physicalIndexTable)) {
Scan scan = new Scan();
scan.setRaw(true);
if (this.transactional) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 48221ab..6ea96a9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -46,10 +46,10 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;
@@ -577,7 +577,7 @@ public class LocalIndexIT extends BaseLocalIndexIT {
public void testLocalIndexAutomaticRepair() throws Exception {
if (isNamespaceMapped) { return; }
PhoenixConnection conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class);
- try (HTableInterface metaTable = conn.getQueryServices().getTable(TableName.META_TABLE_NAME.getName());
+ try (Table metaTable = conn.getQueryServices().getTable(TableName.META_TABLE_NAME.getName());
HBaseAdmin admin = conn.getQueryServices().getAdmin();) {
Statement statement = conn.createStatement();
final String tableName = "T_AUTO_MATIC_REPAIR";
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index e46a213..e1d0b31 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
@@ -658,7 +658,7 @@ public class MutableIndexIT extends ParallelStatsDisabledIT {
TableName indexTable = TableName.valueOf(localIndex?tableName: indexName);
admin.flush(indexTable);
boolean merged = false;
- HTableInterface table = connectionQueryServices.getTable(indexTable.getName());
+ Table table = connectionQueryServices.getTable(indexTable.getName());
// merge regions until 1 left
long numRegions = 0;
while (true) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
index a1da339..61cca0b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
@@ -37,8 +37,8 @@ import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
@@ -202,7 +202,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
final String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
final String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
Connection conn = DriverManager.getConnection(getUrl());
- HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
conn.createStatement().execute("CREATE TABLE " + fullTableName + "(k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 INTEGER, CONSTRAINT pk PRIMARY KEY (k1,k2)) STORE_NULLS=true, VERSIONS=1");
conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + fullTableName + "(v1)");
@@ -337,7 +337,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
conn.commit();
clock.time += 100;
long disableTS = EnvironmentEdgeManager.currentTimeMillis();
- HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
IndexUtil.updateIndexState(fullIndexName1, disableTS, metaTable, PIndexState.DISABLE);
IndexUtil.updateIndexState(fullIndexName2, disableTS, metaTable, PIndexState.DISABLE);
clock.time += 100;
@@ -367,7 +367,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + fullTableName + " (v1) INCLUDE (v2)");
mutateRandomly(conn, fullTableName, nRows);
long disableTS = EnvironmentEdgeManager.currentTimeMillis();
- HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
IndexUtil.updateIndexState(fullIndexName, disableTS, metaTable, PIndexState.DISABLE);
boolean[] cancel = new boolean[1];
try {
@@ -395,7 +395,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
conn.createStatement().execute("CREATE TABLE " + fullTableName + "(k INTEGER PRIMARY KEY, v1 INTEGER, v2 INTEGER) COLUMN_ENCODED_BYTES = 0, STORE_NULLS=true");
conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + fullTableName + " (v1) INCLUDE (v2)");
mutateRandomly(conn, fullTableName, nRows);
- HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
final boolean[] hasInactiveIndex = new boolean[1];
final CountDownLatch doneSignal = new CountDownLatch(1);
Runnable r = new Runnable() {
@@ -446,7 +446,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','bb')");
conn.commit();
long disableTS = EnvironmentEdgeManager.currentTimeMillis();
- HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
IndexUtil.updateIndexState(fullIndexName, disableTS, metaTable, PIndexState.DISABLE);
conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','ccc')");
conn.commit();
@@ -476,7 +476,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','a')");
conn.commit();
long disableTS = EnvironmentEdgeManager.currentTimeMillis();
- HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
IndexUtil.updateIndexState(fullIndexName, disableTS, metaTable, PIndexState.DISABLE);
conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a',null)");
conn.commit();
@@ -506,7 +506,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a',null)");
conn.commit();
long disableTS = EnvironmentEdgeManager.currentTimeMillis();
- HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
IndexUtil.updateIndexState(fullIndexName, disableTS, metaTable, PIndexState.DISABLE);
conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','bb')");
conn.commit();
@@ -536,7 +536,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a',null)");
conn.commit();
long disableTS = EnvironmentEdgeManager.currentTimeMillis();
- HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
IndexUtil.updateIndexState(fullIndexName, disableTS, metaTable, PIndexState.DISABLE);
conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','b')");
conn.commit();
@@ -566,7 +566,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
conn.createStatement().execute("DELETE FROM " + fullTableName);
conn.commit();
long disableTS = EnvironmentEdgeManager.currentTimeMillis();
- HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
IndexUtil.updateIndexState(fullIndexName, disableTS, metaTable, PIndexState.DISABLE);
conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','b')");
conn.commit();
@@ -622,7 +622,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','a')");
conn.commit();
clock.time += 100;
- HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
IndexUtil.updateIndexState(fullIndexName, clock.currentTime(), metaTable, PIndexState.DISABLE);
conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','bb')");
conn.commit();
@@ -658,7 +658,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','a','0')");
conn.commit();
clock.time += 100;
- HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
IndexUtil.updateIndexState(fullIndexName, 0L, metaTable, PIndexState.DISABLE);
clock.time += 100;
long disableTime = clock.currentTime();
@@ -705,7 +705,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','a','0')");
conn.commit();
clock.time += 100;
- HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
long disableTime = clock.currentTime();
IndexUtil.updateIndexState(fullIndexName, disableTime, metaTable, PIndexState.DISABLE);
clock.time += 100;
@@ -752,7 +752,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
clock.time += 100;
conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + fullTableName + " (v1, v2)");
clock.time += 100;
- HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
IndexUtil.updateIndexState(fullIndexName, 0L, metaTable, PIndexState.DISABLE);
clock.time += 100;
long disableTime = clock.currentTime();
@@ -801,7 +801,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','a','0','x')");
conn.commit();
clock.time += 100;
- try (HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES)) {
+ try (Table metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES)) {
// By using an INDEX_DISABLE_TIMESTAMP of 0, we prevent the partial index rebuilder from triggering
IndexUtil.updateIndexState(fullIndexName, 0L, metaTable, PIndexState.DISABLE);
clock.time += 100;
@@ -868,7 +868,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','a','0')");
conn.commit();
clock.time += 100;
- HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
long disableTime = clock.currentTime();
// Simulates an index write failure
@@ -952,7 +952,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','a')");
conn.commit();
clock.time += 100;
- HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
IndexUtil.updateIndexState(fullIndexName, clock.currentTime(), metaTable, PIndexState.DISABLE);
conn.createStatement().execute("DELETE FROM " + fullTableName + " WHERE k='a'");
conn.commit();
@@ -986,7 +986,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','a')");
conn.commit();
clock.time += 100;
- HTableInterface metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+ Table metaTable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
IndexUtil.updateIndexState(fullIndexName, clock.currentTime(), metaTable, PIndexState.DISABLE);
conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES('a','ccc')");
conn.commit();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/tx/FlappingTransactionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/FlappingTransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/FlappingTransactionIT.java
index 301768b..dca651e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/FlappingTransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/FlappingTransactionIT.java
@@ -34,9 +34,9 @@ import java.sql.Statement;
import java.util.Properties;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
import org.apache.phoenix.exception.SQLExceptionCode;
@@ -214,7 +214,7 @@ public class FlappingTransactionIT extends ParallelStatsDisabledIT {
Statement stmt = conn.createStatement();
stmt.execute("CREATE TABLE " + fullTableName + "(K VARCHAR PRIMARY KEY, V1 VARCHAR, V2 VARCHAR) TRANSACTIONAL=true");
- HTableInterface htable = pconn.getQueryServices().getTable(Bytes.toBytes(fullTableName));
+ Table htable = pconn.getQueryServices().getTable(Bytes.toBytes(fullTableName));
stmt.executeUpdate("upsert into " + fullTableName + " values('x', 'a', 'a')");
conn.commit();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
index 897007d..042d915 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
@@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.coprocessor.PhoenixTransactionalProcessor;
import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
@@ -270,7 +270,7 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
String index = generateUniqueName();
conn.createStatement().execute("CREATE INDEX " + index + " ON " + nonTxTableName + "(v)");
// Reset empty column value to an empty value like it is pre-transactions
- HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes( nonTxTableName));
+ Table htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes( nonTxTableName));
List<Put>puts = Lists.newArrayList(new Put(PInteger.INSTANCE.toBytes(1)), new Put(PInteger.INSTANCE.toBytes(2)), new Put(PInteger.INSTANCE.toBytes(3)));
for (Put put : puts) {
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
@@ -331,7 +331,7 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
conn.createStatement().execute("UPSERT INTO \"SYSTEM\"." + nonTxTableName + " VALUES (1)");
conn.commit();
// Reset empty column value to an empty value like it is pre-transactions
- HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes("SYSTEM." + nonTxTableName));
+ Table htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes("SYSTEM." + nonTxTableName));
Put put = new Put(PInteger.INSTANCE.toBytes(1));
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
htable.put(put);
@@ -373,7 +373,7 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
conn.createStatement().execute(ddl);
PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
PTable table = pconn.getTable(new PTableKey(null, t1));
- HTableInterface htable = pconn.getQueryServices().getTable(Bytes.toBytes(t1));
+ Table htable = pconn.getQueryServices().getTable(Bytes.toBytes(t1));
assertTrue(table.isTransactional());
assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
index 26b6f01..e9b5b37 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
@@ -43,6 +43,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils.BlockingRpcCallback;
@@ -256,7 +257,7 @@ public class ServerCacheClient {
servers.add(entry);
if (LOG.isDebugEnabled()) {LOG.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));}
final byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
- final HTableInterface htable = services.getTable(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
+ final Table htable = services.getTable(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
closeables.add(htable);
futures.add(executor.submit(new JobCallable<Boolean>() {
@@ -330,7 +331,7 @@ public class ServerCacheClient {
* @throws IllegalStateException if hashed table cannot be removed on any region server on which it was added
*/
private void removeServerCache(final ServerCache cache, Set<HRegionLocation> remainingOnServers) throws SQLException {
- HTableInterface iterateOverTable = null;
+ Table iterateOverTable = null;
final byte[] cacheId = cache.getId();
try {
ConnectionQueryServices services = connection.getQueryServices();
@@ -431,7 +432,7 @@ public class ServerCacheClient {
public boolean addServerCache(byte[] startkeyOfRegion, ServerCache cache, HashCacheFactory cacheFactory,
byte[] txState, PTable pTable) throws Exception {
- HTableInterface table = null;
+ Table table = null;
boolean success = true;
byte[] cacheId = cache.getId();
try {
@@ -453,7 +454,7 @@ public class ServerCacheClient {
}
}
- public boolean addServerCache(HTableInterface htable, byte[] key, final PTable cacheUsingTable, final byte[] cacheId,
+ public boolean addServerCache(Table htable, byte[] key, final PTable cacheUsingTable, final byte[] cacheId,
final ImmutableBytesWritable cachePtr, final ServerCacheFactory cacheFactory, final byte[] txState)
throws Exception {
byte[] keyInRegion = getKeyInRegion(key);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index f88b34b..0d06f0a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -29,7 +29,7 @@ import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.coprocessor.MetaDataProtocol;
import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
@@ -187,7 +187,7 @@ public class FromCompiler {
boolean isNamespaceMapped = SchemaUtil.isNamespaceMappingEnabled(statement.getTableType(), connection.getQueryServices().getProps());
byte[] fullTableName = SchemaUtil.getPhysicalHBaseTableName(
baseTable.getSchemaName(), baseTable.getTableName(), isNamespaceMapped).getBytes();
- HTableInterface htable = null;
+ Table htable = null;
try {
htable = services.getTable(fullTableName);
} catch (UnsupportedOperationException ignore) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
index 380212e..da5e7a8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
@@ -59,7 +59,6 @@ public class DelegateRegionCoprocessorEnvironment implements RegionCoprocessorEn
public Coprocessor getInstance() {
return delegate.getInstance();
}
-
@Override
public int getPriority() {
return delegate.getPriority();
@@ -76,12 +75,12 @@ public class DelegateRegionCoprocessorEnvironment implements RegionCoprocessorEn
}
@Override
- public HTableInterface getTable(TableName tableName) throws IOException {
+ public Table getTable(TableName tableName) throws IOException {
return delegate.getTable(tableName);
}
@Override
- public HTableInterface getTable(TableName tableName, ExecutorService service)
+ public Table getTable(TableName tableName, ExecutorService service)
throws IOException {
return delegate.getTable(tableName, service);
}
@@ -102,13 +101,13 @@ public class DelegateRegionCoprocessorEnvironment implements RegionCoprocessorEn
}
@Override
- public RegionServerServices getRegionServerServices() {
- return delegate.getRegionServerServices();
+ public ConcurrentMap<String, Object> getSharedData() {
+ return delegate.getSharedData();
}
@Override
- public ConcurrentMap<String, Object> getSharedData() {
- return delegate.getSharedData();
+ public RegionServerServices getRegionServerServices() {
+ return delegate.getRegionServerServices();
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index fe13c09..a42e1b7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -107,12 +107,12 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -1711,10 +1711,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
// RegionScanner scanner = region.getScanner(scan);
// The following *should* work, but doesn't due to HBASE-11837
// TableName systemCatalogTableName = region.getTableDesc().getTableName();
- // HTableInterface hTable = env.getTable(systemCatalogTableName);
+ // Table hTable = env.getTable(systemCatalogTableName);
// These deprecated calls work around the issue
- try (HTableInterface hTable = ServerUtil.getHTableForCoprocessorScan(env,
- region.getTableDesc().getTableName().getName())) {
+ try (Table hTable = ServerUtil.getHTableForCoprocessorScan(env,
+ region.getTableDesc().getTableName())) {
boolean allViewsInCurrentRegion = true;
int numOfChildViews = 0;
List<ViewInfo> viewInfoList = Lists.newArrayList();
@@ -1760,10 +1760,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
// RegionScanner scanner = region.getScanner(scan);
// The following *should* work, but doesn't due to HBASE-11837
// TableName systemCatalogTableName = region.getTableDesc().getTableName();
- // HTableInterface hTable = env.getTable(systemCatalogTableName);
+ // Table hTable = env.getTable(systemCatalogTableName);
// These deprecated calls work around the issue
- try (HTableInterface hTable = ServerUtil.getHTableForCoprocessorScan(env,
- region.getTableDesc().getTableName().getName())) {
+ try (Table hTable = ServerUtil.getHTableForCoprocessorScan(env,
+ region.getTableDesc().getTableName())) {
boolean allViewsInCurrentRegion = true;
int numOfChildViews = 0;
List<ViewInfo> viewInfoList = Lists.newArrayList();
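The try-with-resources form above works because Table extends Closeable. A minimal sketch of the same pattern against the coprocessor environment API as it appears in this patch (the catalog table name and row are placeholders):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

    final class CoprocessorTableSketch {
        // Scope the Table handle to the lookup so it is closed even on error.
        static Result readRow(RegionCoprocessorEnvironment env, byte[] row)
                throws IOException {
            try (Table hTable = env.getTable(TableName.valueOf("SYSTEM.CATALOG"))) {
                return hTable.get(new Get(row));
            }
        }
    }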
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index e11ff14..e7b9a35 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -160,8 +160,8 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
Runnable r = new Runnable() {
@Override
public void run() {
- HTableInterface metaTable = null;
- HTableInterface statsTable = null;
+ Table metaTable = null;
+ Table statsTable = null;
try {
ReadOnlyProps props=new ReadOnlyProps(env.getConfiguration().iterator());
Thread.sleep(1000);
@@ -411,7 +411,7 @@ public class MetaDataRegionObserver implements RegionObserver,RegionCoprocessor
List<Pair<PTable,Long>> pairs = entry.getValue();
List<PTable> indexesToPartiallyRebuild = Lists.newArrayListWithExpectedSize(pairs.size());
try (
- HTableInterface metaTable = env.getTable(
+ Table metaTable = env.getTable(
SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, props))) {
long earliestDisableTimestamp = Long.MAX_VALUE;
long latestUpperBoundTimestamp = Long.MIN_VALUE;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java
index f45b356..444bb5d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java
@@ -22,13 +22,13 @@ import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@@ -36,6 +36,8 @@ import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.coprocessor.Batch.Call;
import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
@@ -46,19 +48,14 @@ import com.google.protobuf.Message;
import com.google.protobuf.Service;
import com.google.protobuf.ServiceException;
-public class DelegateHTable implements HTableInterface {
- protected final HTableInterface delegate;
+public class DelegateHTable implements Table {
+ protected final Table delegate;
- public DelegateHTable(HTableInterface delegate) {
+ public DelegateHTable(Table delegate) {
this.delegate = delegate;
}
@Override
- public byte[] getTableName() {
- return delegate.getTableName();
- }
-
- @Override
public TableName getName() {
return delegate.getName();
}
@@ -79,34 +76,22 @@ public class DelegateHTable implements HTableInterface {
}
@Override
- public Boolean[] exists(List<Get> gets) throws IOException {
- return delegate.exists(gets);
+ public boolean[] existsAll(List<Get> gets) throws IOException {
+ return delegate.existsAll(gets);
}
@Override
- public void batch(List<? extends Row> actions, Object[] results) throws IOException, InterruptedException {
+ public void batch(List<? extends Row> actions, Object[] results) throws IOException,
+ InterruptedException {
delegate.batch(actions, results);
}
- @SuppressWarnings("deprecation")
- @Override
- public Object[] batch(List<? extends Row> actions) throws IOException, InterruptedException {
- return delegate.batch(actions);
- }
-
@Override
- public <R> void batchCallback(List<? extends Row> actions, Object[] results, Callback<R> callback)
- throws IOException, InterruptedException {
+ public <R> void batchCallback(List<? extends Row> actions, Object[] results,
+ Callback<R> callback) throws IOException, InterruptedException {
delegate.batchCallback(actions, results, callback);
}
- @SuppressWarnings("deprecation")
- @Override
- public <R> Object[] batchCallback(List<? extends Row> actions, Callback<R> callback) throws IOException,
- InterruptedException {
- return delegate.batchCallback(actions, callback);
- }
-
@Override
public Result get(Get get) throws IOException {
return delegate.get(get);
@@ -117,12 +102,6 @@ public class DelegateHTable implements HTableInterface {
return delegate.get(gets);
}
- @SuppressWarnings("deprecation")
- @Override
- public Result getRowOrBefore(byte[] row, byte[] family) throws IOException {
- return delegate.getRowOrBefore(row, family);
- }
-
@Override
public ResultScanner getScanner(Scan scan) throws IOException {
return delegate.getScanner(scan);
@@ -149,11 +128,18 @@ public class DelegateHTable implements HTableInterface {
}
@Override
- public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) throws IOException {
+ public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put)
+ throws IOException {
return delegate.checkAndPut(row, family, qualifier, value, put);
}
@Override
+ public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
+ byte[] value, Put put) throws IOException {
+ return delegate.checkAndPut(row, family, qualifier, compareOp, value, put);
+ }
+
+ @Override
public void delete(Delete delete) throws IOException {
delegate.delete(delete);
}
@@ -164,12 +150,18 @@ public class DelegateHTable implements HTableInterface {
}
@Override
- public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, Delete delete)
- throws IOException {
+ public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value,
+ Delete delete) throws IOException {
return delegate.checkAndDelete(row, family, qualifier, value, delete);
}
@Override
+ public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
+ byte[] value, Delete delete) throws IOException {
+ return delegate.checkAndDelete(row, family, qualifier, compareOp, value, delete);
+ }
+
+ @Override
public void mutateRow(RowMutations rm) throws IOException {
delegate.mutateRow(rm);
}
@@ -185,33 +177,17 @@ public class DelegateHTable implements HTableInterface {
}
@Override
- public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) throws IOException {
+ public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount)
+ throws IOException {
return delegate.incrementColumnValue(row, family, qualifier, amount);
}
@Override
- public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, Durability durability)
- throws IOException {
+ public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount,
+ Durability durability) throws IOException {
return delegate.incrementColumnValue(row, family, qualifier, amount, durability);
}
- @SuppressWarnings("deprecation")
- @Override
- public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, boolean writeToWAL)
- throws IOException {
- return delegate.incrementColumnValue(row, family, qualifier, amount, writeToWAL);
- }
-
- @Override
- public boolean isAutoFlush() {
- return delegate.isAutoFlush();
- }
-
- @Override
- public void flushCommits() throws IOException {
- delegate.flushCommits();
- }
-
@Override
public void close() throws IOException {
delegate.close();
@@ -223,98 +199,99 @@ public class DelegateHTable implements HTableInterface {
}
@Override
- public <T extends Service, R> Map<byte[], R> coprocessorService(Class<T> service, byte[] startKey, byte[] endKey,
- Call<T, R> callable) throws ServiceException, Throwable {
+ public <T extends Service, R> Map<byte[], R> coprocessorService(Class<T> service,
+ byte[] startKey, byte[] endKey, Call<T, R> callable) throws ServiceException, Throwable {
return delegate.coprocessorService(service, startKey, endKey, callable);
}
@Override
- public <T extends Service, R> void coprocessorService(Class<T> service, byte[] startKey, byte[] endKey,
- Call<T, R> callable, Callback<R> callback) throws ServiceException, Throwable {
+ public <T extends Service, R> void coprocessorService(Class<T> service, byte[] startKey,
+ byte[] endKey, Call<T, R> callable, Callback<R> callback) throws ServiceException,
+ Throwable {
delegate.coprocessorService(service, startKey, endKey, callable, callback);
}
- @SuppressWarnings("deprecation")
@Override
- public void setAutoFlush(boolean autoFlush) {
- delegate.setAutoFlush(autoFlush);
+ public <R extends Message> Map<byte[], R> batchCoprocessorService(
+ MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey,
+ R responsePrototype) throws ServiceException, Throwable {
+ return delegate.batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype);
}
@Override
- public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) {
- delegate.setAutoFlush(autoFlush, clearBufferOnFail);
+ public <R extends Message> void batchCoprocessorService(MethodDescriptor methodDescriptor,
+ Message request, byte[] startKey, byte[] endKey, R responsePrototype,
+ Callback<R> callback) throws ServiceException, Throwable {
+ delegate.batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype, callback);
}
@Override
- public void setAutoFlushTo(boolean autoFlush) {
- delegate.setAutoFlushTo(autoFlush);
+ public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
+ byte[] value, RowMutations mutation) throws IOException {
+ return delegate.checkAndMutate(row, family, qualifier, compareOp, value, mutation);
}
@Override
- public long getWriteBufferSize() {
- return delegate.getWriteBufferSize();
+ public void setOperationTimeout(int operationTimeout) {
+ delegate.setOperationTimeout(operationTimeout);
}
@Override
- public void setWriteBufferSize(long writeBufferSize) throws IOException {
- delegate.setWriteBufferSize(writeBufferSize);
+ public int getOperationTimeout() {
+ return delegate.getOperationTimeout();
}
@Override
- public <R extends Message> Map<byte[], R> batchCoprocessorService(MethodDescriptor methodDescriptor,
- Message request, byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable {
- return delegate.batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype);
+ public int getRpcTimeout() {
+ return delegate.getRpcTimeout();
}
@Override
- public <R extends Message> void batchCoprocessorService(MethodDescriptor methodDescriptor, Message request,
- byte[] startKey, byte[] endKey, R responsePrototype, Callback<R> callback) throws ServiceException,
- Throwable {
- delegate.batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype, callback);
+ public void setRpcTimeout(int rpcTimeout) {
+ delegate.setRpcTimeout(rpcTimeout);
}
@Override
- public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, byte[] value,
- RowMutations mutation) throws IOException {
- return delegate.checkAndMutate(row, family, qualifier, compareOp, value, mutation);
+ public TableDescriptor getDescriptor() throws IOException {
+ return delegate.getDescriptor();
}
@Override
- public void setOperationTimeout(int i) {
- delegate.setOperationTimeout(i);
+ public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
+ byte[] value, Put put) throws IOException {
+ return delegate.checkAndPut(row, family, qualifier, op, value, put);
}
@Override
- public int getOperationTimeout() {
- return delegate.getOperationTimeout();
+ public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
+ byte[] value, Delete delete) throws IOException {
+ return delegate.checkAndDelete(row, family, qualifier, op, value, delete);
}
@Override
- public void setRpcTimeout(int i) {
- delegate.setRpcTimeout(i);
+ public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
+ byte[] value, RowMutations mutation) throws IOException {
+ return delegate.checkAndMutate(row, family, qualifier, op, value, mutation);
}
@Override
- public int getRpcTimeout() {
- return delegate.getRpcTimeout();
+ public int getReadRpcTimeout() {
+ return delegate.getReadRpcTimeout();
}
@Override
- public boolean[] existsAll(List<Get> gets) throws IOException {
- return delegate.existsAll(gets);
- }
-
- @Override
- public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
- CompareOp compareOp, byte[] value, Put put) throws IOException {
- return delegate.checkAndPut(row, family, qualifier, value, put);
- }
+ public void setReadRpcTimeout(int readRpcTimeout) {
+ delegate.setReadRpcTimeout(readRpcTimeout);
+ }
- @Override
- public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
- CompareOp compareOp, byte[] value, Delete delete)
- throws IOException {
- return delegate.checkAndDelete(row, family, qualifier, compareOp, value, delete);
- }
+ @Override
+ public int getWriteRpcTimeout() {
+ return delegate.getWriteRpcTimeout();
+ }
+ @Override
+ public void setWriteRpcTimeout(int writeRpcTimeout) {
+ delegate.setWriteRpcTimeout(writeRpcTimeout);
+ }
}
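During the transition the Table interface exposes both the deprecated CompareFilter.CompareOp overloads and the new CompareOperator ones this delegate now implements. A sketch of the newer form, with placeholder table and column names:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CheckAndPutSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("T"))) {
                Put put = new Put(Bytes.toBytes("row1"));
                put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("new"));
                // Apply the Put only if the current cell value equals "old"
                boolean applied = table.checkAndPut(Bytes.toBytes("row1"),
                        Bytes.toBytes("cf"), Bytes.toBytes("q"),
                        CompareOperator.EQUAL, Bytes.toBytes("old"), put);
                System.out.println("applied=" + applied);
            }
        }
    }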
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index f2edca4..bd0743c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -38,9 +38,10 @@ import javax.annotation.Nonnull;
import javax.annotation.concurrent.Immutable;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
@@ -286,8 +287,8 @@ public class MutationState implements SQLCloseable {
// be called by TableResultIterator in a multi-threaded manner. Since we do not want to expose
// the Transaction outside of MutationState, this seems reasonable, as the member variables
// would not change as these threads are running.
- public HTableInterface getHTable(PTable table) throws SQLException {
- HTableInterface htable = this.getConnection().getQueryServices().getTable(table.getPhysicalName().getBytes());
+ public Table getHTable(PTable table) throws SQLException {
+ Table htable = this.getConnection().getQueryServices().getTable(table.getPhysicalName().getBytes());
if (table.isTransactional() && phoenixTransactionContext.isTransactionRunning()) {
PhoenixTransactionalTable phoenixTransactionTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, htable, table);
// Using cloned mutationState as we may have started a new transaction already
@@ -779,7 +780,7 @@ public class MutationState implements SQLCloseable {
private class MetaDataAwareHTable extends DelegateHTable {
private final TableRef tableRef;
- private MetaDataAwareHTable(HTableInterface delegate, TableRef tableRef) {
+ private MetaDataAwareHTable(Table delegate, TableRef tableRef) {
super(delegate);
this.tableRef = tableRef;
}
@@ -809,7 +810,7 @@ public class MutationState implements SQLCloseable {
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
for (PTable index : rowKeyIndexes) {
List<Delete> indexDeletes = IndexUtil.generateDeleteIndexData(table, index, deletes, ptr, connection.getKeyValueBuilder(), connection);
- HTableInterface hindex = connection.getQueryServices().getTable(index.getPhysicalName().getBytes());
+ Table hindex = connection.getQueryServices().getTable(index.getPhysicalName().getBytes());
hindex.delete(indexDeletes);
}
}
@@ -976,7 +977,7 @@ public class MutationState implements SQLCloseable {
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
- HTableInterface hTable = connection.getQueryServices().getTable(htableName);
+ Table hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
@@ -1000,7 +1001,8 @@ public class MutationState implements SQLCloseable {
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
- hTable.batch(mutationBatch);
+ // TODO need to get the results of the batch and fail if any exceptions occur.
+ hTable.batch(mutationBatch, null);
batchCount++;
}
if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
@@ -1027,7 +1029,7 @@ public class MutationState implements SQLCloseable {
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
- connection.getQueryServices().clearTableRegionCache(htableName);
+ connection.getQueryServices().clearTableRegionCache(TableName.valueOf(htableName));
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
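One way the TODO above could be resolved: Table.batch(List, Object[]) fills one result slot per action, so the caller can inspect the slots instead of passing null. A sketch, not the committed code:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.client.Table;

    final class BatchResultsSketch {
        // Submit a batch and fail loudly if any action did not complete.
        static void batchOrThrow(Table hTable, List<Mutation> mutationBatch)
                throws IOException, InterruptedException {
            Object[] results = new Object[mutationBatch.size()];
            hTable.batch(mutationBatch, results); // one slot per action
            for (Object r : results) {
                if (r == null) { // null means the action never completed
                    throw new IOException("batch action did not complete");
                }
            }
        }
    }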
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java
index 45e271d..8426484 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java
@@ -22,7 +22,7 @@ import java.util.concurrent.ExecutorService;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
public class CoprocessorHTableFactory implements HTableFactory {
@@ -34,12 +34,12 @@ public class CoprocessorHTableFactory implements HTableFactory {
}
@Override
- public HTableInterface getTable(ImmutableBytesPtr tablename) throws IOException {
+ public Table getTable(ImmutableBytesPtr tablename) throws IOException {
return this.e.getTable(TableName.valueOf(tablename.copyBytesIfNecessary()));
}
@Override
- public HTableInterface getTable(ImmutableBytesPtr tablename,ExecutorService pool) throws IOException {
+ public Table getTable(ImmutableBytesPtr tablename, ExecutorService pool) throws IOException {
return this.e.getTable(TableName.valueOf(tablename.copyBytesIfNecessary()), pool);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/HTableFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/HTableFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/HTableFactory.java
index e6a2e60..a73f403 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/HTableFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/HTableFactory.java
@@ -21,14 +21,14 @@ package org.apache.phoenix.hbase.index.table;
import java.io.IOException;
import java.util.concurrent.ExecutorService;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
public interface HTableFactory {
- public HTableInterface getTable(ImmutableBytesPtr tablename) throws IOException;
+ public Table getTable(ImmutableBytesPtr tablename) throws IOException;
public void shutdown();
- public HTableInterface getTable(ImmutableBytesPtr tablename, ExecutorService pool) throws IOException;
+ public Table getTable(ImmutableBytesPtr tablename, ExecutorService pool) throws IOException;
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
index 3649069..16c26ba 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
@@ -26,9 +26,10 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.CoprocessorHConnection;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
@@ -95,7 +96,7 @@ public class IndexWriterUtils {
IndexManagementUtil.setIfNotSet(conf, HTABLE_THREAD_KEY, htableThreads);
if (env instanceof RegionCoprocessorEnvironment) {
RegionCoprocessorEnvironment e = (RegionCoprocessorEnvironment) env;
RegionServerServices services = e.getRegionServerServices();
if (services instanceof HRegionServer) {
return new CoprocessorHConnectionTableFactory(conf, (HRegionServer) services);
}
@@ -110,7 +111,7 @@ public class IndexWriterUtils {
*/
private static class CoprocessorHConnectionTableFactory implements HTableFactory {
@GuardedBy("CoprocessorHConnectionTableFactory.this")
- private HConnection connection;
+ private Connection connection;
private final Configuration conf;
private final HRegionServer server;
@@ -119,7 +120,7 @@ public class IndexWriterUtils {
this.server = server;
}
- private synchronized HConnection getConnection(Configuration conf) throws IOException {
+ private synchronized Connection getConnection(Configuration conf) throws IOException {
if (connection == null || connection.isClosed()) {
connection = new CoprocessorHConnection(conf, server);
}
@@ -127,8 +128,8 @@ public class IndexWriterUtils {
}
@Override
- public HTableInterface getTable(ImmutableBytesPtr tablename) throws IOException {
- return getConnection(conf).getTable(tablename.copyBytesIfNecessary());
+ public Table getTable(ImmutableBytesPtr tablename) throws IOException {
+ return getConnection(conf).getTable(TableName.valueOf(tablename.copyBytesIfNecessary()));
}
@Override
@@ -143,9 +144,9 @@ public class IndexWriterUtils {
}
@Override
- public HTableInterface getTable(ImmutableBytesPtr tablename, ExecutorService pool)
+ public Table getTable(ImmutableBytesPtr tablename, ExecutorService pool)
throws IOException {
- return getConnection(conf).getTable(tablename.copyBytesIfNecessary(), pool);
+ return getConnection(conf).getTable(TableName.valueOf(tablename.copyBytesIfNecessary()), pool);
}
}
}
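The byte[]-keyed lookups above now go through TableName because Connection, unlike the removed HConnection, has no getTable(byte[]) overload. Stand-alone, the conversion looks like this sketch (the index name is a placeholder):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableNameSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            byte[] nameBytes = Bytes.toBytes("MY_INDEX");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 // Wrap the raw bytes once at the API boundary
                 Table table = conn.getTable(TableName.valueOf(nameBytes))) {
                System.out.println(table.getName().getNameAsString());
            }
        }
    }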
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
index e4e8343..dadaf75 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
@@ -22,8 +22,8 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException;
import org.apache.phoenix.hbase.index.parallel.EarlyExitFailure;
@@ -143,7 +143,7 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
if (LOG.isTraceEnabled()) {
LOG.trace("Writing index update:" + mutations + " to table: " + tableReference);
}
- HTableInterface table = null;
+ Table table = null;
try {
if (allowLocalUpdates
&& env != null
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
index 0449e9e..a60ced4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
@@ -24,8 +24,8 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.phoenix.hbase.index.CapturingAbortable;
import org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException;
@@ -149,7 +149,7 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
@SuppressWarnings("deprecation")
@Override
public Boolean call() throws Exception {
- HTableInterface table = null;
+ Table table = null;
try {
// this may have been queued, but there was an abort/stop so we try to early exit
throwFailureIfDone();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index cc2c6b3..c217d8e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -35,8 +35,8 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
@@ -194,7 +194,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
timestamp = minTimeStamp;
// If the data table has local index column families then get local indexes to disable.
- if (ref.getTableName().equals(env.getRegion().getTableDesc().getNameAsString())
+ if (ref.getTableName().equals(env.getRegion().getTableDesc().getTableName().getNameAsString())
&& MetaDataUtil.hasLocalIndexColumnFamily(env.getRegion().getTableDesc())) {
for (String tableName : getLocalIndexNames(ref, mutations)) {
indexTableNames.put(tableName, minTimeStamp);
@@ -225,7 +225,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
minTimeStamp *= -1;
}
// Disable the index by using the updateIndexState method of MetaDataProtocol end point coprocessor.
- try (HTableInterface systemTable = env.getTable(SchemaUtil
+ try (Table systemTable = env.getTable(SchemaUtil
.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()))) {
MetaDataMutationResult result = IndexUtil.updateIndexState(indexTableName, minTimeStamp,
systemTable, newState);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
index f3c1dbd..2dea53c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
@@ -344,7 +344,7 @@ public class PhoenixTransactionalIndexer implements RegionObserver, RegionCoproc
ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN, KeyRange.EVERYTHING_RANGE, null, true, -1);
scanRanges.initializeScan(scan);
TableName tableName = env.getRegion().getRegionInfo().getTable();
- HTableInterface htable = env.getTable(tableName);
+ Table htable = env.getTable(tableName);
txTable = TransactionFactory.getTransactionFactory().getTransactionalTable(txnContext, htable);
// For rollback, we need to see all versions, including
// the last committed version as there may be multiple
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index f037a20..46fd55c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -949,7 +949,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
} catch (StaleRegionBoundaryCacheException | HashJoinCacheNotFoundException e2){
// Catch only to try to recover from region boundary cache being out of date
if (!clearedCache) { // Clear cache once so that we rejigger job based on new boundaries
- services.clearTableRegionCache(physicalTableName);
+ services.clearTableRegionCache(TableName.valueOf(physicalTableName));
context.getOverallQueryMetrics().cacheRefreshedDueToSplits();
}
// Resubmit just this portion of work again
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
index 68592ef..c09b3c4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
@@ -164,12 +164,12 @@ public class SnapshotScanner extends AbstractClientScanner {
}
@Override
- public HTableInterface getTable(TableName tableName) throws IOException {
+ public Table getTable(TableName tableName) throws IOException {
throw new UnsupportedOperationException();
}
@Override
- public HTableInterface getTable(TableName tableName, ExecutorService executorService)
+ public Table getTable(TableName tableName, ExecutorService executorService)
throws IOException {
throw new UnsupportedOperationException();
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
index e812854..c9e7bfb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
@@ -37,8 +37,8 @@ import java.util.concurrent.locks.ReentrantLock;
import javax.annotation.concurrent.GuardedBy;
import org.apache.hadoop.hbase.client.AbstractClientScanner;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.cache.ServerCacheClient;
@@ -75,7 +75,7 @@ import com.google.common.annotations.VisibleForTesting;
*/
public class TableResultIterator implements ResultIterator {
private final Scan scan;
- private final HTableInterface htable;
+ private final Table htable;
private final ScanMetricsHolder scanMetricsHolder;
private static final ResultIterator UNINITIALIZED_SCANNER = ResultIterator.EMPTY_ITERATOR;
private final long renewLeaseThreshold;
@@ -188,7 +188,7 @@ public class TableResultIterator implements ResultIterator {
newScan.setStartRow(ByteUtil.nextKey(startRowSuffix));
}
}
- plan.getContext().getConnection().getQueryServices().clearTableRegionCache(htable.getTableName());
+ plan.getContext().getConnection().getQueryServices().clearTableRegionCache(htable.getName());
if (e1 instanceof HashJoinCacheNotFoundException) {
logger.debug(
"Retrying when Hash Join cache is not found on the server ,by sending the cache again");
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
index ec1b451..bf4e277 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
@@ -24,6 +24,7 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.NullWritable;
@@ -109,7 +110,7 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
// Clear the table region boundary cache to make sure long running jobs stay up to date
byte[] tableNameBytes = queryPlan.getTableRef().getTable().getPhysicalName().getBytes();
ConnectionQueryServices services = queryPlan.getContext().getConnection().getQueryServices();
- services.clearTableRegionCache(tableNameBytes);
+ services.clearTableRegionCache(TableName.valueOf(tableNameBytes));
long renewScannerLeaseThreshold = queryPlan.getContext().getConnection().getQueryServices().getRenewLeaseThresholdMilliSeconds();
boolean isRequestMetricsEnabled = readMetrics.isRequestMetricsEnabled();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java
index 59b26b2..11a8176 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/DirectHTableWriter.java
@@ -22,8 +22,11 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -36,7 +39,7 @@ public class DirectHTableWriter {
private Configuration conf = null;
- private HTable table;
+ private Table table;
public DirectHTableWriter(Configuration otherConf) {
setConf(otherConf);
@@ -51,8 +54,8 @@ public class DirectHTableWriter {
}
try {
- this.table = new HTable(this.conf, tableName);
- this.table.setAutoFlush(false, true);
+ Connection conn = ConnectionFactory.createConnection(this.conf);
+ this.table = conn.getTable(TableName.valueOf(tableName));
LOG.info("Created table instance for " + tableName);
} catch (IOException e) {
LOG.error("IOException : ", e);
@@ -69,7 +72,7 @@ public class DirectHTableWriter {
return conf;
}
- protected HTable getTable() {
+ protected Table getTable() {
return table;
}
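Table has no setAutoFlush, so the buffering that the removed setAutoFlush(false, true) call provided now lives in BufferedMutator. A sketch of that replacement, under the assumption that buffered writes are still wanted (names are placeholders); note too that the Connection created above should eventually be closed along with the table:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.BufferedMutator;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BufferedWriteSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("T"))) {
                Put put = new Put(Bytes.toBytes("row1"));
                put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
                mutator.mutate(put); // buffered client-side, like autoFlush=false
                mutator.flush();     // explicit flush replaces flushCommits()
            }
        }
    }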
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
index 671e4cf..af080b4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
@@ -46,11 +46,14 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat;
+import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
@@ -431,8 +434,9 @@ public class IndexTool extends Configured implements Tool {
final Configuration configuration = job.getConfiguration();
final String physicalIndexTable =
PhoenixConfigurationUtil.getPhysicalTableName(configuration);
- final HTable htable = new HTable(configuration, physicalIndexTable);
- HFileOutputFormat.configureIncrementalLoad(job, htable);
+ org.apache.hadoop.hbase.client.Connection conn = ConnectionFactory.createConnection(configuration);
+ TableName tablename = TableName.valueOf(physicalIndexTable);
+ HFileOutputFormat2.configureIncrementalLoad(job, conn.getTable(tablename), conn.getRegionLocator(tablename));
return job;
}
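HFileOutputFormat2.configureIncrementalLoad wants a RegionLocator alongside the Table. Since configuration only reads region boundaries up front, both handles can be scoped and closed, which the hunk above leaves to the caller; a sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
    import org.apache.hadoop.mapreduce.Job;

    final class IncrementalLoadSketch {
        static void configure(Job job, Configuration conf, String physicalIndexTable)
                throws Exception {
            TableName name = TableName.valueOf(physicalIndexTable);
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(name);
                 RegionLocator locator = conn.getRegionLocator(name)) {
                // Region boundaries are read here; handles can be closed afterwards
                HFileOutputFormat2.configureIncrementalLoad(job, table, locator);
            }
        }
    }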
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
index 45ab5fa..558df85 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
@@ -26,9 +26,10 @@ import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.compile.MutationPlan;
@@ -66,7 +67,7 @@ public interface ConnectionQueryServices extends QueryServices, MetaDataMutated
* @return the HTableInterface
* @throws SQLException
*/
- public HTableInterface getTable(byte[] tableName) throws SQLException;
+ public Table getTable(byte[] tableName) throws SQLException;
public HTableDescriptor getTableDescriptor(byte[] tableName) throws SQLException;
@@ -93,7 +94,7 @@ public interface ConnectionQueryServices extends QueryServices, MetaDataMutated
public int getLowestClusterHBaseVersion();
public HBaseAdmin getAdmin() throws SQLException;
- void clearTableRegionCache(byte[] tableName) throws SQLException;
+ void clearTableRegionCache(TableName name) throws SQLException;
boolean hasIndexWALCodec();
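With this signature change, callers holding raw physical-name bytes convert once at the boundary, as the later hunks in this commit do; a minimal sketch against the interface above:

    import java.sql.SQLException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.phoenix.query.ConnectionQueryServices;

    final class ClearRegionCacheSketch {
        static void clear(ConnectionQueryServices services, byte[] physicalName)
                throws SQLException {
            // TableName.valueOf wraps the bytes the old byte[] overload accepted
            services.clearTableRegionCache(TableName.valueOf(physicalName));
        }
    }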
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 248e4a3..1d9a521 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -106,13 +106,14 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -281,7 +282,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
private final Object connectionCountLock = new Object();
private final boolean returnSequenceValues ;
- private HConnection connection;
+ private Connection connection;
private ZKClientService txZKClientService;
private volatile boolean initialized;
private volatile int nSequenceSaltBuckets;
@@ -423,7 +424,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
@Override
- public HTableInterface getTable(byte[] tableName) throws SQLException {
+ public Table getTable(byte[] tableName) throws SQLException {
try {
return HBaseFactoryProvider.getHTableFactory().getTable(tableName, connection, null);
} catch (org.apache.hadoop.hbase.TableNotFoundException e) {
@@ -435,7 +436,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
@Override
public HTableDescriptor getTableDescriptor(byte[] tableName) throws SQLException {
- HTableInterface htable = getTable(tableName);
+ Table htable = getTable(tableName);
try {
return htable.getTableDescriptor();
} catch (IOException e) {
@@ -544,8 +545,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
@Override
- public void clearTableRegionCache(byte[] tableName) throws SQLException {
- connection.clearRegionCache(TableName.valueOf(tableName));
+ public void clearTableRegionCache(TableName tableName) throws SQLException {
+ ((ClusterConnection)connection).clearRegionCache(tableName);
}
@Override
@@ -564,7 +565,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
List<HRegionLocation> locations = Lists.newArrayList();
byte[] currentKey = HConstants.EMPTY_START_ROW;
do {
- HRegionLocation regionLocation = connection.getRegionLocation(
+ HRegionLocation regionLocation = ((ClusterConnection)connection).getRegionLocation(
TableName.valueOf(tableName), currentKey, reload);
locations.add(regionLocation);
currentKey = regionLocation.getRegionInfo().getEndKey();
@@ -1170,7 +1171,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
boolean isIncompatible = false;
int minHBaseVersion = Integer.MAX_VALUE;
boolean isTableNamespaceMappingEnabled = false;
- HTableInterface ht = null;
+ Table ht = null;
try {
List<HRegionLocation> locations = this
.getAllTableRegions(metaTable);
@@ -1271,7 +1272,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
connection.relocateRegion(SchemaUtil.getPhysicalName(tableName, this.getProps()), tableKey);
}
- HTableInterface ht = this.getTable(SchemaUtil.getPhysicalName(tableName, this.getProps()).getName());
+ Table ht = this.getTable(SchemaUtil.getPhysicalName(tableName, this.getProps()).getName());
try {
final Map<byte[], MetaDataResponse> results =
ht.coprocessorService(MetaDataService.class, tableKey, tableKey, callable);
@@ -1323,14 +1324,15 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
boolean wasDeleted = false;
try (HBaseAdmin admin = getAdmin()) {
try {
- HTableDescriptor desc = admin.getTableDescriptor(physicalIndexName);
+ TableName physicalIndexTableName = TableName.valueOf(physicalIndexName);
+ HTableDescriptor desc = admin.getTableDescriptor(physicalIndexTableName);
if (Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(desc.getValue(MetaDataUtil.IS_VIEW_INDEX_TABLE_PROP_BYTES)))) {
final ReadOnlyProps props = this.getProps();
final boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
if (dropMetadata) {
- admin.disableTable(physicalIndexName);
- admin.deleteTable(physicalIndexName);
- clearTableRegionCache(physicalIndexName);
+ admin.disableTable(physicalIndexTableName);
+ admin.deleteTable(physicalIndexTableName);
+ clearTableRegionCache(physicalIndexTableName);
wasDeleted = true;
} else {
this.tableStatsCache.invalidateAll(desc);
@@ -1366,7 +1368,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
for(String cf: columnFamiles) {
admin.deleteColumn(physicalTableName, cf);
}
- clearTableRegionCache(physicalTableName);
+ clearTableRegionCache(TableName.valueOf(physicalTableName));
wasDeleted = true;
}
} catch (org.apache.hadoop.hbase.TableNotFoundException ignore) {
@@ -1626,7 +1628,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
admin.disableTable(tableName);
admin.deleteTable(tableName);
tableStatsCache.invalidateAll(htableDesc);
- clearTableRegionCache(tableName);
+ clearTableRegionCache(TableName.valueOf(tableName));
} catch (TableNotFoundException ignore) {
}
}
@@ -2513,7 +2515,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
columnDesc.setTimeToLive(TTL_FOR_MUTEX); // Let mutex expire after some time
tableDesc.addFamily(columnDesc);
admin.createTable(tableDesc);
- try (HTableInterface sysMutexTable = getTable(mutexTableName.getName())) {
+ try (Table sysMutexTable = getTable(mutexTableName.getName())) {
byte[] mutexRowKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA,
PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE);
Put put = new Put(mutexRowKey);
@@ -2845,7 +2847,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_SCHEMA_BYTES,
PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE_BYTES,
MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP);
- clearTableRegionCache(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
+ clearTableRegionCache(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES));
}
nSequenceSaltBuckets = nSaltBuckets;
} else {
@@ -3113,7 +3115,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
byte[] mutexRowKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA,
PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE);
- HTableInterface metatable = null;
+ Table metatable = null;
try (HBaseAdmin admin = getAdmin()) {
// SYSTEM namespace needs to be created via HBase API's because "CREATE SCHEMA" statement tries to write its metadata
// in SYSTEM:CATALOG table. Without SYSTEM namespace, SYSTEM:CATALOG table cannot be created.
@@ -3207,7 +3209,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
getVersion(MIN_SYSTEM_TABLE_TIMESTAMP));
}
- try (HTableInterface sysMutexTable = getTable(sysMutexPhysicalTableNameBytes)) {
+ try (Table sysMutexTable = getTable(sysMutexPhysicalTableNameBytes)) {
byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
byte[] qualifier = UPGRADE_MUTEX;
byte[] oldValue = UPGRADE_MUTEX_UNLOCKED;
@@ -3243,7 +3245,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
return true;
}
- try (HTableInterface sysMutexTable = getTable(sysMutexPhysicalTableNameBytes)) {
+ try (Table sysMutexTable = getTable(sysMutexPhysicalTableNameBytes)) {
byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
byte[] qualifier = UPGRADE_MUTEX;
byte[] expectedValue = UPGRADE_MUTEX_LOCKED;
@@ -3457,7 +3459,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
latestMetaData = newEmptyMetaData();
}
tableStatsCache.invalidateAll();
- try (HTableInterface htable =
+ try (Table htable =
this.getTable(
SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES,
this.getProps()).getName())) {
@@ -3561,9 +3563,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
sequence.getLock().lock();
// Now that we have the lock we need, create the sequence
Append append = sequence.createSequence(startWith, incrementBy, cacheSize, timestamp, minValue, maxValue, cycle);
- HTableInterface htable = this.getTable(SchemaUtil
+ Table htable = this.getTable(SchemaUtil
.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
- htable.setAutoFlush(true);
try {
Result result = htable.append(append);
return sequence.createSequence(result, minValue, maxValue, cycle);
@@ -3589,7 +3590,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
sequence.getLock().lock();
// Now that we have the lock we need, create the sequence
Append append = sequence.dropSequence(timestamp);
- HTableInterface htable = this.getTable(SchemaUtil
+ Table htable = this.getTable(SchemaUtil
.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
try {
Result result = htable.append(append);
@@ -3686,11 +3687,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
if (toIncrementList.isEmpty()) {
return;
}
- HTableInterface hTable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES,this.getProps()).getName());
+ Table hTable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES,this.getProps()).getName());
Object[] resultObjects = null;
SQLException sqlE = null;
try {
- resultObjects= hTable.batch(incrementBatch);
+ resultObjects = new Object[incrementBatch.size()];
+ hTable.batch(incrementBatch, resultObjects);
} catch (IOException e) {
sqlE = ServerUtil.parseServerException(e);
} catch (InterruptedException e) {
@@ -3735,7 +3736,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
// clear the meta data cache for the table here
try {
SQLException sqlE = null;
- HTableInterface htable = this.getTable(SchemaUtil
+ Table htable = this.getTable(SchemaUtil
.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName());
try {
@@ -3810,12 +3811,12 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
if (toReturnList.isEmpty()) {
return;
}
- HTableInterface hTable = this.getTable(SchemaUtil
+ Table hTable = this.getTable(SchemaUtil
.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
Object[] resultObjects = null;
SQLException sqlE = null;
try {
- resultObjects= hTable.batch(mutations);
+ resultObjects = new Object[mutations.size()];
+ hTable.batch(mutations, resultObjects);
} catch (IOException e){
sqlE = ServerUtil.parseServerException(e);
} catch (InterruptedException e){
@@ -3864,11 +3865,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
if (mutations.isEmpty()) {
return;
}
- HTableInterface hTable = this.getTable(
+ Table hTable = this.getTable(
SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
SQLException sqlE = null;
try {
- hTable.batch(mutations);
+ hTable.batch(mutations, null);
} catch (IOException e) {
sqlE = ServerUtil.parseServerException(e);
} catch (InterruptedException e) {
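A note on the batch() call sites above: HTableInterface.batch(List) returned Object[], while Table.batch(List, Object[]) returns void and fills a caller-supplied array, which must be pre-allocated to the size of the action list (it may be null only when per-action results are not needed). A minimal sketch of the new pattern, with a hypothetical helper name:

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.client.Row;
    import org.apache.hadoop.hbase.client.Table;

    public final class BatchUtil {
        private BatchUtil() {}

        // Mirrors the removed Object[] batch(List) convenience of
        // HTableInterface on top of the void Table.batch(List, Object[]).
        public static Object[] batchWithResults(Table table, List<? extends Row> actions)
                throws IOException, InterruptedException {
            Object[] results = new Object[actions.size()]; // must match actions.size()
            table.batch(actions, results);                 // void; filled in place
            return results;
        }
    }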
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index f15e0b1..410bb71 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -37,8 +37,8 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
@@ -149,7 +149,7 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
}
@Override
- public HTableInterface getTable(byte[] tableName) throws SQLException {
+ public Table getTable(byte[] tableName) throws SQLException {
throw new UnsupportedOperationException();
}
@@ -388,7 +388,7 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
}
@Override
- public void clearTableRegionCache(byte[] tableName) throws SQLException {
+ public void clearTableRegionCache(TableName tableName) throws SQLException {
}
@Override
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
index 6c464eb..e57dadd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
@@ -26,9 +26,10 @@ import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.compile.MutationPlan;
@@ -66,7 +67,7 @@ public class DelegateConnectionQueryServices extends DelegateQueryServices imple
}
@Override
- public HTableInterface getTable(byte[] tableName) throws SQLException {
+ public Table getTable(byte[] tableName) throws SQLException {
return getDelegate().getTable(tableName);
}
@@ -162,7 +163,7 @@ public class DelegateConnectionQueryServices extends DelegateQueryServices imple
}
@Override
- public void clearTableRegionCache(byte[] tableName) throws SQLException {
+ public void clearTableRegionCache(TableName tableName) throws SQLException {
getDelegate().clearTableRegionCache(tableName);
}
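With clearTableRegionCache() now taking a TableName, callers that still hold raw byte[] names convert once up front, as the ConnectionQueryServicesImpl hunks above do. A sketch, with hypothetical names, of the drop-and-invalidate shape:

    import java.io.IOException;
    import java.sql.SQLException;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    // Drops a physical table and invalidates its cached region locations,
    // wrapping the raw byte[] name exactly once.
    void dropPhysicalTable(Admin admin, byte[] physicalTableNameBytes)
            throws IOException, SQLException {
        TableName physicalName = TableName.valueOf(physicalTableNameBytes);
        admin.disableTable(physicalName);
        admin.deleteTable(physicalName);
        clearTableRegionCache(physicalName); // TableName-typed signature
    }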
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
index d27be1b..d695f41 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.schema.PColumnFamily;
@@ -90,7 +90,7 @@ public class GuidePostsCache {
@Override
public GuidePostsInfo load(GuidePostsKey statsKey) throws Exception {
@SuppressWarnings("deprecation")
- HTableInterface statsHTable = queryServices.getTable(SchemaUtil.getPhysicalName(
+ Table statsHTable = queryServices.getTable(SchemaUtil.getPhysicalName(
PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES,
queryServices.getProps()).getName());
try {
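The Table handle returned here is cheap to create but not closed automatically. The loader body is elided above, so the following is only an assumed sketch of the usual acquire/try/finally shape around readStatistics():

    Table statsHTable = queryServices.getTable(SchemaUtil.getPhysicalName(
            PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES,
            queryServices.getProps()).getName());
    try {
        return StatisticsUtil.readStatistics(statsHTable, statsKey,
                HConstants.LATEST_TIMESTAMP);
    } finally {
        statsHTable.close(); // Table handles must be closed explicitly
    }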
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/query/HConnectionFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/HConnectionFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/query/HConnectionFactory.java
index 72f4182..0912c2c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/HConnectionFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/HConnectionFactory.java
@@ -20,8 +20,8 @@ package org.apache.phoenix.query;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
/**
- * Factory for creating {@link HConnection}
+ * Factory for creating {@link Connection}
@@ -36,15 +36,15 @@ public interface HConnectionFactory {
- * @param configuration object
- * @return A HConnection instance
+ * @param conf the HBase configuration
+ * @return A Connection instance
*/
- HConnection createConnection(Configuration conf) throws IOException;
+ Connection createConnection(Configuration conf) throws IOException;
/**
- * Default implementation. Uses standard HBase HConnections.
+ * Default implementation. Uses standard HBase Connections.
*/
static class HConnectionFactoryImpl implements HConnectionFactory {
@Override
- public HConnection createConnection(Configuration conf) throws IOException {
- return HConnectionManager.createConnection(conf);
+ public Connection createConnection(Configuration conf) throws IOException {
+ return ConnectionFactory.createConnection(conf);
}
}
}
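ConnectionFactory.createConnection() is the drop-in replacement for the removed HConnectionManager.createConnection(). The returned Connection is heavyweight and thread-safe, meant to be shared process-wide, while Table handles created from it are lightweight and per-use. A minimal usage sketch (the table name is illustrative):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class ConnectionExample {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            // One shared, thread-safe Connection per process...
            try (Connection connection = ConnectionFactory.createConnection(conf)) {
                // ...and short-lived Table handles created from it per operation.
                try (Table table =
                        connection.getTable(TableName.valueOf("SYSTEM:CATALOG"))) {
                    // issue Gets/Puts/Scans here
                }
            }
        }
    }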
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/query/HTableFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/HTableFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/query/HTableFactory.java
index 09dbff6..10a531f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/HTableFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/HTableFactory.java
@@ -20,8 +20,9 @@ package org.apache.phoenix.query;
import java.io.IOException;
import java.util.concurrent.ExecutorService;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Table;
/**
* Creates clients to access HBase tables.
@@ -39,16 +40,16 @@ public interface HTableFactory {
* @return A client to access an HBase table.
* @throws IOException if a server or network exception occurs
*/
- HTableInterface getTable(byte[] tableName, HConnection connection, ExecutorService pool) throws IOException;
+ Table getTable(byte[] tableName, Connection connection, ExecutorService pool) throws IOException;
/**
* Default implementation. Uses standard HBase HTables.
*/
static class HTableFactoryImpl implements HTableFactory {
@Override
- public HTableInterface getTable(byte[] tableName, HConnection connection, ExecutorService pool) throws IOException {
+ public Table getTable(byte[] tableName, Connection connection, ExecutorService pool) throws IOException {
// Let the HBase client manage the thread pool instead of passing ours through
- return connection.getTable(tableName);
+ return connection.getTable(TableName.valueOf(tableName));
}
}
}
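Connection.getTable() keys on TableName rather than raw bytes, hence the TableName.valueOf() conversion, and by default the connection manages its own thread pool. A caller that does want to pass its own pool through can use the two-argument overload; a sketch of that alternative:

    import java.io.IOException;
    import java.util.concurrent.ExecutorService;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Table;

    // The alternative the default factory deliberately avoids: supply an
    // application-owned pool via Connection.getTable(TableName, ExecutorService).
    static Table getTableWithPool(byte[] tableName, Connection connection,
            ExecutorService pool) throws IOException {
        return connection.getTable(TableName.valueOf(tableName), pool);
    }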
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
index daf7c70..8f36fd6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
@@ -30,10 +30,10 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
@@ -124,7 +124,7 @@ class DefaultStatisticsCollector implements StatisticsCollector {
env.getRegion().getTableDesc());
} else {
long guidepostWidth = -1;
- HTableInterface htable = null;
+ Table htable = null;
try {
// Next check for GUIDE_POST_WIDTH on table
htable = env.getTable(
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
index 0b9c409..71b01ae 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
@@ -26,10 +26,10 @@ import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.coprocessor.MetaDataProtocol;
@@ -128,7 +128,7 @@ public class StatisticsUtil {
return key;
}
- public static GuidePostsInfo readStatistics(HTableInterface statsHTable, GuidePostsKey key, long clientTimeStamp)
+ public static GuidePostsInfo readStatistics(Table statsHTable, GuidePostsKey key, long clientTimeStamp)
throws IOException {
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
ptr.set(key.getColumnFamily());
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
index dfca30e..04ad575 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
@@ -32,12 +32,12 @@ import java.util.List;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
@@ -80,24 +80,24 @@ public class StatisticsWriter implements Closeable {
if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) {
clientTimeStamp = EnvironmentEdgeManager.currentTimeMillis();
}
- HTableInterface statsWriterTable = env.getTable(
+ Table statsWriterTable = env.getTable(
SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES, env.getConfiguration()));
- HTableInterface statsReaderTable = ServerUtil.getHTableForCoprocessorScan(env, statsWriterTable);
+ Table statsReaderTable = ServerUtil.getHTableForCoprocessorScan(env, statsWriterTable);
StatisticsWriter statsTable = new StatisticsWriter(statsReaderTable, statsWriterTable, tableName,
clientTimeStamp, guidePostDepth);
return statsTable;
}
- private final HTableInterface statsWriterTable;
+ private final Table statsWriterTable;
// In HBase 0.98.4 or above, the reader and writer will be the same.
// In pre HBase 0.98.4, there was a bug in using the HTable returned
// from a coprocessor for scans, so in that case it'll be different.
- private final HTableInterface statsReaderTable;
+ private final Table statsReaderTable;
private final byte[] tableName;
private final long clientTimeStamp;
private final long guidePostDepth;
- private StatisticsWriter(HTableInterface statsReaderTable, HTableInterface statsWriterTable, String tableName,
+ private StatisticsWriter(Table statsReaderTable, Table statsWriterTable, String tableName,
long clientTimeStamp, long guidePostDepth) {
this.statsReaderTable = statsReaderTable;
this.statsWriterTable = statsWriterTable;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
index 0957e56..9798f79 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@@ -36,6 +35,7 @@ import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.coprocessor.Batch.Call;
import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
@@ -48,7 +48,7 @@ import com.google.protobuf.ServiceException;
public class OmidTransactionTable implements PhoenixTransactionalTable {
- public OmidTransactionTable(PhoenixTransactionContext ctx, HTableInterface hTable) {
+ public OmidTransactionTable(PhoenixTransactionContext ctx, Table hTable) {
// TODO Auto-generated constructor stub
}
@@ -162,36 +162,6 @@ public class OmidTransactionTable implements PhoenixTransactionalTable {
}
@Override
- public long incrementColumnValue(byte[] row, byte[] family,
- byte[] qualifier, long amount, boolean writeToWAL)
- throws IOException {
- // TODO Auto-generated method stub
- return 0;
- }
-
- @Override
- public Boolean[] exists(List<Get> gets) throws IOException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) {
- // TODO Auto-generated method stub
- }
-
- @Override
- public void setAutoFlushTo(boolean autoFlush) {
- // TODO Auto-generated method stub
- }
-
- @Override
- public Result getRowOrBefore(byte[] row, byte[] family) throws IOException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
public TableName getName() {
// TODO Auto-generated method stub
return null;
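The overrides deleted here exist only on the removed HTableInterface. Their nearest equivalents under the Table API, sketched for reference against the HBase 2.0 client (this is not Phoenix code):

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.client.BufferedMutator;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    class RemovedMethodEquivalents {
        void sketch(Connection connection, Table table, List<Get> gets, byte[] row,
                byte[] family, byte[] qualifier, Put put) throws IOException {
            // incrementColumnValue(..., boolean writeToWAL): the boolean flag
            // became an explicit Durability argument.
            long value = table.incrementColumnValue(row, family, qualifier, 1L,
                    Durability.SYNC_WAL);

            // Boolean[] exists(List<Get>): Table returns a primitive boolean[].
            boolean[] found = table.exists(gets);

            // setAutoFlush()/setAutoFlushTo(): client-side write buffering moved
            // to BufferedMutator; a Table flushes every call immediately.
            try (BufferedMutator mutator =
                    connection.getBufferedMutator(table.getName())) {
                mutator.mutate(put);
            }

            // getRowOrBefore(): removed outright; a reverse Scan bounded at the
            // target row is the usual substitute.
        }
    }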
http://git-wip-us.apache.org/repos/asf/phoenix/blob/11390427/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
index 7af1c08..1293a21 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
@@ -19,36 +19,36 @@ package org.apache.phoenix.transaction;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HTableDescriptor;
import java.io.IOException;
import java.util.List;
-public interface PhoenixTransactionalTable extends HTableInterface {
+public interface PhoenixTransactionalTable extends Table {
/**
- * Transaction version of {@link HTableInterface#get(Get get)}
+ * Transactional version of {@link Table#get(Get get)}
* @param get
* @throws IOException
*/
public Result get(Get get) throws IOException;
/**
- * Transactional version of {@link HTableInterface#put(Put put)}
+ * Transactional version of {@link Table#put(Put put)}
* @param put
* @throws IOException
*/
public void put(Put put) throws IOException;
/**
- * Transactional version of {@link HTableInterface#delete(Delete delete)}
+ * Transactional version of {@link Table#delete(Delete delete)}
*
* @param delete
* @throws IOException
@@ -56,7 +56,7 @@ public interface PhoenixTransactionalTable extends HTableInterface {
public void delete(Delete delete) throws IOException;
/**
- * Transactional version of {@link HTableInterface#getScanner(Scan scan)}
+ * Transactional version of {@link Table#getScanner(Scan scan)}
*
* @param scan
* @return ResultScanner
@@ -87,31 +87,31 @@ public interface PhoenixTransactionalTable extends HTableInterface {
public boolean exists(Get get) throws IOException;
/**
- * Transactional version of {@link HTableInterface#get(List gets)}
+ * Transactional version of {@link Table#get(List gets)}
* @throws IOException
*/
public Result[] get(List<Get> gets) throws IOException;
/**
- * Transactional version of {@link HTableInterface#getScanner(byte[] family)}
+ * Transactional version of {@link Table#getScanner(byte[] family)}
* @throws IOException
*/
public ResultScanner getScanner(byte[] family) throws IOException;
/**
- * Transactional version of {@link HTableInterface#getScanner(byte[] family, byte[] qualifier)}
+ * Transactional version of {@link Table#getScanner(byte[] family, byte[] qualifier)}
* @throws IOException
*/
public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException;
/**
- * Transactional version of {@link HTableInterface#put(List puts)}
+ * Transactional version of {@link Table#put(List puts)}
* @throws IOException
*/
public void put(List<Put> puts) throws IOException;
/**
- * Transactional version of {@link HTableInterface#delete(List deletes)}
+ * Transactional version of {@link Table#delete(List deletes)}
* @throws IOException
*/
public void delete(List<Delete> deletes) throws IOException;