Posted to commits@phoenix.apache.org by an...@apache.org on 2017/11/27 13:03:09 UTC
[1/8] phoenix git commit: PHOENIX-4304 Refactoring to avoid using deprecated HTableDescriptor, HColumnDescriptor, HRegionInfo (Rajeshbabu Chintaguntla)
Repository: phoenix
Updated Branches:
refs/heads/5.x-HBase-2.0 1beac2746 -> 372006816
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index b748568..142b80e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -31,14 +31,15 @@ import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
@@ -159,9 +160,11 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
if (regions != null) {
return regions;
}
- return Collections.singletonList(new HRegionLocation(
- new HRegionInfo(TableName.valueOf(tableName), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW),
- SERVER_NAME, -1));
+ RegionInfo hri =
+ RegionInfoBuilder.newBuilder(TableName.valueOf(tableName))
+ .setStartKey(HConstants.EMPTY_START_ROW)
+ .setEndKey(HConstants.EMPTY_END_ROW).build();
+ return Collections.singletonList(new HRegionLocation(hri, SERVER_NAME, -1));
}
@Override
@@ -222,14 +225,14 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
byte[] startKey = HConstants.EMPTY_START_ROW;
List<HRegionLocation> regions = Lists.newArrayListWithExpectedSize(splits.length);
for (byte[] split : splits) {
- regions.add(new HRegionLocation(
- new HRegionInfo(TableName.valueOf(physicalName), startKey, split),
- SERVER_NAME, -1));
+ regions.add(new HRegionLocation(RegionInfoBuilder
+ .newBuilder(TableName.valueOf(physicalName)).setStartKey(startKey)
+ .setEndKey(split).build(), SERVER_NAME, -1));
startKey = split;
}
- regions.add(new HRegionLocation(
- new HRegionInfo(TableName.valueOf(physicalName), startKey, HConstants.EMPTY_END_ROW),
- SERVER_NAME, -1));
+ regions.add(new HRegionLocation(RegionInfoBuilder
+ .newBuilder(TableName.valueOf(physicalName)).setStartKey(startKey)
+ .setEndKey(HConstants.EMPTY_END_ROW).build(), SERVER_NAME, -1));
return regions;
}
@@ -383,7 +386,7 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
}
@Override
- public HTableDescriptor getTableDescriptor(byte[] tableName) throws SQLException {
+ public TableDescriptor getTableDescriptor(byte[] tableName) throws SQLException {
return null;
}
@@ -582,16 +585,16 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
public HRegionLocation getTableRegionLocation(byte[] tableName, byte[] row) throws SQLException {
List<HRegionLocation> regions = tableSplits.get(Bytes.toString(tableName));
if (regions != null) {
- for(HRegionLocation region: regions) {
- if (Bytes.compareTo(region.getRegionInfo().getStartKey(), row) <= 0
- && Bytes.compareTo(region.getRegionInfo().getEndKey(), row) > 0) {
- return region;
- }
- }
+ for (HRegionLocation region : regions) {
+ if (Bytes.compareTo(region.getRegion().getStartKey(), row) <= 0
+ && Bytes.compareTo(region.getRegion().getEndKey(), row) > 0) {
+ return region;
+ }
+ }
}
- return new HRegionLocation(
- new HRegionInfo(TableName.valueOf(tableName), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW),
- SERVER_NAME, -1);
+ return new HRegionLocation(RegionInfoBuilder.newBuilder(TableName.valueOf(tableName))
+ .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW)
+ .build(), SERVER_NAME, -1);
}
@Override
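The pattern repeated throughout this file is the replacement of the deprecated HRegionInfo constructor with the RegionInfoBuilder fluent API from HBase 2.0. A minimal standalone sketch of the equivalence (the table name passed in is illustrative):

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;

    public class RegionInfoExample {
        // Replaces: new HRegionInfo(TableName.valueOf(table),
        //           HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW)
        public static RegionInfo wholeTableRegion(String table) {
            return RegionInfoBuilder.newBuilder(TableName.valueOf(table))
                    .setStartKey(HConstants.EMPTY_START_ROW)
                    .setEndKey(HConstants.EMPTY_END_ROW)
                    .build();
        }
    }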
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
index bb24602..3c307e0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
@@ -25,11 +25,11 @@ import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.compile.MutationPlan;
@@ -158,7 +158,7 @@ public class DelegateConnectionQueryServices extends DelegateQueryServices imple
}
@Override
- public HTableDescriptor getTableDescriptor(byte[] tableName) throws SQLException {
+ public TableDescriptor getTableDescriptor(byte[] tableName) throws SQLException {
return getDelegate().getTableDescriptor(tableName);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
index d695f41..7c57122 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/GuidePostsCache.java
@@ -24,9 +24,9 @@ import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.schema.PColumnFamily;
@@ -186,9 +186,9 @@ public class GuidePostsCache {
}
}
- public void invalidateAll(HTableDescriptor htableDesc) {
+ public void invalidateAll(TableDescriptor htableDesc) {
byte[] tableName = htableDesc.getTableName().getName();
- for (byte[] fam : htableDesc.getFamiliesKeys()) {
+ for (byte[] fam : htableDesc.getColumnFamilyNames()) {
invalidate(new GuidePostsKey(tableName, fam));
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
index 7607388..28e96a8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
@@ -112,9 +112,9 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE;
import java.math.BigDecimal;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.coprocessor.MetaDataProtocol;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
@@ -327,9 +327,9 @@ public interface QueryConstants {
"CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + TENANT_ID + ","
+ TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," + COLUMN_FAMILY + "))\n" +
HConstants.VERSIONS + "=" + MetaDataProtocol.DEFAULT_MAX_META_DATA_VERSIONS + ",\n" +
- HColumnDescriptor.KEEP_DELETED_CELLS + "=" + MetaDataProtocol.DEFAULT_META_DATA_KEEP_DELETED_CELLS + ",\n" +
+ ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=" + MetaDataProtocol.DEFAULT_META_DATA_KEEP_DELETED_CELLS + ",\n" +
// Install split policy to prevent a tenant's metadata from being split across regions.
- HTableDescriptor.SPLIT_POLICY + "='" + MetaDataSplitPolicy.class.getName() + "',\n" +
+ TableDescriptorBuilder.SPLIT_POLICY + "='" + MetaDataSplitPolicy.class.getName() + "',\n" +
PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
public static final String CREATE_STATS_TABLE_METADATA =
@@ -345,9 +345,9 @@ public interface QueryConstants {
+ PHYSICAL_NAME + ","
+ COLUMN_FAMILY + ","+ GUIDE_POST_KEY+"))\n" +
HConstants.VERSIONS + "=" + MetaDataProtocol.DEFAULT_MAX_STAT_DATA_VERSIONS + ",\n" +
- HColumnDescriptor.KEEP_DELETED_CELLS + "=" + MetaDataProtocol.DEFAULT_STATS_KEEP_DELETED_CELLS + ",\n" +
+ ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=" + MetaDataProtocol.DEFAULT_STATS_KEEP_DELETED_CELLS + ",\n" +
// Install split policy to prevent a physical table's stats from being split across regions.
- HTableDescriptor.SPLIT_POLICY + "='" + MetaDataSplitPolicy.class.getName() + "',\n" +
+ TableDescriptorBuilder.SPLIT_POLICY + "='" + MetaDataSplitPolicy.class.getName() + "',\n" +
PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
public static final String CREATE_SEQUENCE_METADATA =
@@ -366,7 +366,7 @@ public interface QueryConstants {
LIMIT_REACHED_FLAG + " BOOLEAN \n" +
" CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + TENANT_ID + "," + SEQUENCE_SCHEMA + "," + SEQUENCE_NAME + "))\n" +
HConstants.VERSIONS + "=" + MetaDataProtocol.DEFAULT_MAX_META_DATA_VERSIONS + ",\n" +
- HColumnDescriptor.KEEP_DELETED_CELLS + "=" + MetaDataProtocol.DEFAULT_META_DATA_KEEP_DELETED_CELLS + ",\n" +
+ ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=" + MetaDataProtocol.DEFAULT_META_DATA_KEEP_DELETED_CELLS + ",\n" +
PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
public static final String CREATE_SYSTEM_SCHEMA = "CREATE SCHEMA " + SYSTEM_CATALOG_SCHEMA;
public static final String UPGRADE_TABLE_SNAPSHOT_PREFIX = "_UPGRADING_TABLE_";
@@ -391,9 +391,9 @@ public interface QueryConstants {
MAX_VALUE + " VARCHAR, \n" +
" CONSTRAINT " + SYSTEM_TABLE_PK_NAME + " PRIMARY KEY (" + TENANT_ID + ", " + FUNCTION_NAME + ", " + TYPE + ", " + ARG_POSITION + "))\n" +
HConstants.VERSIONS + "=" + MetaDataProtocol.DEFAULT_MAX_META_DATA_VERSIONS + ",\n" +
- HColumnDescriptor.KEEP_DELETED_CELLS + "=" + MetaDataProtocol.DEFAULT_META_DATA_KEEP_DELETED_CELLS + ",\n"+
+ ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=" + MetaDataProtocol.DEFAULT_META_DATA_KEEP_DELETED_CELLS + ",\n"+
// Install split policy to prevent a tenant's metadata from being split across regions.
- HTableDescriptor.SPLIT_POLICY + "='" + MetaDataSplitPolicy.class.getName() + "',\n" +
+ TableDescriptorBuilder.SPLIT_POLICY + "='" + MetaDataSplitPolicy.class.getName() + "',\n" +
PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE;
public static final byte[] OFFSET_FAMILY = "f_offset".getBytes();
public static final byte[] OFFSET_COLUMN = "c_offset".getBytes();
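The DDL strings above change only where the property-name constants live: the KEEP_DELETED_CELLS and SPLIT_POLICY keys formerly on HColumnDescriptor/HTableDescriptor are exposed unchanged by the 2.0 builder classes. A tiny sketch of the lookup:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PropertyKeys {
        public static void main(String[] args) {
            // Same string keys the CREATE TABLE metadata above embeds.
            System.out.println(ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS);
            System.out.println(TableDescriptorBuilder.SPLIT_POLICY);
        }
    }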
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index d618183..ee9ddc0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -19,7 +19,6 @@ package org.apache.phoenix.schema;
import static com.google.common.collect.Sets.newLinkedHashSet;
import static com.google.common.collect.Sets.newLinkedHashSetWithExpectedSize;
-import static org.apache.hadoop.hbase.HColumnDescriptor.TTL;
import static org.apache.phoenix.coprocessor.BaseScannerRegionObserver.ANALYZE_TABLE;
import static org.apache.phoenix.coprocessor.BaseScannerRegionObserver.RUN_UPDATE_STATS_ASYNC_ATTRIB;
import static org.apache.phoenix.exception.SQLExceptionCode.INSUFFICIENT_MULTI_TENANT_COLUMNS;
@@ -127,14 +126,15 @@ import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.compile.ColumnResolver;
@@ -1065,11 +1065,11 @@ public class MetaDataClient {
private void populatePropertyMaps(ListMultimap<String,Pair<String,Object>> props, Map<String, Object> tableProps,
Map<String, Object> commonFamilyProps) {
// Somewhat hacky way of determining if property is for HColumnDescriptor or HTableDescriptor
- HColumnDescriptor defaultDescriptor = new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
+ ColumnFamilyDescriptor defaultDescriptor = ColumnFamilyDescriptorBuilder.of(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
if (!props.isEmpty()) {
Collection<Pair<String,Object>> propsList = props.get(QueryConstants.ALL_FAMILY_PROPERTIES_KEY);
for (Pair<String,Object> prop : propsList) {
- if (defaultDescriptor.getValue(prop.getFirst()) == null) {
+ if (defaultDescriptor.getValue(Bytes.toBytes(prop.getFirst())) == null) {
tableProps.put(prop.getFirst(), prop.getSecond());
} else {
commonFamilyProps.put(prop.getFirst(), prop.getSecond());
@@ -2003,7 +2003,7 @@ public class MetaDataClient {
tableProps.put(PhoenixDatabaseMetaData.TRANSACTIONAL, transactional);
if (transactional) {
// If TTL set, use Tephra TTL property name instead
- Object ttl = commonFamilyProps.remove(HColumnDescriptor.TTL);
+ Object ttl = commonFamilyProps.remove(ColumnFamilyDescriptorBuilder.TTL);
if (ttl != null) {
commonFamilyProps.put(PhoenixTransactionContext.PROPERTY_TTL, ttl);
}
@@ -2034,9 +2034,9 @@ public class MetaDataClient {
Integer maxVersionsProp = (Integer) commonFamilyProps.get(HConstants.VERSIONS);
if (maxVersionsProp == null) {
if (parent != null) {
- HTableDescriptor desc = connection.getQueryServices().getTableDescriptor(parent.getPhysicalName().getBytes());
+ TableDescriptor desc = connection.getQueryServices().getTableDescriptor(parent.getPhysicalName().getBytes());
if (desc != null) {
- maxVersionsProp = desc.getFamily(SchemaUtil.getEmptyColumnFamily(parent)).getMaxVersions();
+ maxVersionsProp = desc.getColumnFamily(SchemaUtil.getEmptyColumnFamily(parent)).getMaxVersions();
}
}
if (maxVersionsProp == null) {
@@ -2225,7 +2225,7 @@ public class MetaDataClient {
byte[] tableNameBytes = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
boolean tableExists = true;
try {
- HTableDescriptor tableDescriptor = connection.getQueryServices().getTableDescriptor(tableNameBytes);
+ TableDescriptor tableDescriptor = connection.getQueryServices().getTableDescriptor(tableNameBytes);
if (tableDescriptor == null) { // for connectionless
tableExists = false;
}
@@ -2420,7 +2420,7 @@ public class MetaDataClient {
for (Pair<String,Object> prop : props) {
// Don't allow specifying column families for TTL. TTL can only apply for the all the column families of the table
// i.e. it can't be column family specific.
- if (!familyName.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY) && prop.getFirst().equals(TTL)) {
+ if (!familyName.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY) && prop.getFirst().equals(ColumnFamilyDescriptorBuilder.TTL)) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_FOR_TTL).build().buildException();
}
combinedFamilyProps.put(prop.getFirst(), prop.getSecond());
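populatePropertyMaps keeps its old trick of probing a default descriptor to decide whether a property is family-level or table-level; the only API shift is that ColumnFamilyDescriptor.getValue now takes a byte[] key. A compact sketch of the classification test, with the family name "0" standing in for Phoenix's default column family:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PropertyClassifier {
        // True if the default column-family descriptor recognizes the key,
        // i.e. the property belongs in commonFamilyProps, not tableProps.
        public static boolean isFamilyProperty(String propName) {
            ColumnFamilyDescriptor defaults =
                    ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("0"));
            return defaults.getValue(Bytes.toBytes(propName)) != null;
        }
    }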
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
index 9798f79..047ccf6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
@@ -20,8 +20,10 @@ package org.apache.phoenix.transaction;
import java.io.IOException;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
@@ -36,6 +38,7 @@ import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.coprocessor.Batch.Call;
import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
@@ -180,13 +183,6 @@ public class OmidTransactionTable implements PhoenixTransactionalTable {
}
@Override
- public Object[] batch(List<? extends Row> actions) throws IOException,
- InterruptedException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
public <R> void batchCallback(List<? extends Row> actions,
Object[] results, Callback<R> callback) throws IOException,
InterruptedException {
@@ -194,13 +190,6 @@ public class OmidTransactionTable implements PhoenixTransactionalTable {
}
@Override
- public <R> Object[] batchCallback(List<? extends Row> actions,
- Callback<R> callback) throws IOException, InterruptedException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
byte[] value, Put put) throws IOException {
// TODO Auto-generated method stub
@@ -331,4 +320,85 @@ public class OmidTransactionTable implements PhoenixTransactionalTable {
}
+ @Override
+ public TableDescriptor getDescriptor() throws IOException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public boolean[] exists(List<Get> gets) throws IOException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, Put put)
+ throws IOException {
+ // TODO Auto-generated method stub
+ return false;
+ }
+
+ @Override
+ public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value,
+ Delete delete) throws IOException {
+ // TODO Auto-generated method stub
+ return false;
+ }
+
+ @Override
+ public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value,
+ RowMutations mutation) throws IOException {
+ // TODO Auto-generated method stub
+ return false;
+ }
+
+ @Override
+ public long getRpcTimeout(TimeUnit unit) {
+ // TODO Auto-generated method stub
+ return 0;
+ }
+
+ @Override
+ public long getReadRpcTimeout(TimeUnit unit) {
+ // TODO Auto-generated method stub
+ return 0;
+ }
+
+ @Override
+ public int getReadRpcTimeout() {
+ // TODO Auto-generated method stub
+ return 0;
+ }
+
+ @Override
+ public void setReadRpcTimeout(int readRpcTimeout) {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public long getWriteRpcTimeout(TimeUnit unit) {
+ // TODO Auto-generated method stub
+ return 0;
+ }
+
+ @Override
+ public int getWriteRpcTimeout() {
+ // TODO Auto-generated method stub
+ return 0;
+ }
+
+ @Override
+ public void setWriteRpcTimeout(int writeRpcTimeout) {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public long getOperationTimeout(TimeUnit unit) {
+ // TODO Auto-generated method stub
+ return 0;
+ }
+
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
index 0e46ae9..ed3f44e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionContext.java
@@ -23,6 +23,7 @@ import java.util.concurrent.TimeoutException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver.ConnectionInfo;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.util.ReadOnlyProps;
@@ -45,6 +46,7 @@ public interface PhoenixTransactionContext {
public static final String TX_ROLLBACK_ATTRIBUTE_KEY = "tephra.tx.rollback"; //"phoenix.tx.rollback";
public static final String PROPERTY_TTL = "dataset.table.ttl";
+ public static final byte[] PROPERTY_TTL_BYTES = Bytes.toBytes(PROPERTY_TTL);
public static final String READ_NON_TX_DATA = "data.tx.read.pre.existing";
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
index ede2896..e248f33 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.transaction;
import java.io.IOException;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompareOperator;
@@ -42,10 +43,10 @@ import org.apache.hadoop.hbase.client.coprocessor.Batch.Call;
import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-import org.apache.tephra.TxConstants;
-import org.apache.tephra.hbase.TransactionAwareHTable;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTableType;
+import org.apache.tephra.TxConstants;
+import org.apache.tephra.hbase.TransactionAwareHTable;
import com.google.protobuf.Descriptors.MethodDescriptor;
import com.google.protobuf.Message;
@@ -354,4 +355,29 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
public void setWriteRpcTimeout(int writeRpcTimeout) {
transactionAwareHTable.setWriteRpcTimeout(writeRpcTimeout);
}
+
+ @Override
+ public boolean[] exists(List<Get> gets) throws IOException {
+ return transactionAwareHTable.exists(gets);
+ }
+
+ @Override
+ public long getRpcTimeout(TimeUnit unit) {
+ return transactionAwareHTable.getRpcTimeout(unit);
+ }
+
+ @Override
+ public long getReadRpcTimeout(TimeUnit unit) {
+ return transactionAwareHTable.getReadRpcTimeout(unit);
+ }
+
+ @Override
+ public long getWriteRpcTimeout(TimeUnit unit) {
+ return transactionAwareHTable.getWriteRpcTimeout(unit);
+ }
+
+ @Override
+ public long getOperationTimeout(TimeUnit unit) {
+ return transactionAwareHTable.getOperationTimeout(unit);
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index 90760bc..9193172 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -282,8 +282,8 @@ public class IndexUtil {
byte[] regionEndkey = null;
if(maintainer.isLocalIndex()) {
HRegionLocation tableRegionLocation = connection.getQueryServices().getTableRegionLocation(table.getPhysicalName().getBytes(), dataMutation.getRow());
- regionStartKey = tableRegionLocation.getRegionInfo().getStartKey();
- regionEndkey = tableRegionLocation.getRegionInfo().getEndKey();
+ regionStartKey = tableRegionLocation.getRegion().getStartKey();
+ regionEndkey = tableRegionLocation.getRegion().getEndKey();
}
Delete delete = maintainer.buildDeleteMutation(kvBuilder, null, ptr, Collections.<Cell>emptyList(), ts, regionStartKey, regionEndkey);
delete.setAttribute(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY, dataMutation.getAttribute(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY));
@@ -349,8 +349,8 @@ public class IndexUtil {
byte[] regionEndkey = null;
if(maintainer.isLocalIndex()) {
HRegionLocation tableRegionLocation = connection.getQueryServices().getTableRegionLocation(table.getPhysicalName().getBytes(), dataMutation.getRow());
- regionStartKey = tableRegionLocation.getRegionInfo().getStartKey();
- regionEndkey = tableRegionLocation.getRegionInfo().getEndKey();
+ regionStartKey = tableRegionLocation.getRegion().getStartKey();
+ regionEndkey = tableRegionLocation.getRegion().getEndKey();
}
indexMutations.add(maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, ts, regionStartKey, regionEndkey));
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index 7914e3e..6a9ec85 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -28,25 +28,23 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.phoenix.coprocessor.MetaDataProtocol;
@@ -459,7 +457,7 @@ public class MetaDataUtil {
throws SQLException {
byte[] physicalIndexName = MetaDataUtil.getViewIndexPhysicalName(physicalTableName);
try {
- HTableDescriptor desc = connection.getQueryServices().getTableDescriptor(physicalIndexName);
+ TableDescriptor desc = connection.getQueryServices().getTableDescriptor(physicalIndexName);
return desc != null && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(desc.getValue(IS_VIEW_INDEX_TABLE_PROP_BYTES)));
} catch (TableNotFoundException e) {
return false;
@@ -472,7 +470,7 @@ public class MetaDataUtil {
public static boolean hasLocalIndexTable(PhoenixConnection connection, byte[] physicalTableName) throws SQLException {
try {
- HTableDescriptor desc = connection.getQueryServices().getTableDescriptor(physicalTableName);
+ TableDescriptor desc = connection.getQueryServices().getTableDescriptor(physicalTableName);
if(desc == null ) return false;
return hasLocalIndexColumnFamily(desc);
} catch (TableNotFoundException e) {
@@ -480,8 +478,8 @@ public class MetaDataUtil {
}
}
- public static boolean hasLocalIndexColumnFamily(HTableDescriptor desc) {
- for (HColumnDescriptor cf : desc.getColumnFamilies()) {
+ public static boolean hasLocalIndexColumnFamily(TableDescriptor desc) {
+ for (ColumnFamilyDescriptor cf : desc.getColumnFamilies()) {
if (cf.getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
return true;
}
@@ -489,9 +487,9 @@ public class MetaDataUtil {
return false;
}
- public static List<byte[]> getNonLocalIndexColumnFamilies(HTableDescriptor desc) {
+ public static List<byte[]> getNonLocalIndexColumnFamilies(TableDescriptor desc) {
List<byte[]> families = new ArrayList<byte[]>(desc.getColumnFamilies().length);
- for (HColumnDescriptor cf : desc.getColumnFamilies()) {
+ for (ColumnFamilyDescriptor cf : desc.getColumnFamilies()) {
if (!cf.getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
families.add(cf.getName());
}
@@ -500,10 +498,10 @@ public class MetaDataUtil {
}
public static List<byte[]> getLocalIndexColumnFamilies(PhoenixConnection conn, byte[] physicalTableName) throws SQLException {
- HTableDescriptor desc = conn.getQueryServices().getTableDescriptor(physicalTableName);
+ TableDescriptor desc = conn.getQueryServices().getTableDescriptor(physicalTableName);
if(desc == null ) return Collections.emptyList();
List<byte[]> families = new ArrayList<byte[]>(desc.getColumnFamilies().length / 2);
- for (HColumnDescriptor cf : desc.getColumnFamilies()) {
+ for (ColumnFamilyDescriptor cf : desc.getColumnFamilies()) {
if (cf.getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
families.add(cf.getName());
}
@@ -530,10 +528,10 @@ public class MetaDataUtil {
* @throws
*/
public static boolean tableRegionsOnline(Configuration conf, PTable table) {
- Connection hcon = null;
+ ClusterConnection hcon = null;
try {
- hcon = ConnectionFactory.createConnection(conf);
+ hcon = (ClusterConnection)ConnectionFactory.createConnection(conf);
List<HRegionLocation> locations = ((ClusterConnection)hcon).locateRegions(
org.apache.hadoop.hbase.TableName.valueOf(table.getPhysicalName().getBytes()));
@@ -542,17 +540,12 @@ public class MetaDataUtil {
ServerName sn = loc.getServerName();
if (sn == null) continue;
- AdminService.BlockingInterface admin = (BlockingInterface) ((ClusterConnection)hcon).getAdmin(sn);
- GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
- loc.getRegionInfo().getRegionName());
-
- admin.getRegionInfo(null, request);
- } catch (ServiceException e) {
- IOException ie = ProtobufUtil.getRemoteException(e);
- logger.debug("Region " + loc.getRegionInfo().getEncodedName() + " isn't online due to:" + ie);
- return false;
+ AdminService.BlockingInterface admin = ((ClusterConnection) hcon).getAdmin(sn);
+ HBaseRpcController controller = hcon.getRpcControllerFactory().newController();
+ org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.getRegionInfo(controller,
+ (AdminService.BlockingInterface) admin, loc.getRegion().getRegionName());
} catch (RemoteException e) {
- logger.debug("Cannot get region " + loc.getRegionInfo().getEncodedName() + " info due to error:" + e);
+ logger.debug("Cannot get region " + loc.getRegion().getEncodedName() + " info due to error:" + e);
return false;
}
}
@@ -651,7 +644,7 @@ public class MetaDataUtil {
}
public static boolean isHColumnProperty(String propName) {
- return HColumnDescriptor.getDefaultValues().containsKey(propName);
+ return ColumnFamilyDescriptorBuilder.getDefaultValues().containsKey(propName);
}
public static boolean isHTableProperty(String propName) {
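tableRegionsOnline now drives the region probe through HBase's shaded protobuf helper instead of hand-building a GetRegionInfoRequest. A condensed sketch of the probe for a single region, assuming an already-open ClusterConnection (method names mirror the hunk above):

    import java.io.IOException;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.ClusterConnection;
    import org.apache.hadoop.hbase.ipc.HBaseRpcController;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;

    public class RegionProbe {
        // Throws (e.g. RemoteException) if the region is not online on sn.
        public static void checkRegionOnline(ClusterConnection conn, ServerName sn,
                byte[] regionName) throws IOException {
            AdminService.BlockingInterface admin = conn.getAdmin(sn);
            HBaseRpcController controller = conn.getRpcControllerFactory().newController();
            ProtobufUtil.getRegionInfo(controller, admin, regionName);
        }
    }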
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/util/RepairUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/RepairUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/RepairUtil.java
index d394a68..0b54819 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/RepairUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/RepairUtil.java
@@ -17,7 +17,7 @@
*/
package org.apache.phoenix.util;
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
@@ -29,8 +29,8 @@ public class RepairUtil {
byte[] endKey = environment.getRegion().getRegionInfo().getEndKey();
byte[] indexKeyEmbedded = startKey.length == 0 ? new byte[endKey.length] : startKey;
for (StoreFile file : store.getStorefiles()) {
- if (file.getReader() != null && file.getReader().getFirstKey() != null) {
- byte[] fileFirstRowKey = KeyValue.createKeyValueFromKey(file.getReader().getFirstKey()).getRow();
+ if (file.getFirstKey().isPresent() && file.getFirstKey().get() != null) {
+ byte[] fileFirstRowKey = CellUtil.cloneRow(file.getFirstKey().get());
if ((fileFirstRowKey != null && Bytes.compareTo(fileFirstRowKey, 0,
indexKeyEmbedded.length, indexKeyEmbedded, 0, indexKeyEmbedded.length) != 0)) {
return false; }
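StoreFile.getFirstKey now returns an Optional<Cell>, so the old KeyValue.createKeyValueFromKey round-trip disappears. A minimal sketch of extracting a store file's first row key under that API:

    import java.util.Optional;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.regionserver.StoreFile;

    public class FirstRowKey {
        // Returns the row of the file's first cell, or null for an empty file.
        public static byte[] firstRowKey(StoreFile file) {
            Optional<Cell> first = file.getFirstKey();
            return first.isPresent() ? CellUtil.cloneRow(first.get()) : null;
        }
    }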
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
index a844226..94ac19f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
@@ -36,8 +36,8 @@ import java.util.NavigableSet;
import java.util.TreeMap;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
@@ -669,7 +669,7 @@ public class ScanUtil {
}
}
- public static byte[] getActualStartRow(Scan localIndexScan, HRegionInfo regionInfo) {
+ public static byte[] getActualStartRow(Scan localIndexScan, RegionInfo regionInfo) {
return localIndexScan.getAttribute(SCAN_START_ROW_SUFFIX) == null ? localIndexScan
.getStartRow() : ScanRanges.prefixKey(localIndexScan.getAttribute(SCAN_START_ROW_SUFFIX), 0 ,
regionInfo.getStartKey().length == 0 ? new byte[regionInfo.getEndKey().length]
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index e913d39..bb9ba3b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -60,6 +60,7 @@ import java.sql.SQLException;
import java.text.Format;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
@@ -75,12 +76,12 @@ import javax.annotation.Nullable;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Mutation;
@@ -89,7 +90,8 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.LocalIndexSplitter;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.util.Bytes;
@@ -190,9 +192,9 @@ public class UpgradeUtil {
private static void createSequenceSnapshot(Admin admin, PhoenixConnection conn) throws SQLException {
byte[] tableName = getSequenceSnapshotName();
- HColumnDescriptor columnDesc = new HColumnDescriptor(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES);
- HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
- desc.addFamily(columnDesc);
+ TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.of(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES))
+ .build();
try {
admin.createTable(desc);
copyTable(conn, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, tableName);
@@ -294,7 +296,7 @@ public class UpgradeUtil {
return;
}
logger.warn("Pre-splitting SYSTEM.SEQUENCE table " + nSaltBuckets + "-ways. This may take some time - please do not close window.");
- HTableDescriptor desc = admin.getTableDescriptor(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES));
+ TableDescriptor desc = admin.getDescriptor(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES));
createSequenceSnapshot(admin, conn);
snapshotCreated = true;
admin.disableTable(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME));
@@ -346,33 +348,34 @@ public class UpgradeUtil {
boolean droppedLocalIndexes = false;
while (rs.next()) {
if(!droppedLocalIndexes) {
- HTableDescriptor[] localIndexTables = admin.listTables(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX+".*");
+ TableDescriptor[] localIndexTables = admin.listTables(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX+".*");
String localIndexSplitter = LocalIndexSplitter.class.getName();
- for (HTableDescriptor table : localIndexTables) {
- HTableDescriptor dataTableDesc = admin.getTableDescriptor(TableName.valueOf(MetaDataUtil.getLocalIndexUserTableName(table.getNameAsString())));
- HColumnDescriptor[] columnFamilies = dataTableDesc.getColumnFamilies();
+ for (TableDescriptor table : localIndexTables) {
+ TableDescriptor dataTableDesc = admin.getDescriptor(TableName.valueOf(MetaDataUtil.getLocalIndexUserTableName(table.getTableName().getNameAsString())));
+ TableDescriptorBuilder dataTableDescBuilder = TableDescriptorBuilder.newBuilder(dataTableDesc);
+ ColumnFamilyDescriptor[] columnFamilies = dataTableDesc.getColumnFamilies();
boolean modifyTable = false;
- for(HColumnDescriptor cf : columnFamilies) {
+ for(ColumnFamilyDescriptor cf : columnFamilies) {
String localIndexCf = QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX+cf.getNameAsString();
- if(dataTableDesc.getFamily(Bytes.toBytes(localIndexCf))==null){
- HColumnDescriptor colDef =
- new HColumnDescriptor(localIndexCf);
- for(Entry<ImmutableBytesWritable, ImmutableBytesWritable> keyValue: cf.getValues().entrySet()){
- colDef.setValue(keyValue.getKey().copyBytes(), keyValue.getValue().copyBytes());
+ if(dataTableDesc.getColumnFamily(Bytes.toBytes(localIndexCf))==null){
+ ColumnFamilyDescriptorBuilder colDefBuilder =
+ ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(localIndexCf));
+ for(Entry<Bytes, Bytes> keyValue: cf.getValues().entrySet()){
+ colDefBuilder.setValue(keyValue.getKey().copyBytes(), keyValue.getValue().copyBytes());
}
- dataTableDesc.addFamily(colDef);
+ dataTableDescBuilder.addColumnFamily(colDefBuilder.build());
modifyTable = true;
}
}
- List<String> coprocessors = dataTableDesc.getCoprocessors();
+ Collection<String> coprocessors = dataTableDesc.getCoprocessors();
for(String coprocessor: coprocessors) {
if(coprocessor.equals(localIndexSplitter)) {
- dataTableDesc.removeCoprocessor(localIndexSplitter);
+ dataTableDescBuilder.removeCoprocessor(localIndexSplitter);
modifyTable = true;
}
}
if(modifyTable) {
- admin.modifyTable(dataTableDesc.getTableName(), dataTableDesc);
+ admin.modifyTable(dataTableDescBuilder.build());
}
}
admin.disableTables(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX+".*");
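Because 2.0 descriptors are immutable, the in-place addFamily/removeCoprocessor mutation above becomes copy-into-a-builder followed by Admin.modifyTable. A trimmed sketch of that pattern (the family and coprocessor names are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class DescriptorRewrite {
        public static void addFamilyAndDropCoprocessor(Admin admin, TableName table)
                throws IOException {
            TableDescriptor existing = admin.getDescriptor(table);
            TableDescriptor modified = TableDescriptorBuilder.newBuilder(existing)
                    .addColumnFamily(ColumnFamilyDescriptorBuilder.of("L#NEW_CF"))
                    .removeCoprocessor("org.example.ObsoleteObserver")
                    .build();
            admin.modifyTable(modified);
        }
    }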
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
index b3c7dca..cceed3f 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
@@ -2429,8 +2429,8 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
List<HRegionLocation> regionLocations=
conn.getQueryServices().getAllTableRegions(Bytes.toBytes("SALT_TEST2900"));
for (HRegionLocation regionLocation : regionLocations) {
- assertTrue(ranges.intersectRegion(regionLocation.getRegionInfo().getStartKey(),
- regionLocation.getRegionInfo().getEndKey(), false));
+ assertTrue(ranges.intersectRegion(regionLocation.getRegion().getStartKey(),
+ regionLocation.getRegion().getEndKey(), false));
}
} finally {
conn.close();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java
index c7b2685..6494db2 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java
@@ -27,7 +27,7 @@ import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Coprocessor;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.phoenix.hbase.index.Indexer;
/**
@@ -127,15 +127,15 @@ public class CoveredColumnIndexSpecifierBuilder {
}
}
- public void build(HTableDescriptor desc) throws IOException {
- build(desc, CoveredColumnIndexCodec.class);
- }
+ public TableDescriptor build(TableDescriptor desc) throws IOException {
+ return build(desc, CoveredColumnIndexCodec.class);
+ }
- public void build(HTableDescriptor desc, Class<? extends IndexCodec> clazz) throws IOException {
+ public TableDescriptor build(TableDescriptor desc, Class<? extends IndexCodec> clazz) throws IOException {
// add the codec for the index to the map of options
Map<String, String> opts = this.convertToMap();
opts.put(NonTxIndexBuilder.CODEC_CLASS_NAME_KEY, clazz.getName());
- Indexer.enableIndexing(desc, NonTxIndexBuilder.class, opts, Coprocessor.PRIORITY_USER);
+ return Indexer.enableIndexing(desc, NonTxIndexBuilder.class, opts, Coprocessor.PRIORITY_USER);
}
public static List<ColumnGroup> getColumns(Configuration conf) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java
index d94cce0..81529fe 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilderTest.java
@@ -34,11 +34,11 @@ import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -64,6 +64,7 @@ import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.TestUtil;
import org.junit.Before;
@@ -102,7 +103,6 @@ public class NonTxIndexBuilderTest extends BaseConnectionlessQueryTest {
private static final byte[] VALUE_2 = Bytes.toBytes(222);
private static final byte[] VALUE_3 = Bytes.toBytes(333);
private static final byte[] VALUE_4 = Bytes.toBytes(444);
- private static final byte PUT_TYPE = KeyValue.Type.Put.getCode();
private NonTxIndexBuilder indexBuilder;
private PhoenixIndexMetaData mockIndexMetaData;
@@ -139,7 +139,7 @@ public class NonTxIndexBuilderTest extends BaseConnectionlessQueryTest {
});
// the following is called by PhoenixIndexCodec#getIndexUpserts() , getIndexDeletes()
- HRegionInfo mockRegionInfo = Mockito.mock(HRegionInfo.class);
+ RegionInfo mockRegionInfo = Mockito.mock(RegionInfo.class);
Mockito.when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo);
Mockito.when(mockRegionInfo.getStartKey()).thenReturn(Bytes.toBytes("a"));
Mockito.when(mockRegionInfo.getEndKey()).thenReturn(Bytes.toBytes("z"));
@@ -232,10 +232,10 @@ public class NonTxIndexBuilderTest extends BaseConnectionlessQueryTest {
// the current row state has 3 versions, but if we rebuild as of t=2, scanner in LocalTable
// should only return first
- Cell currentCell1 = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 1, PUT_TYPE, VALUE_1);
- Cell currentCell2 = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 2, PUT_TYPE, VALUE_2);
- Cell currentCell3 = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 3, PUT_TYPE, VALUE_3);
- Cell currentCell4 = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 4, PUT_TYPE, VALUE_4);
+ Cell currentCell1 = PhoenixKeyValueUtil.newKeyValue(ROW, FAM, INDEXED_QUALIFIER, 1, VALUE_1);
+ Cell currentCell2 = PhoenixKeyValueUtil.newKeyValue(ROW, FAM, INDEXED_QUALIFIER, 2, VALUE_2);
+ Cell currentCell3 = PhoenixKeyValueUtil.newKeyValue(ROW, FAM, INDEXED_QUALIFIER, 3, VALUE_3);
+ Cell currentCell4 = PhoenixKeyValueUtil.newKeyValue(ROW, FAM, INDEXED_QUALIFIER, 4, VALUE_4);
setCurrentRowState(Arrays.asList(currentCell4, currentCell3, currentCell2, currentCell1));
// rebuilder replays mutations starting from t=2
@@ -322,7 +322,7 @@ public class NonTxIndexBuilderTest extends BaseConnectionlessQueryTest {
}
private void setCurrentRowState(byte[] fam2, byte[] indexedQualifier, int i, byte[] value1) {
- Cell cell = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 1, PUT_TYPE, VALUE_1);
+ Cell cell = PhoenixKeyValueUtil.newKeyValue(ROW, FAM, INDEXED_QUALIFIER, 1, VALUE_1);
currentRowCells = Collections.singletonList(cell);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java
index 317d07a..89386e0 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/util/TestIndexManagementUtil.java
@@ -20,10 +20,12 @@ package org.apache.phoenix.hbase.index.util;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeepDeletedCells;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.wal.IndexedHLogReader;
import org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec;
import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
@@ -77,19 +79,17 @@ public class TestIndexManagementUtil {
* @throws IOException
*/
public static void createIndexTable(Admin admin, String indexTable) throws IOException {
- createIndexTable(admin, new HTableDescriptor(indexTable));
+ createIndexTable(admin, TableDescriptorBuilder.newBuilder(TableName.valueOf(indexTable)));
}
/**
* @param admin to create the table
* @param index descriptor to update before creating table
*/
- public static void createIndexTable(Admin admin, HTableDescriptor index) throws IOException {
- HColumnDescriptor col =
- new HColumnDescriptor(CoveredColumnIndexCodec.INDEX_ROW_COLUMN_FAMILY);
- // ensure that we can 'see past' delete markers when doing scans
- col.setKeepDeletedCells(true);
- index.addFamily(col);
- admin.createTable(index);
+ public static void createIndexTable(Admin admin, TableDescriptorBuilder indexBuilder) throws IOException {
+ indexBuilder.addColumnFamily(
+ ColumnFamilyDescriptorBuilder.newBuilder(CoveredColumnIndexCodec.INDEX_ROW_COLUMN_FAMILY)
+ .setKeepDeletedCells(KeepDeletedCells.TRUE).build());
+ admin.createTable(indexBuilder.build());
}
}
\ No newline at end of file
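The boolean setKeepDeletedCells(true) becomes the KeepDeletedCells enum on the column-family builder; TRUE retains deleted cells until TTL or max-versions expiry, which is what lets index scans 'see past' delete markers. A minimal sketch (the family name is illustrative):

    import org.apache.hadoop.hbase.KeepDeletedCells;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class IndexFamily {
        public static ColumnFamilyDescriptor indexFamily() {
            return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("INDEX"))
                    .setKeepDeletedCells(KeepDeletedCells.TRUE)
                    .build();
        }
    }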
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
index b302210..57e3ba1 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
@@ -17,10 +17,8 @@
*/
package org.apache.phoenix.hbase.index.write;
-import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
import java.util.ArrayList;
import java.util.Arrays;
@@ -37,11 +35,12 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.util.Bytes;
@@ -79,7 +78,8 @@ public class TestIndexWriter {
Region region = Mockito.mock(Region.class);
Mockito.when(env.getRegion()).thenReturn(region);
Mockito.when(env.getConfiguration()).thenReturn(conf);
- Mockito.when(region.getTableDesc()).thenReturn(new HTableDescriptor());
+ Mockito.when(region.getTableDescriptor()).thenReturn(
+ TableDescriptorBuilder.newBuilder(TableName.valueOf("dummy")).build());
assertNotNull(IndexWriter.getFailurePolicy(env));
}
@@ -111,7 +111,7 @@ public class TestIndexWriter {
Table table = Mockito.mock(Table.class);
final boolean[] completed = new boolean[] { false };
- Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
+ Mockito.when(table.batch(Mockito.anyList(), Mockito.anyList())).thenAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
@@ -120,7 +120,7 @@ public class TestIndexWriter {
return null;
}
});
- Mockito.when(table.getTableName()).thenReturn(testName.getTableName());
+ Mockito.when(table.getName()).thenReturn(TableName.valueOf(testName.getTableName()));
// add the table to the set of tables, so its returned to the writer
tables.put(new ImmutableBytesPtr(tableName), table);
@@ -158,8 +158,8 @@ public class TestIndexWriter {
FakeTableFactory factory = new FakeTableFactory(tables);
byte[] tableName = this.testName.getTableName();
- HTableInterface table = Mockito.mock(HTableInterface.class);
- Mockito.when(table.getTableName()).thenReturn(tableName);
+ Table table = Mockito.mock(Table.class);
+ Mockito.when(table.getName()).thenReturn(TableName.valueOf(tableName));
final CountDownLatch writeStartedLatch = new CountDownLatch(1);
// latch never gets counted down, so we wait forever
final CountDownLatch waitOnAbortedLatch = new CountDownLatch(1);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
index 0d6ac7f..1ace4c5 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
@@ -34,12 +34,11 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Mutation;
@@ -173,18 +172,19 @@ public class TestWALRecoveryCaching {
builder.addIndexGroup(columns);
// create the primary table w/ indexing enabled
- HTableDescriptor primaryTable = new HTableDescriptor(testTable.getTableName());
- primaryTable.addFamily(new HColumnDescriptor(family));
- primaryTable.addFamily(new HColumnDescriptor(nonIndexedFamily));
+ TableDescriptor primaryTable = TableDescriptorBuilder.newBuilder(TableName.valueOf(testTable.getTableName()))
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.of(family))
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.of(nonIndexedFamily)).build();
builder.addArbitraryConfigForTesting(Indexer.RecoveryFailurePolicyKeyForTesting,
ReleaseLatchOnFailurePolicy.class.getName());
builder.build(primaryTable);
admin.createTable(primaryTable);
// create the index table
- HTableDescriptor indexTableDesc = new HTableDescriptor(Bytes.toBytes(getIndexTableName()));
- indexTableDesc.addCoprocessor(IndexTableBlockingReplayObserver.class.getName());
- TestIndexManagementUtil.createIndexTable(admin, indexTableDesc);
+ TableDescriptorBuilder indexTableBuilder = TableDescriptorBuilder
+ .newBuilder(TableName.valueOf(Bytes.toBytes(getIndexTableName())))
+ .addCoprocessor(IndexTableBlockingReplayObserver.class.getName());
+ TestIndexManagementUtil.createIndexTable(admin, indexTableBuilder);
// figure out where our tables live
ServerName shared =
@@ -218,7 +218,8 @@ public class TestWALRecoveryCaching {
LOG.info("\t== Offline: " + server.getServerName());
continue;
}
- List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server.getRSRpcServices());
+
+ List<HRegion> regions = server.getRegions();
LOG.info("\t" + server.getServerName() + " regions: " + regions);
}
@@ -268,14 +269,14 @@ public class TestWALRecoveryCaching {
* @param table
* @return
*/
- private List<Region> getRegionsFromServerForTable(MiniHBaseCluster cluster, ServerName server,
+ private List<HRegion> getRegionsFromServerForTable(MiniHBaseCluster cluster, ServerName server,
byte[] table) {
- List<Region> online = Collections.emptyList();
+ List<HRegion> online = Collections.emptyList();
for (RegionServerThread rst : cluster.getRegionServerThreads()) {
// if its the server we are going to kill, get the regions we want to reassign
if (rst.getRegionServer().getServerName().equals(server)) {
- online = rst.getRegionServer().getOnlineRegions(org.apache.hadoop.hbase.TableName.valueOf(table));
- break;
+ online = rst.getRegionServer().getRegions(org.apache.hadoop.hbase.TableName.valueOf(table));
+ break;
}
}
return online;
@@ -306,7 +307,7 @@ public class TestWALRecoveryCaching {
tryIndex = !tryIndex;
for (ServerName server : servers) {
// find the regionserver that matches the passed server
- List<Region> online = getRegionsFromServerForTable(cluster, server, table);
+ List<HRegion> online = getRegionsFromServerForTable(cluster, server, table);
LOG.info("Shutting down and reassigning regions from " + server);
cluster.stopRegionServer(server);
@@ -314,7 +315,7 @@ public class TestWALRecoveryCaching {
// force reassign the regions from the table
for (Region region : online) {
- cluster.getMaster().assignRegion(region.getRegionInfo());
+ cluster.getMaster().getAssignmentManager().assign(region.getRegionInfo());
}
LOG.info("Starting region server:" + server.getHostname());
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
index 7253165..2bfbcbf 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/recovery/TestPerRegionIndexWriteCache.java
@@ -30,12 +30,14 @@ import java.util.Set;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -76,15 +78,18 @@ public class TestPerRegionIndexWriteCache {
TEST_UTIL.getConfiguration().set("hbase.rootdir", hbaseRootDir.toString());
FileSystem newFS = FileSystem.newInstance(TEST_UTIL.getConfiguration());
- HRegionInfo hri = new HRegionInfo(tableName, null, null, false);
+ RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(null).setEndKey(null).setSplit(false).build();
Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
Random rn = new Random();
tableName = TableName.valueOf("TestPerRegion" + rn.nextInt());
WALFactory walFactory = new WALFactory(TEST_UTIL.getConfiguration(), null, "TestPerRegionIndexWriteCache");
wal = walFactory.getWAL(Bytes.toBytes("logs"), null);
- HTableDescriptor htd = new HTableDescriptor(tableName);
- HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
- htd.addFamily(a);
+ TableDescriptor htd =
+ TableDescriptorBuilder
+ .newBuilder(tableName)
+ .addColumnFamily(
+ ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("a")).build())
+ .build();
r1 = new HRegion(basedir, wal, newFS, TEST_UTIL.getConfiguration(), hri, htd, null) {
@Override
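
RegionInfoBuilder stands in for the old HRegionInfo constructors; start key, end key, and the split flag become builder setters. A minimal sketch under the same HBase 2.0 assumption:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;

    public class RegionInfoSketch {
        // Null start/end keys span the whole table, matching the old
        // new HRegionInfo(tableName, null, null, false).
        static RegionInfo wholeTable(TableName tableName) {
            return RegionInfoBuilder.newBuilder(tableName)
                    .setStartKey(null)
                    .setEndKey(null)
                    .setSplit(false)
                    .build();
        }
    }
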
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 1133826..8a09420 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -112,10 +112,10 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
import org.apache.hadoop.hbase.util.Bytes;
@@ -1526,9 +1526,9 @@ public abstract class BaseTest {
if (driver == null) return;
Admin admin = driver.getConnectionQueryServices(null, null).getAdmin();
try {
- HTableDescriptor[] tables = admin.listTables();
- for (HTableDescriptor table : tables) {
- String schemaName = SchemaUtil.getSchemaNameFromFullName(table.getName());
+ TableDescriptor[] tables = admin.listTables();
+ for (TableDescriptor table : tables) {
+ String schemaName = SchemaUtil.getSchemaNameFromFullName(table.getTableName().getName());
if (!QueryConstants.SYSTEM_SCHEMA_NAME.equals(schemaName)) {
disableAndDropTable(admin, table.getTableName());
}
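
The listTables() call above keeps compiling because HTableDescriptor implements TableDescriptor in 2.0, but the 2.0 Admin also offers listTableDescriptors(), which avoids the deprecated array type altogether. A sketch of that variant (an assumption of this note, not part of the patch):

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class ListTablesSketch {
        static void printTableNames(Admin admin) throws IOException {
            // getTableName() yields a TableName; getName() on it yields
            // the raw bytes that SchemaUtil parses above.
            List<TableDescriptor> tables = admin.listTableDescriptors();
            for (TableDescriptor table : tables) {
                System.out.println(table.getTableName().getNameAsString());
            }
        }
    }
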
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java
index dd96d8c..177aff3 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java
@@ -28,7 +28,7 @@ import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
@@ -50,7 +50,7 @@ public class StatisticsScannerTest {
private StatisticsScanner mockScanner;
private StatisticsCollector tracker;
private InternalScanner delegate;
- private HRegionInfo regionInfo;
+ private RegionInfo regionInfo;
private Configuration config;
@@ -67,11 +67,10 @@ public class StatisticsScannerTest {
this.mockScanner = mock(StatisticsScanner.class);
this.tracker = mock(StatisticsCollector.class);
this.delegate = mock(InternalScanner.class);
- this.regionInfo = mock(HRegionInfo.class);
+ this.regionInfo = mock(RegionInfo.class);
// Wire up the mocks to the mock StatisticsScanner
when(mockScanner.getStatisticsWriter()).thenReturn(statsWriter);
- when(mockScanner.getRegionServerServices()).thenReturn(rsServices);
when(mockScanner.createCallable()).thenReturn(callable);
when(mockScanner.getStatsCollectionRunTracker(any(Configuration.class))).thenReturn(runTracker);
when(mockScanner.getRegion()).thenReturn(region);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
index fcc0261..d0bfc7f 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
@@ -55,7 +55,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Delete;
@@ -64,6 +63,8 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -965,17 +966,20 @@ public class TestUtil {
public static void addCoprocessor(Connection conn, String tableName, Class coprocessorClass) throws Exception {
int priority = QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY + 100;
ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
- HTableDescriptor descriptor = services.getTableDescriptor(Bytes.toBytes(tableName));
+ TableDescriptor descriptor = services.getTableDescriptor(Bytes.toBytes(tableName));
+ TableDescriptorBuilder descriptorBuilder = null;
if (!descriptor.getCoprocessors().contains(coprocessorClass.getName())) {
- descriptor.addCoprocessor(coprocessorClass.getName(), null, priority, null);
+ descriptorBuilder=TableDescriptorBuilder.newBuilder(descriptor);
+ descriptorBuilder.addCoprocessor(coprocessorClass.getName(), null, priority, null);
}else{
return;
}
final int retries = 10;
int numTries = 10;
+ descriptor = descriptorBuilder.build();
try (Admin admin = services.getAdmin()) {
- admin.modifyTable(TableName.valueOf(tableName), descriptor);
- while (!admin.getTableDescriptor(TableName.valueOf(tableName)).equals(descriptor)
+ admin.modifyTable(descriptor);
+ while (!admin.getDescriptor(TableName.valueOf(tableName)).equals(descriptor)
&& numTries > 0) {
numTries--;
if (numTries == 0) {
@@ -990,17 +994,20 @@ public class TestUtil {
public static void removeCoprocessor(Connection conn, String tableName, Class coprocessorClass) throws Exception {
ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
- HTableDescriptor descriptor = services.getTableDescriptor(Bytes.toBytes(tableName));
+ TableDescriptor descriptor = services.getTableDescriptor(Bytes.toBytes(tableName));
+ TableDescriptorBuilder descriptorBuilder = null;
if (descriptor.getCoprocessors().contains(coprocessorClass.getName())) {
- descriptor.removeCoprocessor(coprocessorClass.getName());
+ descriptorBuilder=TableDescriptorBuilder.newBuilder(descriptor);
+ descriptorBuilder.removeCoprocessor(coprocessorClass.getName());
}else{
return;
}
final int retries = 10;
int numTries = retries;
+ descriptor = descriptorBuilder.build();
try (Admin admin = services.getAdmin()) {
- admin.modifyTable(TableName.valueOf(tableName), descriptor);
- while (!admin.getTableDescriptor(TableName.valueOf(tableName)).equals(descriptor)
+ admin.modifyTable(descriptor);
+ while (!admin.getDescriptor(TableName.valueOf(tableName)).equals(descriptor)
&& numTries > 0) {
numTries--;
if (numTries == 0) {
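
Because the built descriptors are immutable, in-place mutation becomes a copy-modify-rebuild cycle: seed a builder from the current descriptor, change it, and hand the result to Admin.modifyTable(TableDescriptor). A condensed sketch of the add path above (class name illustrative):

    import java.io.IOException;

    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class ModifyTableSketch {
        static void addCoprocessor(Admin admin, TableDescriptor current,
                String coprocClass, int priority) throws IOException {
            if (current.getCoprocessors().contains(coprocClass)) {
                return; // already present, nothing to modify
            }
            // Seed the builder from the existing descriptor, then rebuild.
            TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
                    .addCoprocessor(coprocClass, null, priority, null)
                    .build();
            // modifyTable(TableDescriptor) replaces modifyTable(TableName, HTD).
            admin.modifyTable(updated);
        }
    }
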
[3/8] phoenix git commit: PHOENIX-4304 Refactoring to avoid using
deprecated HTableDescriptor, HColumnDescriptor,
HRegionInfo (Rajeshbabu Chintaguntla)
Posted by an...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
index 658ef92..26b4f5c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
@@ -30,9 +30,9 @@ import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Properties;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;
@@ -69,7 +69,7 @@ public class SkipScanAfterManualSplitIT extends ParallelStatsDisabledIT {
Connection conn = getConnection();
conn.createStatement().execute("CREATE TABLE " + tableName + "("
+ "a VARCHAR PRIMARY KEY, b VARCHAR) "
- + HTableDescriptor.MAX_FILESIZE + "=" + MAX_FILESIZE + ","
+ + TableDescriptorBuilder.MAX_FILESIZE + "=" + MAX_FILESIZE + ","
+ " SALT_BUCKETS = 4");
PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?)");
int rowCount = 0;
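
Phoenix passes HBase table properties through DDL by name, so only the constant's home changed: HTableDescriptor.MAX_FILESIZE became TableDescriptorBuilder.MAX_FILESIZE, still the literal property key. A tiny sketch of the DDL assembly (table name and size are illustrative):

    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class DdlSketch {
        public static void main(String[] args) {
            long maxFileSize = 1024L * 1024L; // illustrative
            // MAX_FILESIZE is the property-key string, used verbatim in DDL.
            String ddl = "CREATE TABLE T (a VARCHAR PRIMARY KEY, b VARCHAR) "
                    + TableDescriptorBuilder.MAX_FILESIZE + "=" + maxFileSize
                    + ", SALT_BUCKETS = 4";
            System.out.println(ddl);
        }
    }
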
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/UseSchemaIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UseSchemaIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UseSchemaIT.java
index 92871aa..8f17281 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UseSchemaIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UseSchemaIT.java
@@ -27,15 +27,13 @@ import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Properties;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.query.QueryServices;
@@ -170,8 +168,8 @@ public class UseSchemaIT extends ParallelStatsDisabledIT {
Connection conn = DriverManager.getConnection(getUrl(), props);
Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
admin.createNamespace(NamespaceDescriptor.create(schema).build());
- admin.createTable(new HTableDescriptor(fullTablename)
- .addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)));
+ admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf(fullTablename)).
+ addColumnFamily(ColumnFamilyDescriptorBuilder.of(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)).build());
Put put = new Put(PVarchar.INSTANCE.toBytes(fullTablename));
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES,
QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index 94f306f..4b64a09 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -36,10 +36,10 @@ import java.sql.SQLException;
import java.util.List;
import java.util.Properties;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -729,9 +729,9 @@ public class ViewIT extends BaseViewIT {
// test for a view that is in non-default schema
{
- HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(NS, TBL));
- desc.addFamily(new HColumnDescriptor(CF));
- admin.createTable(desc);
+ TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(NS, TBL));
+ builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(CF));
+ admin.createTable(builder.build());
String view = NS + "." + TBL;
conn.createStatement().execute(
@@ -746,9 +746,9 @@ public class ViewIT extends BaseViewIT {
// test for a view whose name contains a dot (e.g. "AAA.BBB") in default schema (for backward compatibility)
{
- HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(NS + "." + TBL));
- desc.addFamily(new HColumnDescriptor(CF));
- admin.createTable(desc);
+ TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(NS + "." + TBL));
+ builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(CF));
+ admin.createTable(builder.build());
String view = "\"" + NS + "." + TBL + "\"";
conn.createStatement().execute(
@@ -763,9 +763,9 @@ public class ViewIT extends BaseViewIT {
// test for a view whose name contains a dot (e.g. "AAA.BBB") in non-default schema
{
- HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(NS, NS + "." + TBL));
- desc.addFamily(new HColumnDescriptor(CF));
- admin.createTable(desc);
+ TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(NS, NS + "." + TBL));
+ builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(CF));
+ admin.createTable(builder.build());
String view = NS + ".\"" + NS + "." + TBL + "\"";
conn.createStatement().execute(
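
The three blocks above differ only in how the TableName is built; the two-argument TableName.valueOf keeps namespace and qualifier separate, while a dot in a one-argument name stays inside the qualifier. A sketch of the three shapes:

    import org.apache.hadoop.hbase.TableName;

    public class TableNameSketch {
        public static void main(String[] args) {
            // Namespace NS, qualifier TBL -> "NS:TBL"
            TableName inNs = TableName.valueOf("NS", "TBL");
            // Default namespace, dotted qualifier -> "NS.TBL"
            TableName dotted = TableName.valueOf("NS.TBL");
            // Namespace NS, dotted qualifier -> "NS:NS.TBL"
            TableName both = TableName.valueOf("NS", "NS.TBL");
            System.out.println(inNs + " " + dotted + " " + both);
        }
    }
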
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
index 7b060e3..3fd6b3b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
@@ -42,14 +42,14 @@ import java.util.Random;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
import org.apache.phoenix.compile.ColumnResolver;
import org.apache.phoenix.compile.FromCompiler;
@@ -967,7 +967,7 @@ public abstract class BaseIndexIT extends ParallelStatsDisabledIT {
CellScanner cellScanner = result.cellScanner();
while (cellScanner.advance()) {
Cell current = cellScanner.current();
- assertEquals (KeyValue.Type.Put.getCode(), current.getTypeByte());
+ assertTrue(CellUtil.isPut(current));
}
}
};
@@ -1059,7 +1059,7 @@ public abstract class BaseIndexIT extends ParallelStatsDisabledIT {
String schemaName = rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM);
String tName = rs.getString(PhoenixDatabaseMetaData.TABLE_NAME);
org.apache.hadoop.hbase.TableName hbaseTableName = SchemaUtil.getPhysicalTableName(SchemaUtil.getTableName(schemaName, tName), p);
- HTableDescriptor htd = admin.getTableDescriptor(hbaseTableName);
+ TableDescriptor htd = admin.getDescriptor(hbaseTableName);
String val = htd.getValue("PRIORITY");
assertNotNull("PRIORITY is not set for table:" + htd, val);
assertTrue(Integer.parseInt(val)
@@ -1078,13 +1078,13 @@ public abstract class BaseIndexIT extends ParallelStatsDisabledIT {
stmt.execute(ddl);
}
- HTableDescriptor dataTable = admin.getTableDescriptor(
+ TableDescriptor dataTable = admin.getDescriptor(
org.apache.hadoop.hbase.TableName.valueOf(fullTableName));
String val = dataTable.getValue("PRIORITY");
assertTrue(val == null || Integer.parseInt(val) < HConstants.HIGH_QOS);
if (!localIndex && mutable) {
- HTableDescriptor indexTable = admin.getTableDescriptor(
+ TableDescriptor indexTable = admin.getDescriptor(
org.apache.hadoop.hbase.TableName.valueOf(indexName));
val = indexTable.getValue("PRIORITY");
assertNotNull("PRIORITY is not set for table:" + indexTable, val);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
index 550e9e2..71a9f00 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
@@ -23,9 +23,12 @@ import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeepDeletedCells;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
@@ -65,12 +68,11 @@ public class DropMetadataIT extends ParallelStatsDisabledIT {
byte[] hbaseNativeBytes = SchemaUtil.getTableNameAsBytes(HBASE_NATIVE_SCHEMA_NAME, hbaseNativeViewName);
try {
- @SuppressWarnings("deprecation")
- HTableDescriptor descriptor = new HTableDescriptor(hbaseNativeBytes);
- HColumnDescriptor columnDescriptor = new HColumnDescriptor(FAMILY_NAME);
- columnDescriptor.setKeepDeletedCells(true);
- descriptor.addFamily(columnDescriptor);
- admin.createTable(descriptor);
+ TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(hbaseNativeBytes));
+ ColumnFamilyDescriptor columnDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_NAME)
+ .setKeepDeletedCells(KeepDeletedCells.TRUE).build();
+ builder.addColumnFamily(columnDescriptor);
+ admin.createTable(builder.build());
} finally {
admin.close();
}
@@ -82,7 +84,7 @@ public class DropMetadataIT extends ParallelStatsDisabledIT {
" \"1\".uint_col unsigned_int," +
" \"1\".ulong_col unsigned_long" +
" CONSTRAINT pk PRIMARY KEY (uint_key, ulong_key, string_key))\n" +
- HColumnDescriptor.DATA_BLOCK_ENCODING + "='" + DataBlockEncoding.NONE + "'");
+ ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING + "='" + DataBlockEncoding.NONE + "'");
conn.createStatement().execute("drop view " + hbaseNativeViewName);
conn.close();
}
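
setKeepDeletedCells now takes the KeepDeletedCells enum instead of a boolean; TRUE and FALSE map to the old flags, and TTL is the third option. A sketch of the family definition used above (family name illustrative):

    import org.apache.hadoop.hbase.KeepDeletedCells;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class KeepDeletedCellsSketch {
        static ColumnFamilyDescriptor family() {
            // KeepDeletedCells.TRUE replaces setKeepDeletedCells(true).
            return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("1"))
                    .setKeepDeletedCells(KeepDeletedCells.TRUE)
                    .build();
        }
    }
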
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 04f34c6..41616f2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -33,7 +33,6 @@ import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
-import java.util.Collection;
import java.util.Iterator;
import java.util.List;
@@ -41,18 +40,18 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;
@@ -177,9 +176,9 @@ public class LocalIndexIT extends BaseLocalIndexIT {
conn1.createStatement().execute("CREATE LOCAL INDEX " + indexName + " ON " + tableName + "(v1)");
conn2.createStatement().executeQuery("SELECT * FROM " + tableName).next();
Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
- HTableDescriptor htd = admin
- .getTableDescriptor(TableName.valueOf(indexPhysicalTableName));
- assertEquals(IndexRegionSplitPolicy.class.getName(), htd.getValue(HTableDescriptor.SPLIT_POLICY));
+ TableDescriptor htd = admin
+ .getDescriptor(TableName.valueOf(indexPhysicalTableName));
+ assertEquals(IndexRegionSplitPolicy.class.getName(), htd.getValue(TableDescriptorBuilder.SPLIT_POLICY));
try(org.apache.hadoop.hbase.client.Connection c = ConnectionFactory.createConnection(admin.getConfiguration())) {
try (RegionLocator userTable= c.getRegionLocator(SchemaUtil.getPhysicalTableName(tableName.getBytes(), isNamespaceMapped))) {
try (RegionLocator indxTable = c.getRegionLocator(TableName.valueOf(indexPhysicalTableName))) {
@@ -443,8 +442,8 @@ public class LocalIndexIT extends BaseLocalIndexIT {
Scan s = new Scan();
s.setStartRow(startKeys[i]);
s.setStopRow(endKeys[i]);
- Collection<HColumnDescriptor> families = table.getTableDescriptor().getFamilies();
- for(HColumnDescriptor cf: families) {
+ ColumnFamilyDescriptor[] families = table.getDescriptor().getColumnFamilies();
+ for(ColumnFamilyDescriptor cf: families) {
if(cf.getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)){
s.addFamily(cf.getName());
}
@@ -607,7 +606,7 @@ public class LocalIndexIT extends BaseLocalIndexIT {
ResultSet rs = statement.executeQuery("SELECT COUNT(*) FROM " + indexName);
assertTrue(rs.next());
assertEquals(2000, rs.getLong(1));
- List<HRegionInfo> tableRegions = admin.getTableRegions(TableName.valueOf(tableName));
+ List<RegionInfo> tableRegions = admin.getRegions(TableName.valueOf(tableName));
admin.disableTable(TableName.valueOf(tableName));
copyLocalIndexHFiles(config, tableRegions.get(0), tableRegions.get(1), false);
copyLocalIndexHFiles(config, tableRegions.get(3), tableRegions.get(0), false);
@@ -671,14 +670,14 @@ public class LocalIndexIT extends BaseLocalIndexIT {
conn1.close();
}
- private void copyLocalIndexHFiles(Configuration conf, HRegionInfo fromRegion, HRegionInfo toRegion, boolean move)
+ private void copyLocalIndexHFiles(Configuration conf, RegionInfo fromRegion, RegionInfo toRegion, boolean move)
throws IOException {
Path root = FSUtils.getRootDir(conf);
- Path seondRegion = new Path(HTableDescriptor.getTableDir(root, fromRegion.getTableName()) + Path.SEPARATOR
+ Path seondRegion = new Path(FSUtils.getTableDir(root, fromRegion.getTable()) + Path.SEPARATOR
+ fromRegion.getEncodedName() + Path.SEPARATOR + "L#0/");
Path hfilePath = FSUtils.getCurrentFileSystem(conf).listFiles(seondRegion, true).next().getPath();
- Path firstRegionPath = new Path(HTableDescriptor.getTableDir(root, toRegion.getTableName()) + Path.SEPARATOR
+ Path firstRegionPath = new Path(FSUtils.getTableDir(root, toRegion.getTable()) + Path.SEPARATOR
+ toRegion.getEncodedName() + Path.SEPARATOR + "L#0/");
FileSystem currentFileSystem = FSUtils.getCurrentFileSystem(conf);
assertTrue(FileUtil.copy(currentFileSystem, hfilePath, currentFileSystem, firstRegionPath, move, conf));
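
With HTableDescriptor.getTableDir gone, region paths are assembled from FSUtils.getTableDir plus RegionInfo.getTable() and the encoded region name. A sketch of the path construction (the "L#0" local-index family directory name is taken from the test above):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class RegionDirSketch {
        static Path localIndexFamilyDir(Configuration conf, RegionInfo region)
                throws IOException {
            Path root = FSUtils.getRootDir(conf);
            // <root>/data/<namespace>/<table>/<encoded-region>/L#0
            return new Path(FSUtils.getTableDir(root, region.getTable()),
                    region.getEncodedName() + Path.SEPARATOR + "L#0");
        }
    }
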
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index 66fe338..a931084 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -23,7 +23,6 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
-import java.io.IOException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
@@ -37,13 +36,10 @@ import java.util.Properties;
import jline.internal.Log;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
-import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -51,7 +47,6 @@ import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.schema.PTableKey;
-import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.QueryUtil;
@@ -622,18 +617,6 @@ public class MutableIndexIT extends ParallelStatsDisabledIT {
}
}
- private void createTableAndLoadData(Connection conn1, String tableName, String indexName, String[] strings, boolean isReverse) throws SQLException {
- createBaseTable(conn1, tableName, null);
- for (int i = 0; i < 26; i++) {
- conn1.createStatement().execute(
- "UPSERT INTO " + tableName + " values('"+strings[i]+"'," + i + ","
- + (i + 1) + "," + (i + 2) + ",'" + strings[25 - i] + "')");
- }
- conn1.commit();
- conn1.createStatement().execute(
- "CREATE " + (localIndex ? "LOCAL" : "")+" INDEX " + indexName + " ON " + tableName + "(v1"+(isReverse?" DESC":"")+") include (k3)");
- }
-
@Test
public void testIndexHalfStoreFileReader() throws Exception {
Connection conn1 = getConnection();
@@ -658,7 +641,6 @@ public class MutableIndexIT extends ParallelStatsDisabledIT {
TableName indexTable = TableName.valueOf(localIndex?tableName: indexName);
admin.flush(indexTable);
boolean merged = false;
- Table table = connectionQueryServices.getTable(indexTable.getName());
// merge regions until 1 left
long numRegions = 0;
while (true) {
@@ -666,16 +648,16 @@ public class MutableIndexIT extends ParallelStatsDisabledIT {
assertTrue(rs.next());
assertEquals(4, rs.getInt(1)); //TODO this returns 5 sometimes instead of 4, duplicate results?
try {
- List<HRegionInfo> indexRegions = admin.getTableRegions(indexTable);
+ List<RegionInfo> indexRegions = admin.getRegions(indexTable);
numRegions = indexRegions.size();
if (numRegions==1) {
break;
}
if(!merged) {
- List<HRegionInfo> regions =
- admin.getTableRegions(indexTable);
+ List<RegionInfo> regions =
+ admin.getRegions(indexTable);
Log.info("Merging: " + regions.size());
- admin.mergeRegions(regions.get(0).getEncodedNameAsBytes(),
+ admin.mergeRegionsAsync(regions.get(0).getEncodedNameAsBytes(),
regions.get(1).getEncodedNameAsBytes(), false);
merged = true;
Threads.sleep(10000);
@@ -686,7 +668,7 @@ public class MutableIndexIT extends ParallelStatsDisabledIT {
long waitStartTime = System.currentTimeMillis();
// wait until merge happened
while (System.currentTimeMillis() - waitStartTime < 10000) {
- List<HRegionInfo> regions = admin.getTableRegions(indexTable);
+ List<RegionInfo> regions = admin.getRegions(indexTable);
Log.info("Waiting:" + regions.size());
if (regions.size() < numRegions) {
break;
@@ -698,72 +680,6 @@ public class MutableIndexIT extends ParallelStatsDisabledIT {
}
}
-
- private List<HRegionInfo> splitDuringScan(Connection conn1, String tableName, String indexName, String[] strings, Admin admin, boolean isReverse)
- throws SQLException, IOException, InterruptedException {
- ResultSet rs;
-
- String query = "SELECT t_id,k1,v1 FROM " + tableName;
- rs = conn1.createStatement().executeQuery(query);
- String[] tIdColumnValues = new String[26];
- String[] v1ColumnValues = new String[26];
- int[] k1ColumnValue = new int[26];
- for (int j = 0; j < 5; j++) {
- assertTrue(rs.next());
- tIdColumnValues[j] = rs.getString("t_id");
- k1ColumnValue[j] = rs.getInt("k1");
- v1ColumnValues[j] = rs.getString("V1");
- }
-
- String[] splitKeys = new String[2];
- splitKeys[0] = strings[4];
- splitKeys[1] = strings[12];
-
- int[] splitInts = new int[2];
- splitInts[0] = 22;
- splitInts[1] = 4;
- List<HRegionInfo> regionsOfUserTable = null;
- for(int i = 0; i <=1; i++) {
- Threads.sleep(10000);
- if(localIndex) {
- admin.split(TableName.valueOf(tableName),
- ByteUtil.concat(Bytes.toBytes(splitKeys[i])));
- } else {
- admin.split(TableName.valueOf(indexName), ByteUtil.concat(Bytes.toBytes(splitInts[i])));
- }
- Thread.sleep(100);
- regionsOfUserTable =
- MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
- admin.getConnection(), TableName.valueOf(localIndex?tableName:indexName),
- false);
-
- while (regionsOfUserTable.size() != (i+2)) {
- Thread.sleep(100);
- regionsOfUserTable =
- MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
- admin.getConnection(),
- TableName.valueOf(localIndex?tableName:indexName), false);
- }
- assertEquals(i+2, regionsOfUserTable.size());
- }
- for (int j = 5; j < 26; j++) {
- assertTrue(rs.next());
- tIdColumnValues[j] = rs.getString("t_id");
- k1ColumnValue[j] = rs.getInt("k1");
- v1ColumnValues[j] = rs.getString("V1");
- }
- Arrays.sort(tIdColumnValues);
- Arrays.sort(v1ColumnValues);
- Arrays.sort(k1ColumnValue);
- assertTrue(Arrays.equals(strings, tIdColumnValues));
- assertTrue(Arrays.equals(strings, v1ColumnValues));
- for(int i=0;i<26;i++) {
- assertEquals(i, k1ColumnValue[i]);
- }
- assertFalse(rs.next());
- return regionsOfUserTable;
- }
-
private void createBaseTable(Connection conn, String tableName, String splits) throws SQLException {
String ddl = "CREATE TABLE " + tableName + " (t_id VARCHAR NOT NULL,\n" +
"k1 INTEGER NOT NULL,\n" +
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
index b8b96ac..1a380b8 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexReplicationIT.java
@@ -38,20 +38,23 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
import org.apache.phoenix.jdbc.PhoenixTestDriver;
import org.apache.phoenix.query.BaseTest;
@@ -91,8 +94,8 @@ public class MutableIndexReplicationIT extends BaseTest {
protected static Configuration conf1 = HBaseConfiguration.create();
protected static Configuration conf2;
- protected static ZooKeeperWatcher zkw1;
- protected static ZooKeeperWatcher zkw2;
+ protected static ZKWatcher zkw1;
+ protected static ZKWatcher zkw2;
protected static ReplicationAdmin admin;
@@ -122,7 +125,6 @@ public class MutableIndexReplicationIT extends BaseTest {
conf1.setLong("hbase.master.logcleaner.ttl", 10);
conf1.setInt("zookeeper.recovery.retry", 1);
conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
- conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
conf1.setBoolean("dfs.support.append", true);
conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
conf1.setInt("replication.stats.thread.period.seconds", 5);
@@ -134,7 +136,7 @@ public class MutableIndexReplicationIT extends BaseTest {
// Have to reset conf1 in case zk cluster location different
// than default
conf1 = utility1.getConfiguration();
- zkw1 = new ZooKeeperWatcher(conf1, "cluster1", null, true);
+ zkw1 = new ZKWatcher(conf1, "cluster1", null, true);
admin = new ReplicationAdmin(conf1);
LOG.info("Setup first Zk");
@@ -142,16 +144,15 @@ public class MutableIndexReplicationIT extends BaseTest {
conf2 = HBaseConfiguration.create(conf1);
conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
- conf2.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
conf2.setBoolean("dfs.support.append", true);
conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
utility2 = new HBaseTestingUtility(conf2);
utility2.setZkCluster(miniZK);
- zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true);
+ zkw2 = new ZKWatcher(conf2, "cluster2", null, true);
//replicate from cluster 1 -> cluster 2, but not back again
- admin.addPeer("1", utility2.getClusterKey());
+ admin.addPeer("1", new ReplicationPeerConfig().setClusterKey(utility2.getClusterKey()),null);
LOG.info("Setup second Zk");
utility1.startMiniCluster(2);
@@ -193,28 +194,27 @@ public class MutableIndexReplicationIT extends BaseTest {
assertFalse(rs.next());
// make sure the data tables are created on the remote cluster
- Admin admin = utility1.getHBaseAdmin();
- Admin admin2 = utility2.getHBaseAdmin();
+ Admin admin = utility1.getAdmin();
+ Admin admin2 = utility2.getAdmin();
List<String> dataTables = new ArrayList<String>();
dataTables.add(DATA_TABLE_FULL_NAME);
dataTables.add(INDEX_TABLE_FULL_NAME);
for (String tableName : dataTables) {
- HTableDescriptor desc = admin.getTableDescriptor(TableName.valueOf(tableName));
+ TableDescriptor desc = admin.getDescriptor(TableName.valueOf(tableName));
//create it as-is on the remote cluster
admin2.createTable(desc);
LOG.info("Enabling replication on source table: "+tableName);
- HColumnDescriptor[] cols = desc.getColumnFamilies();
+ ColumnFamilyDescriptor[] cols = desc.getColumnFamilies();
assertEquals(1, cols.length);
// add the replication scope to the column
- HColumnDescriptor col = desc.removeFamily(cols[0].getName());
- col.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
- desc.addFamily(col);
+ ColumnFamilyDescriptor col = ColumnFamilyDescriptorBuilder.newBuilder(cols[0].getName()).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build();
+ desc=TableDescriptorBuilder.newBuilder(desc).addColumnFamily(col).build();
//disable/modify/enable table so it has replication enabled
admin.disableTable(desc.getTableName());
- admin.modifyTable(TableName.valueOf(tableName), desc);
+ admin.modifyTable(desc);
admin.enableTable(desc.getTableName());
LOG.info("Replication enabled on source table: "+tableName);
}
@@ -250,7 +250,7 @@ public class MutableIndexReplicationIT extends BaseTest {
for (int i = 0; i < REPLICATION_RETRIES; i++) {
if (i >= REPLICATION_RETRIES - 1) {
fail("Waited too much time for put replication on table " + remoteTable
- .getTableDescriptor().getNameAsString());
+ .getDescriptor().getTableName());
}
if (ensureAnyRows(remoteTable)) {
break;
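
Enabling replication on an existing family also becomes copy-modify-rebuild: the family is recreated with global scope and swapped into a new table descriptor before the disable/modify/enable cycle. One defensible variant of the change above, assuming ColumnFamilyDescriptorBuilder.newBuilder(existing) copies the family's attributes and the table builder's removeColumnFamily/addColumnFamily pair swaps it in:

    import java.io.IOException;

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class ReplicationScopeSketch {
        static void enableReplication(Admin admin, TableDescriptor desc)
                throws IOException {
            ColumnFamilyDescriptor old = desc.getColumnFamilies()[0];
            // Copy the family, flip its replication scope to GLOBAL.
            ColumnFamilyDescriptor replicated = ColumnFamilyDescriptorBuilder
                    .newBuilder(old)
                    .setScope(HConstants.REPLICATION_SCOPE_GLOBAL)
                    .build();
            TableDescriptor updated = TableDescriptorBuilder.newBuilder(desc)
                    .removeColumnFamily(old.getName())
                    .addColumnFamily(replicated)
                    .build();
            // Same disable/modify/enable dance as before.
            admin.disableTable(desc.getTableName());
            admin.modifyTable(updated);
            admin.enableTable(desc.getTableName());
        }
    }
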
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitIT.java
index 902a83e..4d0e56f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitIT.java
@@ -32,10 +32,10 @@ import java.util.Collection;
import java.util.List;
import java.util.Properties;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
@@ -103,7 +103,7 @@ public abstract class MutableIndexSplitIT extends ParallelStatsDisabledIT {
"CREATE " + (localIndex ? "LOCAL" : "")+" INDEX " + indexName + " ON " + tableName + "(v1"+(isReverse?" DESC":"")+") include (k3)");
}
- private List<HRegionInfo> splitDuringScan(Connection conn1, String tableName, String indexName, String[] strings, Admin admin, boolean isReverse)
+ private List<RegionInfo> splitDuringScan(Connection conn1, String tableName, String indexName, String[] strings, Admin admin, boolean isReverse)
throws SQLException, IOException, InterruptedException {
ResultSet rs;
@@ -126,7 +126,7 @@ public abstract class MutableIndexSplitIT extends ParallelStatsDisabledIT {
int[] splitInts = new int[2];
splitInts[0] = 22;
splitInts[1] = 4;
- List<HRegionInfo> regionsOfUserTable = null;
+ List<RegionInfo> regionsOfUserTable = null;
for(int i = 0; i <=1; i++) {
Threads.sleep(10000);
if(localIndex) {
@@ -137,16 +137,14 @@ public abstract class MutableIndexSplitIT extends ParallelStatsDisabledIT {
}
Thread.sleep(100);
regionsOfUserTable =
- MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
- admin.getConnection(), TableName.valueOf(localIndex?tableName:indexName),
- false);
+ MetaTableAccessor.getTableRegions(admin.getConnection(),
+ TableName.valueOf(localIndex ? tableName : indexName), false);
while (regionsOfUserTable.size() != (i+2)) {
Thread.sleep(100);
regionsOfUserTable =
- MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
- admin.getConnection(),
- TableName.valueOf(localIndex?tableName:indexName), false);
+ MetaTableAccessor.getTableRegions(admin.getConnection(),
+ TableName.valueOf(localIndex ? tableName : indexName), false);
}
assertEquals(i+2, regionsOfUserTable.size());
}
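
MetaTableAccessor.getTableRegions now reads hbase:meta through the Connection alone; the ZooKeeperWatcher argument is gone. A minimal sketch (the boolean is the exclude-offlined-split-parents flag, passed as false here to match the test):

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class MetaRegionsSketch {
        static List<RegionInfo> regionsOf(Connection conn, TableName table)
                throws IOException {
            // false: do not exclude offlined split parents.
            return MetaTableAccessor.getTableRegions(conn, table, false);
        }
    }
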
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
index 59ed0d0..e0c8484 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
@@ -39,12 +39,12 @@ import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
@@ -205,14 +205,14 @@ public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterI
// keep trying to split the region
final HBaseTestingUtility utility = getUtility();
- final Admin admin = utility.getHBaseAdmin();
+ final Admin admin = utility.getAdmin();
final TableName dataTN = TableName.valueOf(dataTable);
assertEquals(1, utility.getHBaseCluster().getRegions(dataTN).size());
utility.waitFor(60000L, 1000, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
try {
- List<HRegionInfo> regions = admin.getTableRegions(dataTN);
+ List<RegionInfo> regions = admin.getRegions(dataTN);
if (regions.size() > 1) {
logger.info("Found region was split");
return true;
@@ -223,9 +223,9 @@ public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterI
return false;
}
;
- HRegionInfo hRegion = regions.get(0);
+ RegionInfo hRegion = regions.get(0);
logger.info("Attempting to split region");
- admin.splitRegion(hRegion.getRegionName(), Bytes.toBytes(2));
+ admin.splitRegionAsync(hRegion.getRegionName(), Bytes.toBytes(2));
return false;
} catch (NotServingRegionException nsre) {
// during split
@@ -260,18 +260,18 @@ public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterI
final HBaseTestingUtility utility = getUtility();
// try to close the region while UPSERT SELECTs are happening,
final HRegionServer dataRs = utility.getHBaseCluster().getRegionServer(0);
- final Admin admin = utility.getHBaseAdmin();
- final HRegionInfo dataRegion =
- admin.getTableRegions(TableName.valueOf(dataTable)).get(0);
+ final Admin admin = utility.getAdmin();
+ final RegionInfo dataRegion =
+ admin.getRegions(TableName.valueOf(dataTable)).get(0);
logger.info("Closing data table region");
- admin.closeRegion(dataRs.getServerName(), dataRegion);
+ admin.unassign(dataRegion.getEncodedNameAsBytes(), true);
// make sure the region is offline
utility.waitFor(60000L, 1000, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
- List<HRegionInfo> onlineRegions =
- admin.getOnlineRegions(dataRs.getServerName());
- for (HRegionInfo onlineRegion : onlineRegions) {
+ List<RegionInfo> onlineRegions =
+ admin.getRegions(dataRs.getServerName());
+ for (RegionInfo onlineRegion : onlineRegions) {
if (onlineRegion.equals(dataRegion)) {
logger.info("Data region still online");
return false;
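
closeRegion and getOnlineRegions are retired in favour of unassign and the getRegions(ServerName) overload; a region counts as offline once it stops appearing in that list. A sketch of both halves:

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class UnassignSketch {
        // unassign(encodedName, force) replaces closeRegion(server, region).
        static void forceOffline(Admin admin, RegionInfo region) throws IOException {
            admin.unassign(region.getEncodedNameAsBytes(), true);
        }

        // getRegions(ServerName) replaces getOnlineRegions(ServerName).
        static boolean isOnline(Admin admin, ServerName server, RegionInfo region)
                throws IOException {
            List<RegionInfo> online = admin.getRegions(server);
            for (RegionInfo r : online) {
                if (r.equals(region)) {
                    return true;
                }
            }
            return false;
        }
    }
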
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
index 7c6de68..beb4762 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
@@ -27,10 +27,12 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.VersionInfo;
@@ -122,12 +124,12 @@ public class FailForUnsupportedHBaseVersionsIT {
try {
// setup the primary table
- @SuppressWarnings("deprecation")
- HTableDescriptor desc = new HTableDescriptor(
- "testDoesNotStartRegionServerForUnsupportedCompressionAndVersion");
+ TableDescriptorBuilder descBuilder = TableDescriptorBuilder.newBuilder(TableName.valueOf(
+ "testDoesNotStartRegionServerForUnsupportedCompressionAndVersion"));
byte[] family = Bytes.toBytes("f");
- desc.addFamily(new HColumnDescriptor(family));
-
+
+ descBuilder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
+ TableDescriptor desc=descBuilder.build();
// enable indexing to a non-existant index table
String indexTableName = "INDEX_TABLE";
ColumnGroup fam1 = new ColumnGroup(indexTableName);
@@ -140,7 +142,7 @@ public class FailForUnsupportedHBaseVersionsIT {
HRegionServer server = util.getMiniHBaseCluster().getRegionServer(0);
// create the primary table
- Admin admin = util.getHBaseAdmin();
+ Admin admin = util.getAdmin();
if (supported) {
admin.createTable(desc);
assertFalse("Hosting regeion server failed, even the HBase version (" + version
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/iterate/RoundRobinResultIteratorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/iterate/RoundRobinResultIteratorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/iterate/RoundRobinResultIteratorIT.java
index b0c2cb4..35492cc 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/iterate/RoundRobinResultIteratorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/iterate/RoundRobinResultIteratorIT.java
@@ -37,9 +37,9 @@ import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
@@ -240,7 +240,7 @@ public class RoundRobinResultIteratorIT extends ParallelStatsDisabledIT {
Connection conn = getConnection();
conn.createStatement().execute("CREATE TABLE " + tableName + "("
+ "a VARCHAR PRIMARY KEY, b VARCHAR) "
- + HTableDescriptor.MAX_FILESIZE + "=" + maxFileSize + ","
+ + TableDescriptorBuilder.MAX_FILESIZE + "=" + maxFileSize + ","
+ " SALT_BUCKETS = " + NUM_SALT_BUCKETS);
PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?)");
int rowCount = 0;
@@ -309,7 +309,6 @@ public class RoundRobinResultIteratorIT extends ParallelStatsDisabledIT {
@Test
public void testBug2074() throws Exception {
- Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = getConnection();
try {
conn.createStatement().execute("CREATE TABLE EVENTS"
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
index bde8aeb..6167259 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
@@ -34,14 +34,14 @@ import java.util.Map;
import java.util.Properties;
import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.ipc.CallRunner;
-import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
import org.apache.hadoop.hbase.util.Bytes;
@@ -216,13 +216,13 @@ public class PhoenixServerRpcIT extends BaseUniqueNamesOwnClusterIT {
AssignmentManager am = master.getAssignmentManager();
// verify there is only a single region for data table
- List<HRegionInfo> tableRegions = admin.getTableRegions(TableName.valueOf(table1));
+ List<RegionInfo> tableRegions = admin.getRegions(TableName.valueOf(table1));
assertEquals("Expected single region for " + table1, tableRegions.size(), 1);
- HRegionInfo hri1 = tableRegions.get(0);
+ RegionInfo hri1 = tableRegions.get(0);
// verify there is only a single region for index table
- tableRegions = admin.getTableRegions(TableName.valueOf(table2));
- HRegionInfo hri2 = tableRegions.get(0);
+ tableRegions = admin.getRegions(TableName.valueOf(table2));
+ RegionInfo hri2 = tableRegions.get(0);
assertEquals("Expected single region for " + table2, tableRegions.size(), 1);
ServerName serverName1 = am.getRegionStates().getRegionServerOfRegion(hri1);
@@ -246,15 +246,15 @@ public class PhoenixServerRpcIT extends BaseUniqueNamesOwnClusterIT {
while (dstServer.getOnlineRegion(hri2.getRegionName()) == null
|| dstServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
|| srcServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameInBytes)
- || master.getAssignmentManager().getRegionStates().isRegionsInTransition()) {
+ || master.getAssignmentManager().getRegionStates().isRegionInTransition(hri2)) {
// wait for the move to be finished
Thread.sleep(1);
}
}
- hri1 = admin.getTableRegions(TableName.valueOf(table1)).get(0);
+ hri1 = admin.getRegions(TableName.valueOf(table1)).get(0);
serverName1 = am.getRegionStates().getRegionServerOfRegion(hri1);
- hri2 = admin.getTableRegions(TableName.valueOf(table2)).get(0);
+ hri2 = admin.getRegions(TableName.valueOf(table2)).get(0);
serverName2 = am.getRegionStates().getRegionServerOfRegion(hri2);
// verify index and data tables are on different servers
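The region lookups above follow a single replacement rule; a compact sketch of the new call, assuming an Admin handle is available:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class GetRegionsSketch {
        // Admin.getRegions(TableName) replaces the deprecated
        // Admin.getTableRegions(TableName) and yields the RegionInfo interface
        // instead of the concrete HRegionInfo.
        static RegionInfo firstRegion(Admin admin, String table) throws IOException {
            List<RegionInfo> regions = admin.getRegions(TableName.valueOf(table));
            return regions.get(0);
        }
    }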
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
index 76e3e8e..5aa97ab 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/schema/stats/StatsCollectorIT.java
@@ -38,14 +38,15 @@ import java.util.Map;
import java.util.Properties;
import java.util.Random;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver;
@@ -429,7 +430,7 @@ public abstract class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
PreparedStatement stmt;
conn.createStatement().execute("CREATE TABLE " + tableName + "(k CHAR(1) PRIMARY KEY, v INTEGER, w INTEGER) "
+ (!tableDDLOptions.isEmpty() ? tableDDLOptions + "," : "")
- + HColumnDescriptor.KEEP_DELETED_CELLS + "=" + Boolean.FALSE);
+ + ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=" + Boolean.FALSE);
stmt = conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?,?)");
for (int i = 0; i < nRows; i++) {
stmt.setString(1, Character.toString((char) ('a' + i)));
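KEEP_DELETED_CELLS now lives on ColumnFamilyDescriptorBuilder; when set programmatically rather than through DDL, the typed setter takes the KeepDeletedCells enum instead of a boolean (the same mapping this patch applies in ConnectionQueryServicesImpl further down). A sketch with a hypothetical family name:

    import org.apache.hadoop.hbase.KeepDeletedCells;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class KeepDeletedCellsSketch {
        // The boolean flag of HColumnDescriptor.setKeepDeletedCells(boolean)
        // becomes a three-valued enum (FALSE, TRUE, TTL) in HBase 2.0.
        static ColumnFamilyDescriptor familyKeepingDeletes(boolean keep) {
            return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f")) // hypothetical
                    .setKeepDeletedCells(keep ? KeepDeletedCells.TRUE : KeepDeletedCells.FALSE)
                    .build();
        }
    }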
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
index cf08d63..78c3bd2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/ParameterizedTransactionIT.java
@@ -16,8 +16,8 @@
* limitations under the License.
*/
package org.apache.phoenix.tx;
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
import static org.apache.phoenix.util.TestUtil.INDEX_DATA_SCHEMA;
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@@ -35,12 +35,13 @@ import java.util.Collection;
import java.util.List;
import java.util.Properties;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.coprocessor.PhoenixTransactionalProcessor;
import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
@@ -280,9 +281,9 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
conn.createStatement().execute("ALTER TABLE " + nonTxTableName + " SET TRANSACTIONAL=true");
htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes( nonTxTableName));
- assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
+ assertTrue(htable.getDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(index));
- assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
+ assertTrue(htable.getDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
conn.createStatement().execute("UPSERT INTO " + nonTxTableName + " VALUES (4, 'c')");
ResultSet rs = conn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ k FROM " + nonTxTableName + " WHERE v IS NULL");
@@ -357,10 +358,10 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
assertFalse(rs.next());
htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes("SYSTEM." + nonTxTableName));
- assertFalse(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
+ assertFalse(htable.getDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
assertEquals(1,conn.unwrap(PhoenixConnection.class).getQueryServices().
getTableDescriptor(Bytes.toBytes("SYSTEM." + nonTxTableName)).
- getFamily(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES).getMaxVersions());
+ getColumnFamily(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES).getMaxVersions());
}
@Test
@@ -375,7 +376,7 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
PTable table = pconn.getTable(new PTableKey(null, t1));
Table htable = pconn.getQueryServices().getTable(Bytes.toBytes(t1));
assertTrue(table.isTransactional());
- assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
+ assertTrue(htable.getDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
try {
ddl = "ALTER TABLE " + t1 + " SET transactional=false";
@@ -386,14 +387,14 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
}
Admin admin = pconn.getQueryServices().getAdmin();
- HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(t2));
- desc.addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES));
- admin.createTable(desc);
+
+ admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf(t2))
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.of(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)).build());
ddl = "CREATE TABLE " + t2 + " (k varchar primary key) transactional=true";
conn.createStatement().execute(ddl);
- HTableDescriptor htableDescriptor = admin.getTableDescriptor(TableName.valueOf(t2));
- String str = htableDescriptor.getValue(PhoenixTransactionContext.READ_NON_TX_DATA);
+ TableDescriptor tableDescriptor = admin.getDescriptor(TableName.valueOf(t2));
+ String str = tableDescriptor.getValue(PhoenixTransactionContext.READ_NON_TX_DATA);
assertEquals(Boolean.TRUE.toString(), str);
// Should be ok, as HBase metadata should match existing metadata.
@@ -409,7 +410,7 @@ public class ParameterizedTransactionIT extends ParallelStatsDisabledIT {
table = pconn.getTable(new PTableKey(null, t1));
htable = pconn.getQueryServices().getTable(Bytes.toBytes(t1));
assertTrue(table.isTransactional());
- assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
+ assertTrue(htable.getDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
}
@Test
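The descriptor reads above reduce to one new Admin call; a sketch, assuming a String-keyed table property such as READ_NON_TX_DATA:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class GetDescriptorSketch {
        // Admin.getDescriptor(TableName) replaces Admin.getTableDescriptor(TableName);
        // String-keyed metadata is still read with getValue(String).
        static String readTableProperty(Admin admin, String table, String key) throws IOException {
            TableDescriptor desc = admin.getDescriptor(TableName.valueOf(table));
            return desc.getValue(key); // null when the key is not set
        }
    }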
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
index 9286c2e..dcbc83e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
@@ -33,8 +33,9 @@ import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
import org.apache.phoenix.exception.SQLExceptionCode;
@@ -166,53 +167,53 @@ public class TransactionIT extends ParallelStatsDisabledIT {
conn.createStatement().execute("ALTER TABLE " + nonTxTableName + "1 SET TRANSACTIONAL=true");
- HTableDescriptor desc = conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes(nonTxTableName + "1"));
- for (HColumnDescriptor colDesc : desc.getFamilies()) {
+ TableDescriptor desc = conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes(nonTxTableName + "1"));
+ for (ColumnFamilyDescriptor colDesc : desc.getColumnFamilies()) {
assertEquals(QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, colDesc.getMaxVersions());
assertEquals(1000, colDesc.getTimeToLive());
- String propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL);
- assertEquals(1000, Integer.parseInt(propertyTTL));
+ byte[] propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL_BYTES);
+ assertEquals(1000, Bytes.toInt(propertyTTL));
}
desc = conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes("IDX1"));
- for (HColumnDescriptor colDesc : desc.getFamilies()) {
+ for (ColumnFamilyDescriptor colDesc : desc.getColumnFamilies()) {
assertEquals(QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, colDesc.getMaxVersions());
assertEquals(1000, colDesc.getTimeToLive());
- String propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL);
- assertEquals(1000, Integer.parseInt(propertyTTL));
+ byte[] propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL_BYTES);
+ assertEquals(1000, Bytes.toInt(propertyTTL));
}
desc = conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes("IDX2"));
- for (HColumnDescriptor colDesc : desc.getFamilies()) {
+ for (ColumnFamilyDescriptor colDesc : desc.getColumnFamilies()) {
assertEquals(QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, colDesc.getMaxVersions());
assertEquals(1000, colDesc.getTimeToLive());
- String propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL);
- assertEquals(1000, Integer.parseInt(propertyTTL));
+ byte[] propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL_BYTES);
+ assertEquals(1000, Bytes.toInt(propertyTTL));
}
conn.createStatement().execute("CREATE TABLE " + nonTxTableName + "2(k INTEGER PRIMARY KEY, a.v VARCHAR, b.v VARCHAR, c.v VARCHAR)");
conn.createStatement().execute("ALTER TABLE " + nonTxTableName + "2 SET TRANSACTIONAL=true, VERSIONS=10");
desc = conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes( nonTxTableName + "2"));
- for (HColumnDescriptor colDesc : desc.getFamilies()) {
+ for (ColumnFamilyDescriptor colDesc : desc.getColumnFamilies()) {
assertEquals(10, colDesc.getMaxVersions());
- assertEquals(HColumnDescriptor.DEFAULT_TTL, colDesc.getTimeToLive());
- assertEquals(null, colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL));
+ assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_TTL, colDesc.getTimeToLive());
+ assertEquals(null, colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL_BYTES));
}
conn.createStatement().execute("ALTER TABLE " + nonTxTableName + "2 SET TTL=1000");
desc = conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes( nonTxTableName + "2"));
- for (HColumnDescriptor colDesc : desc.getFamilies()) {
+ for (ColumnFamilyDescriptor colDesc : desc.getColumnFamilies()) {
assertEquals(10, colDesc.getMaxVersions());
assertEquals(1000, colDesc.getTimeToLive());
- String propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL);
- assertEquals(1000, Integer.parseInt(propertyTTL));
+ byte[] propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL_BYTES);
+ assertEquals(1000, Bytes.toInt(propertyTTL));
}
conn.createStatement().execute("CREATE TABLE " + nonTxTableName + "3(k INTEGER PRIMARY KEY, a.v VARCHAR, b.v VARCHAR, c.v VARCHAR)");
conn.createStatement().execute("ALTER TABLE " + nonTxTableName + "3 SET TRANSACTIONAL=true, b.VERSIONS=10, c.VERSIONS=20");
desc = conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes( nonTxTableName + "3"));
- assertEquals(QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, desc.getFamily(Bytes.toBytes("A")).getMaxVersions());
- assertEquals(10, desc.getFamily(Bytes.toBytes("B")).getMaxVersions());
- assertEquals(20, desc.getFamily(Bytes.toBytes("C")).getMaxVersions());
+ assertEquals(QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, desc.getColumnFamily(Bytes.toBytes("A")).getMaxVersions());
+ assertEquals(10, desc.getColumnFamily(Bytes.toBytes("B")).getMaxVersions());
+ assertEquals(20, desc.getColumnFamily(Bytes.toBytes("C")).getMaxVersions());
conn.createStatement().execute("CREATE TABLE " + nonTxTableName + "4(k INTEGER PRIMARY KEY, a.v VARCHAR, b.v VARCHAR, c.v VARCHAR)");
try {
@@ -231,11 +232,11 @@ public class TransactionIT extends ParallelStatsDisabledIT {
conn.createStatement().execute("CREATE TABLE TX_TABLE1(k INTEGER PRIMARY KEY, v VARCHAR) TTL=1000, TRANSACTIONAL=true");
desc = conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes("TX_TABLE1"));
- for (HColumnDescriptor colDesc : desc.getFamilies()) {
+ for (ColumnFamilyDescriptor colDesc : desc.getColumnFamilies()) {
assertEquals(QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL, colDesc.getMaxVersions());
- assertEquals(HColumnDescriptor.DEFAULT_TTL, colDesc.getTimeToLive());
- String propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL);
- assertEquals(1000, Integer.parseInt(propertyTTL));
+ assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_TTL, colDesc.getTimeToLive());
+ byte[] propertyTTL = colDesc.getValue(PhoenixTransactionContext.PROPERTY_TTL_BYTES);
+ assertEquals(1000, Bytes.toInt(propertyTTL));
}
}
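The Integer.parseInt-to-Bytes.toInt change above follows from the byte[]-keyed accessor; a sketch of the read path, assuming the property was written as a serialized int:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilyPropertySketch {
        // ColumnFamilyDescriptor.getValue(byte[]) returns raw bytes, so numeric
        // properties are decoded with Bytes.toInt rather than Integer.parseInt.
        static int readIntProperty(TableDescriptor desc, byte[] family, byte[] key) {
            ColumnFamilyDescriptor colDesc = desc.getColumnFamily(family);
            byte[] raw = colDesc.getValue(key); // assumes written via Bytes.toBytes(int)
            return Bytes.toInt(raw);
        }
    }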
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index 3cb36ee..3a70f66 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-import org.apache.hadoop.hbase.io.HalfStoreFileReader;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
index e9b5b37..9b88b03 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
@@ -248,15 +248,15 @@ public class ServerCacheClient {
Set<HRegionLocation> servers = new HashSet<HRegionLocation>(nRegions);
for (HRegionLocation entry : locations) {
// Keep track of servers we've sent to and only send once
- byte[] regionStartKey = entry.getRegionInfo().getStartKey();
- byte[] regionEndKey = entry.getRegionInfo().getEndKey();
+ byte[] regionStartKey = entry.getRegion().getStartKey();
+ byte[] regionEndKey = entry.getRegion().getEndKey();
if ( ! servers.contains(entry) &&
keyRanges.intersectRegion(regionStartKey, regionEndKey,
cacheUsingTable.getIndexType() == IndexType.LOCAL)) {
// Call RPC once per server
servers.add(entry);
if (LOG.isDebugEnabled()) {LOG.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));}
- final byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
+ final byte[] key = getKeyInRegion(entry.getRegion().getStartKey());
final Table htable = services.getTable(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
closeables.add(htable);
futures.add(executor.submit(new JobCallable<Boolean>() {
@@ -355,7 +355,7 @@ public class ServerCacheClient {
// Call once per server
if (remainingOnServers.contains(entry)) {
try {
- byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
+ byte[] key = getKeyInRegion(entry.getRegion().getStartKey());
iterateOverTable.coprocessorService(ServerCachingService.class, key, key,
new Batch.Call<ServerCachingService, RemoveServerCacheResponse>() {
@Override
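The getRegionInfo-to-getRegion renames above are mechanical; a sketch of the accessor pair on HRegionLocation:

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class RegionLocationSketch {
        // HRegionLocation.getRegion() supersedes getRegionInfo(); the RegionInfo
        // interface keeps the same start/end key accessors.
        static byte[][] regionBoundaries(HRegionLocation location) {
            RegionInfo region = location.getRegion();
            return new byte[][] { region.getStartKey(), region.getEndKey() };
        }
    }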
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
index 9eaaf62..a0c0971 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
@@ -158,8 +158,8 @@ public class UpsertCompiler {
statement.getConnection().getQueryServices()
.getTableRegionLocation(table.getParentName().getBytes(), rowKey);
byte[] regionPrefix =
- region.getRegionInfo().getStartKey().length == 0 ? new byte[region
- .getRegionInfo().getEndKey().length] : region.getRegionInfo()
+ region.getRegion().getStartKey().length == 0 ? new byte[region
+ .getRegion().getEndKey().length] : region.getRegion()
.getStartKey();
if (regionPrefix.length != 0) {
ptr.set(ScanRanges.prefixKey(ptr.get(), 0, ptr.getLength(), regionPrefix,
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 7f0be01..602df4b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
@@ -62,6 +61,7 @@ import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -464,7 +464,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
if (flushSize <= 0) {
flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
- HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
+ TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE);
}
/**
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java
index 8ef1f8d..3017231 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/ClientAggregatePlan.java
@@ -25,7 +25,6 @@ import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.compile.ExplainPlan;
@@ -204,7 +203,7 @@ public class ClientAggregatePlan extends ClientProcessingPlan {
}
@Override
- protected Tuple wrapKeyValueAsResult(KeyValue keyValue) {
+ protected Tuple wrapKeyValueAsResult(Cell keyValue) {
return new MultiKeyValueTuple(Collections.<Cell> singletonList(keyValue));
}
@@ -230,7 +229,7 @@ public class ClientAggregatePlan extends ClientProcessingPlan {
}
@Override
- protected Tuple wrapKeyValueAsResult(KeyValue keyValue)
+ protected Tuple wrapKeyValueAsResult(Cell keyValue)
throws SQLException {
return new MultiKeyValueTuple(Collections.<Cell> singletonList(keyValue));
}
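The signature change above reflects HBase 2.0's Cell-first read path; a sketch of the wrapping, reusing Phoenix's MultiKeyValueTuple as the hunk does:

    import java.util.Collections;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
    import org.apache.phoenix.schema.tuple.Tuple;

    public class CellWrappingSketch {
        // Callbacks now take the Cell interface instead of the concrete KeyValue;
        // the tuple construction itself is unchanged.
        static Tuple wrap(Cell cell) {
            return new MultiKeyValueTuple(Collections.<Cell> singletonList(cell));
        }
    }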
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index 729b928..cb8accf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -40,8 +40,6 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.client.Delete;
@@ -49,7 +47,10 @@ import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -329,7 +330,6 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
// Causes the Increment to be ignored as we're committing the mutations
// ourselves below.
e.bypass();
- e.complete();
// ON DUPLICATE KEY IGNORE will return empty list if row already exists
// as no action is required in that case.
if (!mutations.isEmpty()) {
@@ -789,20 +789,20 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
/**
* Enable indexing on the given table
- * @param desc {@link HTableDescriptor} for the table on which indexing should be enabled
+ * @param descBuilder {@link TableDescriptorBuilder} for the table on which indexing should be enabled
* @param builder class to use when building the index for this table
* @param properties map of custom configuration options to make available to your
* {@link IndexBuilder} on the server-side
* @param priority TODO
* @throws IOException the Indexer coprocessor cannot be added
*/
- public static void enableIndexing(HTableDescriptor desc, Class<? extends IndexBuilder> builder,
+ public static void enableIndexing(TableDescriptorBuilder descBuilder, Class<? extends IndexBuilder> builder,
Map<String, String> properties, int priority) throws IOException {
if (properties == null) {
properties = new HashMap<String, String>();
}
properties.put(Indexer.INDEX_BUILDER_CONF_KEY, builder.getName());
- desc.addCoprocessor(Indexer.class.getName(), null, priority, properties);
+ descBuilder.addCoprocessor(Indexer.class.getName(), null, priority, properties);
}
}
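Because TableDescriptor is immutable, coprocessor registration has to happen on the builder before build(), which is why enableIndexing now takes a TableDescriptorBuilder. A sketch with hypothetical names:

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorRegistrationSketch {
        // addCoprocessor mutates the builder; a null jar path loads the class
        // from the region server classpath.
        static TableDescriptorBuilder withCoprocessor(String table, String coprocClass,
                int priority, Map<String, String> properties) throws IOException {
            if (properties == null) {
                properties = new HashMap<String, String>();
            }
            TableDescriptorBuilder builder =
                    TableDescriptorBuilder.newBuilder(TableName.valueOf(table));
            builder.addCoprocessor(coprocClass, null, priority, properties);
            return builder;
        }
    }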
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
index 4adc7b9..cc1c773 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/NonTxIndexBuilder.java
@@ -16,8 +16,6 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
index ceac999..d9abd75 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
@@ -113,7 +113,7 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
if (env != null
&& !allowLocalUpdates
&& tableReference.getTableName().equals(
- env.getRegion().getTableDesc().getNameAsString())) {
+ env.getRegion().getTableDescriptor().getTableName().getNameAsString())) {
continue;
}
/*
@@ -147,7 +147,7 @@ public class ParallelWriterIndexCommitter implements IndexCommitter {
if (allowLocalUpdates
&& env != null
&& tableReference.getTableName().equals(
- env.getRegion().getTableDesc().getNameAsString())) {
+ env.getRegion().getTableDescriptor().getTableName().getNameAsString())) {
try {
throwFailureIfDone();
IndexUtil.writeLocalUpdates(env.getRegion(), mutations, true);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
index f427646..cf8279a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
@@ -124,7 +124,7 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
if (env != null
&& !allowLocalUpdates
&& tableReference.getTableName().equals(
- env.getRegion().getTableDesc().getNameAsString())) {
+ env.getRegion().getTableDescriptor().getTableName().getNameAsString())) {
continue;
}
tables.add(tableReference);
@@ -153,7 +153,7 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
if (allowLocalUpdates
&& env != null
&& tableReference.getTableName().equals(
- env.getRegion().getTableDesc().getNameAsString())) {
+ env.getRegion().getTableDescriptor().getTableName().getNameAsString())) {
try {
throwFailureIfDone();
IndexUtil.writeLocalUpdates(env.getRegion(), mutations, true);
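The repeated rename above replaces Region.getTableDesc() with a two-step chain; a sketch of the local-table check it implements:

    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

    public class LocalTableCheckSketch {
        // Region.getTableDesc().getNameAsString() becomes
        // Region.getTableDescriptor().getTableName().getNameAsString().
        static boolean isLocalTable(RegionCoprocessorEnvironment env, String tableName) {
            return tableName.equals(
                    env.getRegion().getTableDescriptor().getTableName().getNameAsString());
        }
    }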
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
index ffb199a..ef1b40a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
@@ -75,8 +75,10 @@ public class PhoenixIndexCodec extends BaseIndexCodec {
Pair<ValueGetter, IndexUpdate> statePair = state.getIndexUpdateState(maintainer.getAllColumns(), metaData.getReplayWrite() != null, false, context);
ValueGetter valueGetter = statePair.getFirst();
IndexUpdate indexUpdate = statePair.getSecond();
- indexUpdate.setTable(maintainer.isLocalIndex() ? state.getEnvironment().getRegion()
- .getTableDesc().getName() : maintainer.getIndexTableName());
+ indexUpdate
+ .setTable(maintainer.isLocalIndex() ? state.getEnvironment().getRegion()
+ .getTableDescriptor().getTableName().getName() : maintainer
+ .getIndexTableName());
Put put = maintainer.buildUpdateMutation(KV_BUILDER, valueGetter, ptr, state.getCurrentTimestamp(), env
.getRegion().getRegionInfo().getStartKey(), env.getRegion().getRegionInfo().getEndKey());
indexUpdate.setUpdate(put);
@@ -104,7 +106,8 @@ public class PhoenixIndexCodec extends BaseIndexCodec {
if (valueGetter!=null) {
IndexUpdate indexUpdate = statePair.getSecond();
indexUpdate.setTable(maintainer.isLocalIndex() ? state.getEnvironment().getRegion()
- .getTableDesc().getName() : maintainer.getIndexTableName());
+ .getTableDescriptor().getTableName().getName() : maintainer
+ .getIndexTableName());
Delete delete = maintainer.buildDeleteMutation(KV_BUILDER, valueGetter, ptr, state.getPendingUpdate(),
state.getCurrentTimestamp(), env.getRegion().getRegionInfo().getStartKey(), env.getRegion().getRegionInfo().getEndKey());
indexUpdate.setUpdate(delete);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index 297902f..9a2981f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -32,11 +32,11 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
@@ -90,7 +90,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
this.env = env;
rebuildIndexOnFailure = env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB,
QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD);
- HTableDescriptor htd = env.getRegion().getTableDesc();
+ TableDescriptor htd = env.getRegion().getTableDescriptor();
// If rebuild index is turned off globally, no need to check the table because the background thread
// won't be running in this case
if (rebuildIndexOnFailure) {
@@ -194,8 +194,8 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
timestamp = minTimeStamp;
// If the data table has local index column families then get local indexes to disable.
- if (ref.getTableName().equals(env.getRegion().getTableDesc().getTableName().getNameAsString())
- && MetaDataUtil.hasLocalIndexColumnFamily(env.getRegion().getTableDesc())) {
+ if (ref.getTableName().equals(env.getRegion().getTableDescriptor().getTableName().getNameAsString())
+ && MetaDataUtil.hasLocalIndexColumnFamily(env.getRegion().getTableDescriptor())) {
for (String tableName : getLocalIndexNames(ref, mutations)) {
indexTableNames.put(tableName, minTimeStamp);
}
@@ -283,7 +283,7 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
}
IndexMaintainer indexMaintainer = localIndex.getIndexMaintainer(dataTable, conn);
- HRegionInfo regionInfo = this.env.getRegion().getRegionInfo();
+ RegionInfo regionInfo = this.env.getRegion().getRegionInfo();
int offset =
regionInfo.getStartKey().length == 0 ? regionInfo.getEndKey().length
: regionInfo.getStartKey().length;
[2/8] phoenix git commit: PHOENIX-4304 Refactoring to avoid using
deprecated HTableDescriptor, HColumnDescriptor,
HRegionInfo (Rajeshbabu Chintaguntla)
Posted by an...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index f6052a7..e710c82 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -57,9 +57,9 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.filter.PageFilter;
@@ -161,7 +161,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
static final Function<HRegionLocation, KeyRange> TO_KEY_RANGE = new Function<HRegionLocation, KeyRange>() {
@Override
public KeyRange apply(HRegionLocation region) {
- return KeyRange.getKeyRange(region.getRegionInfo().getStartKey(), region.getRegionInfo().getEndKey());
+ return KeyRange.getKeyRange(region.getRegion().getStartKey(), region.getRegion().getEndKey());
}
};
@@ -533,7 +533,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
int nBoundaries = regionLocations.size() - 1;
List<byte[]> ranges = Lists.newArrayListWithExpectedSize(nBoundaries);
for (int i = 0; i < nBoundaries; i++) {
- HRegionInfo regionInfo = regionLocations.get(i).getRegionInfo();
+ RegionInfo regionInfo = regionLocations.get(i).getRegion();
ranges.add(regionInfo.getEndKey());
}
return ranges;
@@ -650,7 +650,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
List<Scan> scans = Lists.newArrayListWithExpectedSize(2);
while (regionIndex <= stopIndex) {
HRegionLocation regionLocation = regionLocations.get(regionIndex);
- HRegionInfo regionInfo = regionLocation.getRegionInfo();
+ RegionInfo regionInfo = regionLocation.getRegion();
Scan newScan = ScanUtil.newScan(scan);
byte[] endKey;
if (regionIndex == stopIndex) {
@@ -727,7 +727,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
if (stopKey.length > 0) {
stopIndex = Math.min(stopIndex, regionIndex + getIndexContainingExclusive(regionBoundaries.subList(regionIndex, stopIndex), stopKey));
if (isLocalIndex) {
- stopKey = regionLocations.get(stopIndex).getRegionInfo().getEndKey();
+ stopKey = regionLocations.get(stopIndex).getRegion().getEndKey();
}
}
List<List<Scan>> parallelScans = Lists.newArrayListWithExpectedSize(stopIndex - regionIndex + 1);
@@ -773,7 +773,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
// Merge bisect with guideposts for all but the last region
while (regionIndex <= stopIndex) {
HRegionLocation regionLocation = regionLocations.get(regionIndex);
- HRegionInfo regionInfo = regionLocation.getRegionInfo();
+ RegionInfo regionInfo = regionLocation.getRegion();
byte[] currentGuidePostBytes = currentGuidePost.copyBytes();
byte[] endKey, endRegionKey = EMPTY_BYTE_ARRAY;
if (regionIndex == stopIndex) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/iterate/MapReduceParallelScanGrouper.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/MapReduceParallelScanGrouper.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/MapReduceParallelScanGrouper.java
index 593608f..f25d89d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/MapReduceParallelScanGrouper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/MapReduceParallelScanGrouper.java
@@ -22,15 +22,16 @@ import java.util.List;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.phoenix.compile.QueryPlan;
@@ -66,7 +67,7 @@ public class MapReduceParallelScanGrouper implements ParallelScanGrouper {
Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
FileSystem fs = rootDir.getFileSystem(conf);
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
- HBaseProtos.SnapshotDescription snapshotDescription = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
+ SnapshotDescription snapshotDescription = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDescription);
return getRegionLocationsFromManifest(manifest);
}
@@ -80,14 +81,14 @@ public class MapReduceParallelScanGrouper implements ParallelScanGrouper {
}
private List<HRegionLocation> getRegionLocationsFromManifest(SnapshotManifest manifest) {
- List<SnapshotProtos.SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
+ List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
Preconditions.checkNotNull(regionManifests);
List<HRegionLocation> regionLocations = Lists.newArrayListWithCapacity(regionManifests.size());
- for (SnapshotProtos.SnapshotRegionManifest regionManifest : regionManifests) {
+ for (SnapshotRegionManifest regionManifest : regionManifests) {
regionLocations.add(new HRegionLocation(
- HRegionInfo.convert(regionManifest.getRegionInfo()), null));
+ ProtobufUtil.toRegionInfo(regionManifest.getRegionInfo()), null));
}
return regionLocations;
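The snapshot protobufs moved under HBase's shaded package, and HRegionInfo.convert is replaced by ProtobufUtil.toRegionInfo; the hunk above restated as a compilable sketch:

    import java.util.List;
    import com.google.common.collect.Lists;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
    import org.apache.hadoop.hbase.snapshot.SnapshotManifest;

    public class ManifestRegionsSketch {
        // Converts each shaded SnapshotRegionManifest's region protobuf into a
        // RegionInfo and wraps it in a location with no server assignment.
        static List<HRegionLocation> toLocations(SnapshotManifest manifest) {
            List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
            List<HRegionLocation> locations =
                    Lists.newArrayListWithCapacity(regionManifests.size());
            for (SnapshotRegionManifest regionManifest : regionManifests) {
                locations.add(new HRegionLocation(
                        ProtobufUtil.toRegionInfo(regionManifest.getRegionInfo()), null));
            }
            return locations;
        }
    }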
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java
index 3dcbef9..59f08ae 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java
@@ -21,6 +21,7 @@ package org.apache.phoenix.iterate;
import com.google.common.collect.ImmutableList;
import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -105,7 +106,7 @@ public abstract class RegionScannerFactory {
return new RegionScanner() {
private boolean hasReferences = checkForReferenceFiles();
- private HRegionInfo regionInfo = env.getRegionInfo();
+ private RegionInfo regionInfo = env.getRegionInfo();
private byte[] actualStartKey = getActualStartKey();
// If there are any reference files after local index region merge some cases we might
@@ -150,7 +151,7 @@ public abstract class RegionScannerFactory {
}
@Override
- public HRegionInfo getRegionInfo() {
+ public RegionInfo getRegionInfo() {
return s.getRegionInfo();
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
index c09b3c4..35f40ac 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
@@ -19,25 +19,35 @@
package org.apache.phoenix.iterate;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ConcurrentMap;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.client.*;
-
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.AbstractClientScanner;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.IsolationLevel;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.*;
+import org.apache.hadoop.hbase.metrics.MetricRegistry;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.OnlineRegions;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.util.*;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutorService;
+import org.apache.phoenix.util.EncodedColumnsUtil;
public class SnapshotScanner extends AbstractClientScanner {
@@ -48,7 +58,7 @@ public class SnapshotScanner extends AbstractClientScanner {
List<Cell> values;
public SnapshotScanner(Configuration conf, FileSystem fs, Path rootDir,
- HTableDescriptor htd, HRegionInfo hri, Scan scan) throws Throwable{
+ TableDescriptor htd, RegionInfo hri, Scan scan) throws Throwable{
scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
values = new ArrayList<>();
@@ -119,16 +129,11 @@ public class SnapshotScanner extends AbstractClientScanner {
}
@Override
- public HRegionInfo getRegionInfo() {
+ public RegionInfo getRegionInfo() {
return region.getRegionInfo();
}
@Override
- public RegionServerServices getRegionServerServices() {
- throw new UnsupportedOperationException();
- }
-
- @Override
public ConcurrentMap<String, Object> getSharedData() {
throw new UnsupportedOperationException();
}
@@ -144,11 +149,6 @@ public class SnapshotScanner extends AbstractClientScanner {
}
@Override
- public Coprocessor getInstance() {
- throw new UnsupportedOperationException();
- }
-
- @Override
public int getPriority() {
throw new UnsupportedOperationException();
}
@@ -164,20 +164,34 @@ public class SnapshotScanner extends AbstractClientScanner {
}
@Override
- public Table getTable(TableName tableName) throws IOException {
+ public ClassLoader getClassLoader() {
throw new UnsupportedOperationException();
}
- @Override
- public Table getTable(TableName tableName, ExecutorService executorService)
- throws IOException {
+ @Override
+ public RegionCoprocessor getInstance() {
throw new UnsupportedOperationException();
- }
+ }
- @Override
- public ClassLoader getClassLoader() {
+ @Override
+ public OnlineRegions getOnlineRegions() {
throw new UnsupportedOperationException();
- }
+ }
+
+ @Override
+ public ServerName getServerName() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Connection getConnection() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public MetricRegistry getMetricRegistryForRegionServer() {
+ throw new UnsupportedOperationException();
+ }
};
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
index df60339..984cb84 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
@@ -18,13 +18,20 @@
package org.apache.phoenix.iterate;
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -33,13 +40,6 @@ import org.apache.phoenix.monitoring.ScanMetricsHolder;
import org.apache.phoenix.schema.tuple.Tuple;
import org.apache.phoenix.util.ServerUtil;
-import java.io.IOException;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-
public class TableSnapshotResultIterator implements ResultIterator {
private final Scan scan;
@@ -48,8 +48,8 @@ public class TableSnapshotResultIterator implements ResultIterator {
private final ScanMetricsHolder scanMetricsHolder;
private Tuple lastTuple = null;
private static final ResultIterator UNINITIALIZED_SCANNER = ResultIterator.EMPTY_ITERATOR;
- private ArrayList<HRegionInfo> regions;
- private HTableDescriptor htd;
+ private ArrayList<RegionInfo> regions;
+ private TableDescriptor htd;
private String snapshotName;
private Path restoreDir;
@@ -83,14 +83,14 @@ public class TableSnapshotResultIterator implements ResultIterator {
Iterator i$ = restoredRegions.iterator();
while(i$.hasNext()) {
- HRegionInfo hri = (HRegionInfo)i$.next();
+ RegionInfo hri = (RegionInfo)i$.next();
if(CellUtil.overlappingKeys(this.scan.getStartRow(), this.scan.getStopRow(),
hri.getStartKey(), hri.getEndKey())) {
this.regions.add(hri);
}
}
- Collections.sort(this.regions);
+ Collections.sort(this.regions, RegionInfo.COMPARATOR);
}
public boolean initSnapshotScanner() throws SQLException {
@@ -103,7 +103,7 @@ public class TableSnapshotResultIterator implements ResultIterator {
if (this.currentRegion >= this.regions.size())
return false;
try {
- HRegionInfo hri = regions.get(this.currentRegion);
+ RegionInfo hri = regions.get(this.currentRegion);
this.scanIterator =
new ScanningResultIterator(new SnapshotScanner(configuration, fs, restoreDir, htd, hri, scan),
scan, scanMetricsHolder);
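The explicit comparator above is needed because RegionInfo, unlike HRegionInfo, does not implement Comparable; a sketch:

    import java.util.Collections;
    import java.util.List;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class RegionSortSketch {
        // RegionInfo.COMPARATOR orders regions by table and start/end keys,
        // matching the natural ordering the old HRegionInfo provided.
        static void sortByBoundaries(List<RegionInfo> regions) {
            Collections.sort(regions, RegionInfo.COMPARATOR);
        }
    }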
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/parse/CreateTableStatement.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/CreateTableStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/CreateTableStatement.java
index 73b7a11..2d9629d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/CreateTableStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/CreateTableStatement.java
@@ -17,16 +17,12 @@
*/
package org.apache.phoenix.parse;
-import java.util.Collection;
import java.util.Collections;
import java.util.List;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
-import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.schema.PTableType;
-import org.apache.phoenix.schema.TableProperty;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableListMultimap;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
index 1cac944..b49e221 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
@@ -25,11 +25,11 @@ import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.compile.MutationPlan;
@@ -69,7 +69,7 @@ public interface ConnectionQueryServices extends QueryServices, MetaDataMutated
*/
public Table getTable(byte[] tableName) throws SQLException;
- public HTableDescriptor getTableDescriptor(byte[] tableName) throws SQLException;
+ public TableDescriptor getTableDescriptor(byte[] tableName) throws SQLException;
public HRegionLocation getTableRegionLocation(byte[] tableName, byte[] row) throws SQLException;
public List<HRegionLocation> getAllTableRegions(byte[] tableName) throws SQLException;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 66e4661..2077272 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -17,7 +17,7 @@
*/
package org.apache.phoenix.query;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
-import static org.apache.hadoop.hbase.HColumnDescriptor.TTL;
+import static org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.TTL;
import static org.apache.phoenix.coprocessor.MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP;
import static org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MAJOR_VERSION;
import static org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MINOR_VERSION;
@@ -94,26 +94,30 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.regex.Pattern;
import javax.annotation.concurrent.GuardedBy;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeepDeletedCells;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -435,10 +439,10 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
@Override
- public HTableDescriptor getTableDescriptor(byte[] tableName) throws SQLException {
+ public TableDescriptor getTableDescriptor(byte[] tableName) throws SQLException {
Table htable = getTable(tableName);
try {
- return htable.getTableDescriptor();
+ return htable.getDescriptor();
} catch (IOException e) {
if(e instanceof org.apache.hadoop.hbase.TableNotFoundException
|| e.getCause() instanceof org.apache.hadoop.hbase.TableNotFoundException) {
@@ -568,7 +572,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
HRegionLocation regionLocation = ((ClusterConnection)connection).getRegionLocation(
TableName.valueOf(tableName), currentKey, reload);
locations.add(regionLocation);
- currentKey = regionLocation.getRegionInfo().getEndKey();
+ currentKey = regionLocation.getRegion().getEndKey();
} while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW));
return locations;
} catch (org.apache.hadoop.hbase.TableNotFoundException e) {
@@ -700,30 +704,31 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
- private HColumnDescriptor generateColumnFamilyDescriptor(Pair<byte[],Map<String,Object>> family, PTableType tableType) throws SQLException {
- HColumnDescriptor columnDesc = new HColumnDescriptor(family.getFirst());
+ private ColumnFamilyDescriptor generateColumnFamilyDescriptor(Pair<byte[],Map<String,Object>> family, PTableType tableType) throws SQLException {
+ ColumnFamilyDescriptorBuilder columnDescBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family.getFirst());
if (tableType != PTableType.VIEW) {
if(props.get(QueryServices.DEFAULT_KEEP_DELETED_CELLS_ATTRIB) != null){
- columnDesc.setKeepDeletedCells(props.getBoolean(
- QueryServices.DEFAULT_KEEP_DELETED_CELLS_ATTRIB, QueryServicesOptions.DEFAULT_KEEP_DELETED_CELLS));
+ columnDescBuilder.setKeepDeletedCells(props.getBoolean(QueryServices.DEFAULT_KEEP_DELETED_CELLS_ATTRIB,
+ QueryServicesOptions.DEFAULT_KEEP_DELETED_CELLS) ? KeepDeletedCells.TRUE
+ : KeepDeletedCells.FALSE);
}
- columnDesc.setDataBlockEncoding(SchemaUtil.DEFAULT_DATA_BLOCK_ENCODING);
- columnDesc.setBloomFilterType(BloomType.NONE);
+ columnDescBuilder.setDataBlockEncoding(SchemaUtil.DEFAULT_DATA_BLOCK_ENCODING);
+ columnDescBuilder.setBloomFilterType(BloomType.NONE);
for (Entry<String,Object> entry : family.getSecond().entrySet()) {
String key = entry.getKey();
Object value = entry.getValue();
- setHColumnDescriptorValue(columnDesc, key, value);
+ setHColumnDescriptorValue(columnDescBuilder, key, value);
}
}
- return columnDesc;
+ return columnDescBuilder.build();
}
// Workaround HBASE-14737
- private static void setHColumnDescriptorValue(HColumnDescriptor columnDesc, String key, Object value) {
+ private static void setHColumnDescriptorValue(ColumnFamilyDescriptorBuilder columnDescBuilder, String key, Object value) {
if (HConstants.VERSIONS.equals(key)) {
- columnDesc.setMaxVersions(getMaxVersion(value));
+ columnDescBuilder.setMaxVersions(getMaxVersion(value));
} else {
- columnDesc.setValue(key, value == null ? null : value.toString());
+ columnDescBuilder.setValue(key, value == null ? null : value.toString());
}
}
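The same builder pattern recurs throughout the patch: every mutable HColumnDescriptor setter becomes a call on ColumnFamilyDescriptorBuilder, and the immutable descriptor only exists after build(). A standalone sketch of the API used above, with illustrative values:

    import org.apache.hadoop.hbase.KeepDeletedCells;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("0"))              // family name; "0" is Phoenix's default
        .setMaxVersions(1)
        .setKeepDeletedCells(KeepDeletedCells.FALSE) // enum replaces the old boolean setter
        .setBloomFilterType(BloomType.NONE)
        .build();                                    // immutable from here on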
@@ -741,7 +746,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
return Integer.parseInt(stringValue);
}
- private void modifyColumnFamilyDescriptor(HColumnDescriptor hcd, Map<String,Object> props) throws SQLException {
+ private void modifyColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder hcd, Map<String,Object> props) throws SQLException {
for (Entry<String, Object> entry : props.entrySet()) {
String propName = entry.getKey();
Object value = entry.getValue();
@@ -749,32 +754,32 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
}
- private HTableDescriptor generateTableDescriptor(byte[] physicalTableName, HTableDescriptor existingDesc,
+ private TableDescriptorBuilder generateTableDescriptor(byte[] physicalTableName, TableDescriptor existingDesc,
PTableType tableType, Map<String, Object> tableProps, List<Pair<byte[], Map<String, Object>>> families,
byte[][] splits, boolean isNamespaceMapped) throws SQLException {
String defaultFamilyName = (String)tableProps.remove(PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME);
- HTableDescriptor tableDescriptor = (existingDesc != null) ? new HTableDescriptor(existingDesc)
- : new HTableDescriptor(physicalTableName);
+ TableDescriptorBuilder tableDescriptorBuilder = (existingDesc != null) ? TableDescriptorBuilder.newBuilder(existingDesc)
+ : TableDescriptorBuilder.newBuilder(TableName.valueOf(physicalTableName));
// By default, do not automatically rebuild/catch up an index on a write failure
for (Entry<String,Object> entry : tableProps.entrySet()) {
String key = entry.getKey();
if (!TableProperty.isPhoenixTableProperty(key)) {
Object value = entry.getValue();
- tableDescriptor.setValue(key, value == null ? null : value.toString());
+ tableDescriptorBuilder.setValue(key, value == null ? null : value.toString());
}
}
if (families.isEmpty()) {
if (tableType != PTableType.VIEW) {
byte[] defaultFamilyByes = defaultFamilyName == null ? QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES : Bytes.toBytes(defaultFamilyName);
// Add dummy column family so we have key values for tables that
- HColumnDescriptor columnDescriptor = generateColumnFamilyDescriptor(new Pair<byte[],Map<String,Object>>(defaultFamilyByes,Collections.<String,Object>emptyMap()), tableType);
- tableDescriptor.addFamily(columnDescriptor);
+ ColumnFamilyDescriptor columnDescriptor = generateColumnFamilyDescriptor(new Pair<byte[],Map<String,Object>>(defaultFamilyByes,Collections.<String,Object>emptyMap()), tableType);
+ tableDescriptorBuilder.addColumnFamily(columnDescriptor);
}
} else {
for (Pair<byte[],Map<String,Object>> family : families) {
// If family is only in phoenix description, add it. Otherwise, modify its property accordingly.
byte[] familyByte = family.getFirst();
- if (tableDescriptor.getFamily(familyByte) == null) {
+ if (tableDescriptorBuilder.build().getColumnFamily(familyByte) == null) {
if (tableType == PTableType.VIEW) {
String fullTableName = Bytes.toString(physicalTableName);
throw new ReadOnlyTableException(
@@ -783,32 +788,34 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
SchemaUtil.getTableNameFromFullName(fullTableName),
Bytes.toString(familyByte));
}
- HColumnDescriptor columnDescriptor = generateColumnFamilyDescriptor(family, tableType);
- tableDescriptor.addFamily(columnDescriptor);
+ ColumnFamilyDescriptor columnDescriptor = generateColumnFamilyDescriptor(family, tableType);
+ tableDescriptorBuilder.addColumnFamily(columnDescriptor);
} else {
if (tableType != PTableType.VIEW) {
- HColumnDescriptor columnDescriptor = tableDescriptor.getFamily(familyByte);
+ ColumnFamilyDescriptor columnDescriptor = tableDescriptorBuilder.build().getColumnFamily(familyByte);
if (columnDescriptor == null) {
throw new IllegalArgumentException("Unable to find column descriptor with family name " + Bytes.toString(family.getFirst()));
}
- modifyColumnFamilyDescriptor(columnDescriptor, family.getSecond());
+ ColumnFamilyDescriptorBuilder columnDescriptorBuilder = ColumnFamilyDescriptorBuilder.newBuilder(columnDescriptor);
+ modifyColumnFamilyDescriptor(columnDescriptorBuilder, family.getSecond());
+ tableDescriptorBuilder.addColumnFamily(columnDescriptorBuilder.build());
}
}
}
}
- addCoprocessors(physicalTableName, tableDescriptor, tableType, tableProps);
+ addCoprocessors(physicalTableName, tableDescriptorBuilder, tableType, tableProps);
// PHOENIX-3072: Set index priority if this is a system table or index table
if (tableType == PTableType.SYSTEM) {
- tableDescriptor.setValue(QueryConstants.PRIORITY,
+ tableDescriptorBuilder.setValue(QueryConstants.PRIORITY,
String.valueOf(PhoenixRpcSchedulerFactory.getMetadataPriority(config)));
} else if (tableType == PTableType.INDEX // Global, mutable index
- && !isLocalIndexTable(tableDescriptor.getFamiliesKeys())
+ && !isLocalIndexTable(tableDescriptorBuilder.build().getColumnFamilyNames())
&& !Boolean.TRUE.equals(tableProps.get(PhoenixDatabaseMetaData.IMMUTABLE_ROWS))) {
- tableDescriptor.setValue(QueryConstants.PRIORITY,
+ tableDescriptorBuilder.setValue(QueryConstants.PRIORITY,
String.valueOf(PhoenixRpcSchedulerFactory.getIndexPriority(config)));
}
- return tableDescriptor;
+ return tableDescriptorBuilder;
}
private boolean isLocalIndexTable(Collection<byte[]> families) {
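Note that inspecting a builder's current state (getColumnFamily, getValue, getColumnFamilyNames) requires materializing it with build(), which is why the intermediate build() calls appear above. In sketch form, with an illustrative table name and key:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf("T"));
    builder.setValue("MY_KEY", "my-value");          // arbitrary metadata key/value
    TableDescriptor snapshot = builder.build();      // snapshot to query current state
    boolean hasDefaultFamily = snapshot.getColumnFamily(Bytes.toBytes("0")) != null;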
@@ -822,22 +829,17 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
- private void addCoprocessors(byte[] tableName, HTableDescriptor descriptor, PTableType tableType, Map<String,Object> tableProps) throws SQLException {
+ private void addCoprocessors(byte[] tableName, TableDescriptorBuilder builder, PTableType tableType, Map<String,Object> tableProps) throws SQLException {
// The phoenix jar must be available on HBase classpath
int priority = props.getInt(QueryServices.COPROCESSOR_PRIORITY_ATTRIB, QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY);
try {
- if (!descriptor.hasCoprocessor(ScanRegionObserver.class.getName())) {
- descriptor.addCoprocessor(ScanRegionObserver.class.getName(), null, priority, null);
- }
- if (!descriptor.hasCoprocessor(UngroupedAggregateRegionObserver.class.getName())) {
- descriptor.addCoprocessor(UngroupedAggregateRegionObserver.class.getName(), null, priority, null);
- }
- if (!descriptor.hasCoprocessor(GroupedAggregateRegionObserver.class.getName())) {
- descriptor.addCoprocessor(GroupedAggregateRegionObserver.class.getName(), null, priority, null);
- }
- if (!descriptor.hasCoprocessor(ServerCachingEndpointImpl.class.getName())) {
- descriptor.addCoprocessor(ServerCachingEndpointImpl.class.getName(), null, priority, null);
- }
+ builder.addCoprocessor(ScanRegionObserver.class.getName(), null, priority, null);
+
+ builder.addCoprocessor(UngroupedAggregateRegionObserver.class.getName(), null, priority, null);
+
+ builder.addCoprocessor(GroupedAggregateRegionObserver.class.getName(), null, priority, null);
+
+ builder.addCoprocessor(ServerCachingEndpointImpl.class.getName(), null, priority, null);
boolean isTransactional =
Boolean.TRUE.equals(tableProps.get(TableProperty.TRANSACTIONAL.name())) ||
Boolean.TRUE.equals(tableProps.get(PhoenixTransactionContext.READ_NON_TX_DATA)); // For ALTER TABLE
@@ -849,67 +851,49 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
&& !SchemaUtil.isMetaTable(tableName)
&& !SchemaUtil.isStatsTable(tableName)) {
if (isTransactional) {
- if (!descriptor.hasCoprocessor(PhoenixTransactionalIndexer.class.getName())) {
- descriptor.addCoprocessor(PhoenixTransactionalIndexer.class.getName(), null, priority, null);
- }
+ builder.addCoprocessor(PhoenixTransactionalIndexer.class.getName(), null, priority, null);
// For alter table, remove non transactional index coprocessor
- if (descriptor.hasCoprocessor(Indexer.class.getName())) {
- descriptor.removeCoprocessor(Indexer.class.getName());
- }
+ builder.removeCoprocessor(Indexer.class.getName());
} else {
- if (!descriptor.hasCoprocessor(Indexer.class.getName())) {
+ if (!builder.build().hasCoprocessor(Indexer.class.getName())) {
// If exception on alter table to transition back to non transactional
- if (descriptor.hasCoprocessor(PhoenixTransactionalIndexer.class.getName())) {
- descriptor.removeCoprocessor(PhoenixTransactionalIndexer.class.getName());
- }
+ builder.removeCoprocessor(PhoenixTransactionalIndexer.class.getName());
Map<String, String> opts = Maps.newHashMapWithExpectedSize(1);
opts.put(NonTxIndexBuilder.CODEC_CLASS_NAME_KEY, PhoenixIndexCodec.class.getName());
- Indexer.enableIndexing(descriptor, PhoenixIndexBuilder.class, opts, priority);
+ Indexer.enableIndexing(builder, PhoenixIndexBuilder.class, opts, priority);
}
}
}
- if (SchemaUtil.isStatsTable(tableName) && !descriptor.hasCoprocessor(MultiRowMutationEndpoint.class.getName())) {
- descriptor.addCoprocessor(MultiRowMutationEndpoint.class.getName(),
+ if (SchemaUtil.isStatsTable(tableName)) {
+ builder.addCoprocessor(MultiRowMutationEndpoint.class.getName(),
null, priority, null);
}
- Set<byte[]> familiesKeys = descriptor.getFamiliesKeys();
+ Set<byte[]> familiesKeys = builder.build().getColumnFamilyNames();
for(byte[] family: familiesKeys) {
if(Bytes.toString(family).startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
- if (!descriptor.hasCoprocessor(IndexHalfStoreFileReaderGenerator.class.getName())) {
- descriptor.addCoprocessor(IndexHalfStoreFileReaderGenerator.class.getName(),
+ builder.addCoprocessor(IndexHalfStoreFileReaderGenerator.class.getName(),
null, priority, null);
break;
- }
}
}
// Setup split policy on Phoenix metadata table to ensure that the key values of a Phoenix table
// stay on the same region.
if (SchemaUtil.isMetaTable(tableName) || SchemaUtil.isFunctionTable(tableName)) {
- if (!descriptor.hasCoprocessor(MetaDataEndpointImpl.class.getName())) {
- descriptor.addCoprocessor(MetaDataEndpointImpl.class.getName(), null, priority, null);
- }
+ builder.addCoprocessor(MetaDataEndpointImpl.class.getName(), null, priority, null);
if(SchemaUtil.isMetaTable(tableName) ) {
- if (!descriptor.hasCoprocessor(MetaDataRegionObserver.class.getName())) {
- descriptor.addCoprocessor(MetaDataRegionObserver.class.getName(), null, priority + 1, null);
- }
+ builder.addCoprocessor(MetaDataRegionObserver.class.getName(), null, priority + 1, null);
}
} else if (SchemaUtil.isSequenceTable(tableName)) {
- if (!descriptor.hasCoprocessor(SequenceRegionObserver.class.getName())) {
- descriptor.addCoprocessor(SequenceRegionObserver.class.getName(), null, priority, null);
- }
+ builder.addCoprocessor(SequenceRegionObserver.class.getName(), null, priority, null);
}
if (isTransactional) {
- if (!descriptor.hasCoprocessor(PhoenixTransactionalProcessor.class.getName())) {
- descriptor.addCoprocessor(PhoenixTransactionalProcessor.class.getName(), null, priority - 10, null);
- }
+ builder.addCoprocessor(PhoenixTransactionalProcessor.class.getName(), null, priority - 10, null);
} else {
// If exception on alter table to transition back to non transactional
- if (descriptor.hasCoprocessor(PhoenixTransactionalProcessor.class.getName())) {
- descriptor.removeCoprocessor(PhoenixTransactionalProcessor.class.getName());
- }
+ builder.removeCoprocessor(PhoenixTransactionalProcessor.class.getName());
}
} catch (IOException e) {
throw ServerUtil.parseServerException(e);
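One caveat with dropping the hasCoprocessor guards: addCoprocessor, on the builder as on the old HTableDescriptor, may throw IOException if the class is already registered, and generateTableDescriptor can seed the builder from an existing table's descriptor. Where that path is reachable, a guard in the new style may still be needed, in the same shape the Indexer branch above already uses:

    // Guarded add in the builder API: check the built snapshot first,
    // mirroring the removed hasCoprocessor checks.
    if (!builder.build().hasCoprocessor(ScanRegionObserver.class.getName())) {
        builder.addCoprocessor(ScanRegionObserver.class.getName(), null, priority, null);
    }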
@@ -921,7 +905,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
String getOperationName();
}
- private void pollForUpdatedTableDescriptor(final Admin admin, final HTableDescriptor newTableDescriptor,
+ private void pollForUpdatedTableDescriptor(final Admin admin, final TableDescriptor newTableDescriptor,
final byte[] tableName) throws InterruptedException, TimeoutException {
checkAndRetry(new RetriableOperation() {
@@ -932,7 +916,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
@Override
public boolean checkForCompletion() throws TimeoutException, IOException {
- HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(tableName));
+ TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(tableName));
return newTableDescriptor.equals(tableDesc);
}
});
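The polling helper just re-reads the descriptor from the master until it matches what was submitted. Condensed, with the retry plumbing omitted:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    // True once the master reports the descriptor we just pushed.
    static boolean descriptorApplied(Admin admin, TableDescriptor expected, byte[] tableName) throws IOException {
        TableDescriptor current = admin.getDescriptor(TableName.valueOf(tableName));
        return expected.equals(current);
    }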
@@ -1020,11 +1004,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
* @return true if table was created and false if it already exists
* @throws SQLException
*/
- private HTableDescriptor ensureTableCreated(byte[] physicalTableName, PTableType tableType, Map<String, Object> props,
+ private TableDescriptor ensureTableCreated(byte[] physicalTableName, PTableType tableType, Map<String, Object> props,
List<Pair<byte[], Map<String, Object>>> families, byte[][] splits, boolean modifyExistingMetaData,
boolean isNamespaceMapped) throws SQLException {
SQLException sqlE = null;
- HTableDescriptor existingDesc = null;
+ TableDescriptor existingDesc = null;
boolean isMetaTable = SchemaUtil.isMetaTable(physicalTableName);
boolean tableExist = true;
try (Admin admin = getAdmin()) {
@@ -1032,7 +1016,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
final String znode = this.props.get(HConstants.ZOOKEEPER_ZNODE_PARENT);
logger.debug("Found quorum: " + quorum + ":" + znode);
try {
- existingDesc = admin.getTableDescriptor(TableName.valueOf(physicalTableName));
+ existingDesc = admin.getDescriptor(TableName.valueOf(physicalTableName));
} catch (org.apache.hadoop.hbase.TableNotFoundException e) {
tableExist = false;
if (tableType == PTableType.VIEW) {
@@ -1044,23 +1028,23 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
}
- HTableDescriptor newDesc = generateTableDescriptor(physicalTableName, existingDesc, tableType, props, families,
+ TableDescriptorBuilder newDesc = generateTableDescriptor(physicalTableName, existingDesc, tableType, props, families,
splits, isNamespaceMapped);
-
+
if (!tableExist) {
- if (newDesc.getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES) != null && Boolean.TRUE.equals(
- PBoolean.INSTANCE.toObject(newDesc.getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES)))) {
- newDesc.setValue(HTableDescriptor.SPLIT_POLICY, IndexRegionSplitPolicy.class.getName());
+ if (newDesc.build().getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES) != null && Boolean.TRUE.equals(
+ PBoolean.INSTANCE.toObject(newDesc.build().getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES)))) {
+ newDesc.setValue(Bytes.toBytes(TableDescriptorBuilder.SPLIT_POLICY), Bytes.toBytes(IndexRegionSplitPolicy.class.getName()));
}
// Remove the splitPolicy attribute to prevent HBASE-12570
if (isMetaTable) {
- newDesc.remove(HTableDescriptor.SPLIT_POLICY);
+ newDesc.removeValue(Bytes.toBytes(TableDescriptorBuilder.SPLIT_POLICY));
}
try {
if (splits == null) {
- admin.createTable(newDesc);
+ admin.createTable(newDesc.build());
} else {
- admin.createTable(newDesc, splits);
+ admin.createTable(newDesc.build(), splits);
}
} catch (TableExistsException e) {
// We can ignore this, as it just means that another client beat us
@@ -1074,8 +1058,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
* server and compatible. This works around HBASE-12570 which causes the cluster to be
* brought down.
*/
- newDesc.setValue(HTableDescriptor.SPLIT_POLICY, MetaDataSplitPolicy.class.getName());
- modifyTable(physicalTableName, newDesc, true);
+ newDesc.setValue(TableDescriptorBuilder.SPLIT_POLICY, MetaDataSplitPolicy.class.getName());
+ modifyTable(physicalTableName, newDesc.build(), true);
}
return null;
} else {
@@ -1083,11 +1067,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName());
} else {
for(Pair<byte[],Map<String,Object>> family: families) {
- if ((newDesc.getValue(HTableDescriptor.SPLIT_POLICY)==null || !newDesc.getValue(HTableDescriptor.SPLIT_POLICY).equals(
+ if ((newDesc.build().getValue(TableDescriptorBuilder.SPLIT_POLICY)==null || !newDesc.build().getValue(TableDescriptorBuilder.SPLIT_POLICY).equals(
IndexRegionSplitPolicy.class.getName()))
&& Bytes.toString(family.getFirst()).startsWith(
QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
- newDesc.setValue(HTableDescriptor.SPLIT_POLICY, IndexRegionSplitPolicy.class.getName());
+ newDesc.setValue(TableDescriptorBuilder.SPLIT_POLICY, IndexRegionSplitPolicy.class.getName());
break;
}
}
@@ -1110,14 +1094,14 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
.setSchemaName(SchemaUtil.getSchemaNameFromFullName(physicalTableName))
.setTableName(SchemaUtil.getTableNameFromFullName(physicalTableName)).build().buildException();
}
- newDesc.remove(PhoenixTransactionContext.READ_NON_TX_DATA);
+ newDesc.removeValue(Bytes.toBytes(PhoenixTransactionContext.READ_NON_TX_DATA));
}
- if (existingDesc.equals(newDesc)) {
+ if (existingDesc.equals(newDesc.build())) {
return null; // Indicate that no metadata was changed
}
- modifyTable(physicalTableName, newDesc, true);
- return newDesc;
+ modifyTable(physicalTableName, newDesc.build(), true);
+ return newDesc.build();
}
} catch (IOException e) {
@@ -1136,16 +1120,16 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
return null; // will never make it here
}
- private void modifyTable(byte[] tableName, HTableDescriptor newDesc, boolean shouldPoll) throws IOException,
+ private void modifyTable(byte[] tableName, TableDescriptor newDesc, boolean shouldPoll) throws IOException,
InterruptedException, TimeoutException, SQLException {
TableName tn = TableName.valueOf(tableName);
try (Admin admin = getAdmin()) {
if (!allowOnlineTableSchemaUpdate()) {
admin.disableTable(tn);
- admin.modifyTable(tn, newDesc); // TODO: Update to TableDescriptor
+ admin.modifyTable(newDesc);
admin.enableTable(tn);
} else {
- admin.modifyTable(tn, newDesc); // TODO: Update to TableDescriptor
+ admin.modifyTable(newDesc);
if (shouldPoll) {
pollForUpdatedTableDescriptor(admin, newDesc, tableName);
}
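Admin.modifyTable now takes only the descriptor and derives the table name from it. The two code paths above, in sketch form:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    static void applyDescriptor(Admin admin, TableDescriptor desc, boolean onlineSchemaUpdate) throws IOException {
        TableName tn = desc.getTableName();
        if (!onlineSchemaUpdate) {
            admin.disableTable(tn);   // offline schema change
            admin.modifyTable(desc);  // table name comes from the descriptor itself
            admin.enableTable(tn);
        } else {
            admin.modifyTable(desc);  // online schema change
        }
    }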
@@ -1181,8 +1165,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
List<byte[]> regionKeys = Lists.newArrayListWithExpectedSize(locations.size());
for (HRegionLocation entry : locations) {
if (!serverMap.contains(entry)) {
- regionKeys.add(entry.getRegionInfo().getStartKey());
- regionMap.put(entry.getRegionInfo().getRegionName(), entry);
+ regionKeys.add(entry.getRegion().getStartKey());
+ regionMap.put(entry.getRegion().getRegionName(), entry);
serverMap.add(entry);
}
}
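getRegionInfo() gives way to getRegion(), which returns the client-facing RegionInfo. Outside Phoenix internals the public RegionLocator API yields the same locations; a sketch assuming an open Connection named connection:

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    try (RegionLocator locator = connection.getRegionLocator(TableName.valueOf("T"))) {
        for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(Bytes.toStringBinary(loc.getRegion().getStartKey())
                + " -> " + loc.getServerName());
        }
    }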
@@ -1308,7 +1292,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
byte[] physicalIndexName = MetaDataUtil.getViewIndexPhysicalName(physicalTableName);
tableProps.put(MetaDataUtil.IS_VIEW_INDEX_TABLE_PROP_NAME, TRUE_BYTES_AS_STRING);
- HTableDescriptor desc = ensureTableCreated(physicalIndexName, PTableType.TABLE, tableProps, families, splits,
+ TableDescriptor desc = ensureTableCreated(physicalIndexName, PTableType.TABLE, tableProps, families, splits,
false, isNamespaceMapped);
if (desc != null) {
if (!Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(desc.getValue(MetaDataUtil.IS_VIEW_INDEX_TABLE_PROP_BYTES)))) {
@@ -1327,7 +1311,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
try (Admin admin = getAdmin()) {
try {
TableName physicalIndexTableName = TableName.valueOf(physicalIndexName);
- HTableDescriptor desc = admin.getTableDescriptor(physicalIndexTableName);
+ TableDescriptor desc = admin.getDescriptor(physicalIndexTableName);
if (Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(desc.getValue(MetaDataUtil.IS_VIEW_INDEX_TABLE_PROP_BYTES)))) {
final ReadOnlyProps props = this.getProps();
final boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
@@ -1350,19 +1334,19 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
private boolean ensureLocalIndexTableDropped(byte[] physicalTableName, long timestamp) throws SQLException {
- HTableDescriptor desc = null;
+ TableDescriptor desc = null;
boolean wasDeleted = false;
try (Admin admin = getAdmin()) {
try {
- desc = admin.getTableDescriptor(TableName.valueOf(physicalTableName));
- for (byte[] fam : desc.getFamiliesKeys()) {
+ desc = admin.getDescriptor(TableName.valueOf(physicalTableName));
+ for (byte[] fam : desc.getColumnFamilyNames()) {
this.tableStatsCache.invalidate(new GuidePostsKey(physicalTableName, fam));
}
final ReadOnlyProps props = this.getProps();
final boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
if (dropMetadata) {
List<String> columnFamiles = new ArrayList<String>();
- for(HColumnDescriptor cf : desc.getColumnFamilies()) {
+ for(ColumnFamilyDescriptor cf : desc.getColumnFamilies()) {
if(cf.getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
columnFamiles.add(cf.getNameAsString());
}
@@ -1627,7 +1611,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
for ( byte[] tableName : tableNamesToDelete ) {
try {
TableName tn = TableName.valueOf(tableName);
- HTableDescriptor htableDesc = this.getTableDescriptor(tableName);
+ TableDescriptor htableDesc = this.getTableDescriptor(tableName);
admin.disableTable(tn);
admin.deleteTable(tn);
tableStatsCache.invalidateAll(htableDesc);
@@ -1646,12 +1630,13 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
}
- private static Map<String,Object> createPropertiesMap(Map<ImmutableBytesWritable,ImmutableBytesWritable> htableProps) {
+ private static Map<String,Object> createPropertiesMap(Map<Bytes,Bytes> htableProps) {
Map<String,Object> props = Maps.newHashMapWithExpectedSize(htableProps.size());
- for (Map.Entry<ImmutableBytesWritable,ImmutableBytesWritable> entry : htableProps.entrySet()) {
- ImmutableBytesWritable key = entry.getKey();
- ImmutableBytesWritable value = entry.getValue();
- props.put(Bytes.toString(key.get(), key.getOffset(), key.getLength()), Bytes.toString(value.get(), value.getOffset(), value.getLength()));
+ for (Map.Entry<Bytes,Bytes> entry : htableProps.entrySet()) {
+ Bytes key = entry.getKey();
+ Bytes value = entry.getValue();
+ props.put(Bytes.toString(key.get(), key.getOffset(), key.getLength()),
+ Bytes.toString(value.get(), value.getOffset(), value.getLength()));
}
return props;
}
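TableDescriptor.getValues() keys and values are org.apache.hadoop.hbase.util.Bytes wrappers rather than ImmutableBytesWritable, hence the rewritten conversion above. Iterating the raw map looks like this:

    import java.util.Map;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    static void dumpValues(TableDescriptor desc) {
        for (Map.Entry<Bytes, Bytes> e : desc.getValues().entrySet()) {
            Bytes k = e.getKey();
            // The wrapper exposes the backing array plus offset/length, like the old writable.
            System.out.println(Bytes.toString(k.get(), k.getOffset(), k.getLength()));
        }
    }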
@@ -1690,17 +1675,17 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
private void ensureViewIndexTableCreated(PTable table, long timestamp, boolean isNamespaceMapped)
throws SQLException {
byte[] physicalTableName = table.getPhysicalName().getBytes();
- HTableDescriptor htableDesc = this.getTableDescriptor(physicalTableName);
+ TableDescriptor htableDesc = this.getTableDescriptor(physicalTableName);
Map<String,Object> tableProps = createPropertiesMap(htableDesc.getValues());
List<Pair<byte[],Map<String,Object>>> families = Lists.newArrayListWithExpectedSize(Math.max(1, table.getColumnFamilies().size()+1));
if (families.isEmpty()) {
byte[] familyName = SchemaUtil.getEmptyColumnFamily(table);
- Map<String,Object> familyProps = createPropertiesMap(htableDesc.getFamily(familyName).getValues());
+ Map<String,Object> familyProps = createPropertiesMap(htableDesc.getColumnFamily(familyName).getValues());
families.add(new Pair<byte[],Map<String,Object>>(familyName, familyProps));
} else {
for (PColumnFamily family : table.getColumnFamilies()) {
byte[] familyName = family.getName().getBytes();
- Map<String,Object> familyProps = createPropertiesMap(htableDesc.getFamily(familyName).getValues());
+ Map<String,Object> familyProps = createPropertiesMap(htableDesc.getColumnFamily(familyName).getValues());
families.add(new Pair<byte[],Map<String,Object>>(familyName, familyProps));
}
// Always create default column family, because we don't know in advance if we'll
@@ -1723,12 +1708,12 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
public MetaDataMutationResult addColumn(final List<Mutation> tableMetaData, PTable table, Map<String, List<Pair<String,Object>>> stmtProperties, Set<String> colFamiliesForPColumnsToBeAdded, List<PColumn> columns) throws SQLException {
List<Pair<byte[], Map<String, Object>>> families = new ArrayList<>(stmtProperties.size());
Map<String, Object> tableProps = new HashMap<String, Object>();
- Set<HTableDescriptor> tableDescriptors = Collections.emptySet();
- Set<HTableDescriptor> origTableDescriptors = Collections.emptySet();
+ Set<TableDescriptor> tableDescriptors = Collections.emptySet();
+ Set<TableDescriptor> origTableDescriptors = Collections.emptySet();
boolean nonTxToTx = false;
- Pair<HTableDescriptor,HTableDescriptor> tableDescriptorPair = separateAndValidateProperties(table, stmtProperties, colFamiliesForPColumnsToBeAdded, tableProps);
- HTableDescriptor tableDescriptor = tableDescriptorPair.getSecond();
- HTableDescriptor origTableDescriptor = tableDescriptorPair.getFirst();
+ Pair<TableDescriptor,TableDescriptor> tableDescriptorPair = separateAndValidateProperties(table, stmtProperties, colFamiliesForPColumnsToBeAdded, families, tableProps);
+ TableDescriptor tableDescriptor = tableDescriptorPair.getSecond();
+ TableDescriptor origTableDescriptor = tableDescriptorPair.getFirst();
if (tableDescriptor != null) {
tableDescriptors = Sets.newHashSetWithExpectedSize(3 + table.getIndexes().size());
origTableDescriptors = Sets.newHashSetWithExpectedSize(3 + table.getIndexes().size());
@@ -1739,9 +1724,12 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
* If the table was transitioned from non transactional to transactional, we need
* to also transition the index tables.
*/
+
+ TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tableDescriptor);
if (nonTxToTx) {
- updateDescriptorForTx(table, tableProps, tableDescriptor, Boolean.TRUE.toString(), tableDescriptors, origTableDescriptors);
+ updateDescriptorForTx(table, tableProps, tableDescriptorBuilder, Boolean.TRUE.toString(), tableDescriptors, origTableDescriptors);
}
+ tableDescriptor = tableDescriptorBuilder.build();
}
boolean success = false;
@@ -1834,11 +1822,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
return result;
}
- private void updateDescriptorForTx(PTable table, Map<String, Object> tableProps, HTableDescriptor tableDescriptor,
- String txValue, Set<HTableDescriptor> descriptorsToUpdate, Set<HTableDescriptor> origDescriptors) throws SQLException {
+ private void updateDescriptorForTx(PTable table, Map<String, Object> tableProps, TableDescriptorBuilder tableDescriptorBuilder,
+ String txValue, Set<TableDescriptor> descriptorsToUpdate, Set<TableDescriptor> origDescriptors) throws SQLException {
byte[] physicalTableName = table.getPhysicalName().getBytes();
try (Admin admin = getAdmin()) {
- setTransactional(tableDescriptor, table.getType(), txValue, tableProps);
+ setTransactional(physicalTableName, tableDescriptorBuilder, table.getType(), txValue, tableProps);
Map<String, Object> indexTableProps;
if (txValue == null) {
indexTableProps = Collections.<String,Object>emptyMap();
@@ -1847,46 +1835,50 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
indexTableProps.put(PhoenixTransactionContext.READ_NON_TX_DATA, Boolean.valueOf(txValue));
}
for (PTable index : table.getIndexes()) {
- HTableDescriptor indexDescriptor = admin.getTableDescriptor(TableName.valueOf(index.getPhysicalName().getBytes()));
- origDescriptors.add(indexDescriptor);
- indexDescriptor = new HTableDescriptor(indexDescriptor);
- descriptorsToUpdate.add(indexDescriptor);
+ TableDescriptor indexDesc = admin.getDescriptor(TableName.valueOf(index.getPhysicalName().getBytes()));
+ origDescriptors.add(indexDesc);
+ TableDescriptorBuilder indexDescriptorBuilder = TableDescriptorBuilder.newBuilder(indexDesc);
+ descriptorsToUpdate.add(indexDescriptorBuilder.build());
if (index.getColumnFamilies().isEmpty()) {
byte[] dataFamilyName = SchemaUtil.getEmptyColumnFamily(table);
byte[] indexFamilyName = SchemaUtil.getEmptyColumnFamily(index);
- HColumnDescriptor indexColDescriptor = indexDescriptor.getFamily(indexFamilyName);
- HColumnDescriptor tableColDescriptor = tableDescriptor.getFamily(dataFamilyName);
+ ColumnFamilyDescriptorBuilder indexColDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(indexDescriptorBuilder.build().getColumnFamily(indexFamilyName));
+ ColumnFamilyDescriptor tableColDescriptor = tableDescriptorBuilder.build().getColumnFamily(dataFamilyName);
indexColDescriptor.setMaxVersions(tableColDescriptor.getMaxVersions());
- indexColDescriptor.setValue(PhoenixTransactionContext.PROPERTY_TTL, tableColDescriptor.getValue(PhoenixTransactionContext.PROPERTY_TTL));
+ indexColDescriptor.setValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL),
+ tableColDescriptor.getValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL)));
+ indexDescriptorBuilder.addColumnFamily(indexColDescriptor.build());
} else {
for (PColumnFamily family : index.getColumnFamilies()) {
byte[] familyName = family.getName().getBytes();
- indexDescriptor.getFamily(familyName).setMaxVersions(tableDescriptor.getFamily(familyName).getMaxVersions());
- HColumnDescriptor indexColDescriptor = indexDescriptor.getFamily(familyName);
- HColumnDescriptor tableColDescriptor = tableDescriptor.getFamily(familyName);
+ ColumnFamilyDescriptorBuilder indexColDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(indexDescriptorBuilder.build().getColumnFamily(familyName));
+ ColumnFamilyDescriptor tableColDescriptor = tableDescriptorBuilder.build().getColumnFamily(familyName);
indexColDescriptor.setMaxVersions(tableColDescriptor.getMaxVersions());
- indexColDescriptor.setValue(PhoenixTransactionContext.PROPERTY_TTL, tableColDescriptor.getValue(PhoenixTransactionContext.PROPERTY_TTL));
+ indexColDescriptor.setValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL),
+ tableColDescriptor.getValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL)));
+ indexDescriptorBuilder.addColumnFamily(indexColDescriptor.build());
}
}
- setTransactional(indexDescriptor, index.getType(), txValue, indexTableProps);
+ setTransactional(index.getPhysicalName().getBytes(), indexDescriptorBuilder, index.getType(), txValue, indexTableProps);
}
try {
- HTableDescriptor indexDescriptor = admin.getTableDescriptor(TableName.valueOf(MetaDataUtil.getViewIndexPhysicalName(physicalTableName)));
+ TableDescriptor indexDescriptor = admin.getDescriptor(TableName.valueOf(MetaDataUtil.getViewIndexPhysicalName(physicalTableName)));
origDescriptors.add(indexDescriptor);
- indexDescriptor = new HTableDescriptor(indexDescriptor);
- descriptorsToUpdate.add(indexDescriptor);
- setSharedIndexMaxVersion(table, tableDescriptor, indexDescriptor);
- setTransactional(indexDescriptor, PTableType.INDEX, txValue, indexTableProps);
+ TableDescriptorBuilder indexDescriptorBuilder = TableDescriptorBuilder.newBuilder(indexDescriptor);
+ setSharedIndexMaxVersion(table, tableDescriptorBuilder.build(), indexDescriptorBuilder);
+ setTransactional(MetaDataUtil.getViewIndexPhysicalName(physicalTableName), indexDescriptorBuilder, PTableType.INDEX, txValue, indexTableProps);
+ descriptorsToUpdate.add(indexDescriptorBuilder.build());
} catch (org.apache.hadoop.hbase.TableNotFoundException ignore) {
// Ignore, as we may never have created a view index table
}
try {
- HTableDescriptor indexDescriptor = admin.getTableDescriptor(TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(physicalTableName)));
+ TableDescriptor indexDescriptor = admin.getDescriptor(TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(physicalTableName)));
origDescriptors.add(indexDescriptor);
- indexDescriptor = new HTableDescriptor(indexDescriptor);
- descriptorsToUpdate.add(indexDescriptor);
- setSharedIndexMaxVersion(table, tableDescriptor, indexDescriptor);
- setTransactional(indexDescriptor, PTableType.INDEX, txValue, indexTableProps);
+ TableDescriptorBuilder indexDescriptorBuilder = TableDescriptorBuilder.newBuilder(indexDescriptor);
+
+ setSharedIndexMaxVersion(table, tableDescriptorBuilder.build(), indexDescriptorBuilder);
+ setTransactional(MetaDataUtil.getLocalIndexPhysicalName(physicalTableName), indexDescriptorBuilder, PTableType.INDEX, txValue, indexTableProps);
+ descriptorsToUpdate.add(indexDescriptorBuilder.build());
} catch (org.apache.hadoop.hbase.TableNotFoundException ignore) {
// Ignore, as we may never have created a view index table
}
@@ -1894,32 +1886,36 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
throw ServerUtil.parseServerException(e);
}
}
- private void setSharedIndexMaxVersion(PTable table, HTableDescriptor tableDescriptor,
- HTableDescriptor indexDescriptor) {
+ private void setSharedIndexMaxVersion(PTable table, TableDescriptor tableDescriptor,
+ TableDescriptorBuilder indexDescriptorBuilder) {
if (table.getColumnFamilies().isEmpty()) {
byte[] familyName = SchemaUtil.getEmptyColumnFamily(table);
- HColumnDescriptor indexColDescriptor = indexDescriptor.getFamily(familyName);
- HColumnDescriptor tableColDescriptor = tableDescriptor.getFamily(familyName);
- indexColDescriptor.setMaxVersions(tableColDescriptor.getMaxVersions());
- indexColDescriptor.setValue(PhoenixTransactionContext.PROPERTY_TTL, tableColDescriptor.getValue(PhoenixTransactionContext.PROPERTY_TTL));
+ ColumnFamilyDescriptorBuilder indexColDescriptorBuilder = ColumnFamilyDescriptorBuilder.newBuilder(indexDescriptorBuilder.build().getColumnFamily(familyName));
+ ColumnFamilyDescriptor tableColDescriptor = tableDescriptor.getColumnFamily(familyName);
+ indexColDescriptorBuilder.setMaxVersions(tableColDescriptor.getMaxVersions());
+ indexColDescriptorBuilder.setValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL), tableColDescriptor.getValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL)));
+ indexDescriptorBuilder.addColumnFamily(indexColDescriptorBuilder.build());
} else {
for (PColumnFamily family : table.getColumnFamilies()) {
byte[] familyName = family.getName().getBytes();
- HColumnDescriptor indexColDescriptor = indexDescriptor.getFamily(familyName);
+ ColumnFamilyDescriptor indexColDescriptor = indexDescriptorBuilder.build().getColumnFamily(familyName);
if (indexColDescriptor != null) {
- HColumnDescriptor tableColDescriptor = tableDescriptor.getFamily(familyName);
- indexColDescriptor.setMaxVersions(tableColDescriptor.getMaxVersions());
- indexColDescriptor.setValue(PhoenixTransactionContext.PROPERTY_TTL, tableColDescriptor.getValue(PhoenixTransactionContext.PROPERTY_TTL));
+ ColumnFamilyDescriptor tableColDescriptor = tableDescriptor.getColumnFamily(familyName);
+ ColumnFamilyDescriptorBuilder indexColDescriptorBuilder = ColumnFamilyDescriptorBuilder.newBuilder(indexColDescriptor);
+ indexColDescriptorBuilder.setMaxVersions(tableColDescriptor.getMaxVersions());
+ indexColDescriptorBuilder.setValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL), tableColDescriptor.getValue(Bytes.toBytes(PhoenixTransactionContext.PROPERTY_TTL)));
+ indexDescriptorBuilder.addColumnFamily(indexColDescriptorBuilder.build());
+
}
}
}
}
- private void sendHBaseMetaData(Set<HTableDescriptor> tableDescriptors, boolean pollingNeeded) throws SQLException {
+ private void sendHBaseMetaData(Set<TableDescriptor> tableDescriptors, boolean pollingNeeded) throws SQLException {
SQLException sqlE = null;
- for (HTableDescriptor descriptor : tableDescriptors) {
+ for (TableDescriptor descriptor : tableDescriptors) {
try {
- modifyTable(descriptor.getName(), descriptor, pollingNeeded);
+ modifyTable(descriptor.getTableName().getName(), descriptor, pollingNeeded);
} catch (IOException e) {
sqlE = ServerUtil.parseServerException(e);
} catch (InterruptedException e) {
@@ -1935,17 +1931,18 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
}
}
- private void setTransactional(HTableDescriptor tableDescriptor, PTableType tableType, String txValue, Map<String, Object> tableProps) throws SQLException {
+ private void setTransactional(byte[] physicalTableName, TableDescriptorBuilder tableDescriptorBuilder, PTableType tableType, String txValue, Map<String, Object> tableProps) throws SQLException {
if (txValue == null) {
- tableDescriptor.remove(PhoenixTransactionContext.READ_NON_TX_DATA);
+ tableDescriptorBuilder.removeValue(Bytes.toBytes(PhoenixTransactionContext.READ_NON_TX_DATA));
} else {
- tableDescriptor.setValue(PhoenixTransactionContext.READ_NON_TX_DATA, txValue);
+ tableDescriptorBuilder.setValue(PhoenixTransactionContext.READ_NON_TX_DATA, txValue);
}
- this.addCoprocessors(tableDescriptor.getName(), tableDescriptor, tableType, tableProps);
+ this.addCoprocessors(physicalTableName, tableDescriptorBuilder, tableType, tableProps);
}
- private Pair<HTableDescriptor,HTableDescriptor> separateAndValidateProperties(PTable table, Map<String, List<Pair<String, Object>>> properties,
- Set<String> colFamiliesForPColumnsToBeAdded, Map<String, Object> tableProps) throws SQLException {
+ private Pair<TableDescriptor, TableDescriptor> separateAndValidateProperties(PTable table,
+ Map<String, List<Pair<String, Object>>> properties, Set<String> colFamiliesForPColumnsToBeAdded,
+ List<Pair<byte[], Map<String, Object>>> families, Map<String, Object> tableProps) throws SQLException {
Map<String, Map<String, Object>> stmtFamiliesPropsMap = new HashMap<>(properties.size());
Map<String,Object> commonFamilyProps = new HashMap<>();
boolean addingColumns = colFamiliesForPColumnsToBeAdded != null && !colFamiliesForPColumnsToBeAdded.isEmpty();
@@ -2090,21 +2087,21 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
.buildException();
}
- HTableDescriptor newTableDescriptor = null;
- HTableDescriptor origTableDescriptor = null;
+ TableDescriptorBuilder newTableDescriptorBuilder = null;
+ TableDescriptor origTableDescriptor = null;
if (!allFamiliesProps.isEmpty() || !tableProps.isEmpty()) {
byte[] tableNameBytes = Bytes.toBytes(table.getPhysicalName().getString());
- HTableDescriptor existingTableDescriptor = origTableDescriptor = getTableDescriptor(tableNameBytes);
- newTableDescriptor = new HTableDescriptor(existingTableDescriptor);
+ TableDescriptor existingTableDescriptor = origTableDescriptor = this.getTableDescriptor(tableNameBytes);
+ newTableDescriptorBuilder = TableDescriptorBuilder.newBuilder(existingTableDescriptor);
if (!tableProps.isEmpty()) {
// add all the table properties to the existing table descriptor
for (Entry<String, Object> entry : tableProps.entrySet()) {
- newTableDescriptor.setValue(entry.getKey(), entry.getValue() != null ? entry.getValue().toString() : null);
+ newTableDescriptorBuilder.setValue(entry.getKey(), entry.getValue() != null ? entry.getValue().toString() : null);
}
}
if (addingColumns) {
// Make sure that all the CFs of the table have the same TTL as the empty CF.
- setTTLForNewCFs(allFamiliesProps, table, newTableDescriptor, newTTL);
+ setTTLForNewCFs(allFamiliesProps, table, newTableDescriptorBuilder, newTTL);
}
// Set TTL on all table column families, even if they're not referenced here
if (newTTL != null) {
@@ -2125,7 +2122,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
if (defaultTxMaxVersions == null) {
if (isTransactional) {
- defaultTxMaxVersions = newTableDescriptor.getFamily(SchemaUtil.getEmptyColumnFamily(table)).getMaxVersions();
+ defaultTxMaxVersions = newTableDescriptorBuilder.build()
+ .getColumnFamily(SchemaUtil.getEmptyColumnFamily(table)).getMaxVersions();
} else {
defaultTxMaxVersions =
this.getProps().getInt(
@@ -2148,8 +2146,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
// transitioning to become transactional or setting TTL on
// an already transactional table.
if (isOrWillBeTransactional) {
- int ttl = getTTL(table, newTableDescriptor, newTTL);
- if (ttl != HColumnDescriptor.DEFAULT_TTL) {
+ int ttl = getTTL(table, newTableDescriptorBuilder.build(), newTTL);
+ if (ttl != ColumnFamilyDescriptorBuilder.DEFAULT_TTL) {
for (Map.Entry<String, Map<String, Object>> entry : allFamiliesProps.entrySet()) {
Map<String, Object> props = entry.getValue();
if (props == null) {
@@ -2158,7 +2156,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
props.put(PhoenixTransactionContext.PROPERTY_TTL, ttl);
// Remove HBase TTL if we're not transitioning an existing table to become transactional
// or if the existing transactional table wasn't originally non transactional.
- if (!willBeTransactional && !Boolean.valueOf(newTableDescriptor.getValue(PhoenixTransactionContext.READ_NON_TX_DATA))) {
+ if (!willBeTransactional && !Boolean.valueOf(newTableDescriptorBuilder.build().getValue(PhoenixTransactionContext.READ_NON_TX_DATA))) {
props.remove(TTL);
}
}
@@ -2172,23 +2170,25 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
}
byte[] cf = Bytes.toBytes(entry.getKey());
- HColumnDescriptor colDescriptor = newTableDescriptor.getFamily(cf);
+ ColumnFamilyDescriptor colDescriptor = newTableDescriptorBuilder.build().getColumnFamily(cf);
if (colDescriptor == null) {
// new column family
colDescriptor = generateColumnFamilyDescriptor(new Pair<>(cf, familyProps), table.getType());
- newTableDescriptor.addFamily(colDescriptor);
+ newTableDescriptorBuilder.addColumnFamily(colDescriptor);
} else {
- modifyColumnFamilyDescriptor(colDescriptor, familyProps);
+ ColumnFamilyDescriptorBuilder colDescriptorBuilder = ColumnFamilyDescriptorBuilder.newBuilder(colDescriptor);
+ modifyColumnFamilyDescriptor(colDescriptorBuilder, familyProps);
+ colDescriptor = colDescriptorBuilder.build();
}
if (isOrWillBeTransactional) {
checkTransactionalVersionsValue(colDescriptor);
}
}
}
- return new Pair<>(origTableDescriptor, newTableDescriptor);
+ return new Pair<>(origTableDescriptor, newTableDescriptorBuilder.build());
}
- private void checkTransactionalVersionsValue(HColumnDescriptor colDescriptor) throws SQLException {
+ private void checkTransactionalVersionsValue(ColumnFamilyDescriptor colDescriptor) throws SQLException {
int maxVersions = colDescriptor.getMaxVersions();
if (maxVersions <= 1) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.TX_MAX_VERSIONS_MUST_BE_GREATER_THAN_ONE)
@@ -2212,17 +2212,17 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
return cfNames;
}
- private static int getTTL(PTable table, HTableDescriptor tableDesc, Integer newTTL) throws SQLException {
+ private static int getTTL(PTable table, TableDescriptor tableDesc, Integer newTTL) throws SQLException {
// If we're setting TTL now, then use that value. Otherwise, use empty column family value
int ttl = newTTL != null ? newTTL
- : tableDesc.getFamily(SchemaUtil.getEmptyColumnFamily(table)).getTimeToLive();
+ : tableDesc.getColumnFamily(SchemaUtil.getEmptyColumnFamily(table)).getTimeToLive();
return ttl;
}
private static void setTTLForNewCFs(Map<String, Map<String, Object>> familyProps, PTable table,
- HTableDescriptor tableDesc, Integer newTTL) throws SQLException {
+ TableDescriptorBuilder tableDescBuilder, Integer newTTL) throws SQLException {
if (!familyProps.isEmpty()) {
- int ttl = getTTL(table, tableDesc, newTTL);
+ int ttl = getTTL(table, tableDescBuilder.build(), newTTL);
for (Map.Entry<String, Map<String, Object>> entry : familyProps.entrySet()) {
Map<String, Object> props = entry.getValue();
if (props == null) {
@@ -2538,11 +2538,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
logger.debug("System mutex table already appears to exist, not creating it");
return;
}
- HTableDescriptor tableDesc = new HTableDescriptor(mutexTableName);
- HColumnDescriptor columnDesc = new HColumnDescriptor(
- PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES);
- columnDesc.setTimeToLive(TTL_FOR_MUTEX); // Let mutex expire after some time
- tableDesc.addFamily(columnDesc);
+ TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(mutexTableName)
+ .addColumnFamily(ColumnFamilyDescriptorBuilder
+ .newBuilder(PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES)
+ .setTimeToLive(TTL_FOR_MUTEX).build())
+ .build();
admin.createTable(tableDesc);
try (Table sysMutexTable = getTable(mutexTableName.getName())) {
byte[] mutexRowKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA,
@@ -2557,7 +2557,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
List<TableName> getSystemTableNames(Admin admin) throws IOException {
- return Lists.newArrayList(admin.listTableNames(QueryConstants.SYSTEM_SCHEMA_NAME + "\\..*")); // TODO: replace to pattern
+ return Lists.newArrayList(admin.listTableNames(Pattern.compile(QueryConstants.SYSTEM_SCHEMA_NAME + "\\..*")));
}
private void createOtherSystemTables(PhoenixConnection metaConnection, Admin hbaseAdmin) throws SQLException, IOException {
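The String-pattern overload of listTableNames is gone in HBase 2.0, so the regex is compiled up front. Standalone equivalent, with the schema name inlined for illustration:

    import java.io.IOException;
    import java.util.regex.Pattern;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    static void listSystemTables(Admin admin) throws IOException {
        Pattern systemTables = Pattern.compile("SYSTEM" + "\\..*");
        for (TableName name : admin.listTableNames(systemTables)) {
            System.out.println(name.getNameAsString());
        }
    }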
@@ -2632,19 +2632,21 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
columnsToAdd = addColumn(columnsToAdd, PhoenixDatabaseMetaData.STORE_NULLS
+ " " + PBoolean.INSTANCE.getSqlTypeName());
try (Admin admin = getAdmin()) {
- HTableDescriptor[] localIndexTables = admin
- .listTables(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + ".*");
- for (HTableDescriptor table : localIndexTables) {
+ List<TableDescriptor> localIndexTables =
+ admin.listTableDescriptors(Pattern
+ .compile(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX + ".*"));
+ for (TableDescriptor table : localIndexTables) {
if (table.getValue(MetaDataUtil.PARENT_TABLE_KEY) == null
&& table.getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_NAME) != null) {
- table.setValue(MetaDataUtil.PARENT_TABLE_KEY,
- MetaDataUtil.getLocalIndexUserTableName(table.getNameAsString()));
+
+ table = TableDescriptorBuilder.newBuilder(table).setValue(Bytes.toBytes(MetaDataUtil.PARENT_TABLE_KEY),
+ Bytes.toBytes(MetaDataUtil.getLocalIndexUserTableName(table.getTableName().getNameAsString()))).build();
// Explicitly disable, modify and enable the table to ensure
// co-location of data and index regions. If we just modify the
// table descriptor when online schema change enabled may reopen
// the region in same region server instead of following data region.
admin.disableTable(table.getTableName());
- admin.modifyTable(table.getTableName(), table);
+ admin.modifyTable(table);
admin.enableTable(table.getTableName());
}
}
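With descriptors immutable, "set a value on the table" becomes "rebuild from the old descriptor and push the new one", as the local-index upgrade above shows. The general shape, with an illustrative key and value:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    static void setTableValue(Admin admin, TableDescriptor table, String key, String value) throws IOException {
        TableDescriptor updated = TableDescriptorBuilder.newBuilder(table)
            .setValue(Bytes.toBytes(key), Bytes.toBytes(value))
            .build();
        admin.disableTable(table.getTableName()); // disable/modify/enable, as the upgrade path does
        admin.modifyTable(updated);
        admin.enableTable(table.getTableName());
    }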
@@ -3680,7 +3682,6 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
incrementSequenceValues(sequenceAllocations, timestamp, values, exceptions, Sequence.ValueOp.INCREMENT_SEQUENCE);
}
- @SuppressWarnings("deprecation")
private void incrementSequenceValues(List<SequenceAllocation> sequenceAllocations, long timestamp, long[] values, SQLException[] exceptions, Sequence.ValueOp op) throws SQLException {
List<Sequence> sequences = Lists.newArrayListWithExpectedSize(sequenceAllocations.size());
for (SequenceAllocation sequenceAllocation : sequenceAllocations) {
@@ -3808,7 +3809,6 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
}
}
- @SuppressWarnings("deprecation")
@Override
public void returnSequences(List<SequenceKey> keys, long timestamp, SQLException[] exceptions) throws SQLException {
List<Sequence> sequences = Lists.newArrayListWithExpectedSize(keys.size());
@@ -3885,7 +3885,6 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
// Take no locks, as this only gets run when there are no open connections
// so there's no danger of contention.
- @SuppressWarnings("deprecation")
private void returnAllSequences(ConcurrentMap<SequenceKey,Sequence> sequenceMap) throws SQLException {
List<Append> mutations = Lists.newArrayListWithExpectedSize(sequenceMap.size());
for (Sequence sequence : sequenceMap.values()) {
[5/8] phoenix git commit: PHOENIX-4403 Workaround Tephra issues and
fix all left over compilation issues in phoenix-core
Posted by an...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/transaction/TransactionAwareHTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TransactionAwareHTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TransactionAwareHTable.java
new file mode 100644
index 0000000..62d3286
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TransactionAwareHTable.java
@@ -0,0 +1,680 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.transaction;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.CompareOperator;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.OperationWithAttributes;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Row;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
+import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.tephra.AbstractTransactionAwareTable;
+import org.apache.tephra.Transaction;
+import org.apache.tephra.TransactionAware;
+import org.apache.tephra.TxConstants;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.protobuf.Descriptors.MethodDescriptor;
+import com.google.protobuf.Message;
+import com.google.protobuf.Service;
+import com.google.protobuf.ServiceException;
+
+/**
+ * A Transaction Aware HTable implementation for HBase 2.0. Operations are committed as usual, but upon a failed or
+ * aborted transaction, they are rolled back to the state before the transaction was started.
+ */
+public class TransactionAwareHTable extends AbstractTransactionAwareTable implements Table, TransactionAware {
+
+ private static final Logger LOG = LoggerFactory.getLogger(TransactionAwareHTable.class);
+ private final Table hTable;
+
+ /**
+ * Create a transactional aware instance of the passed HTable
+ *
+ * @param hTable
+ * underlying HBase table to use
+ */
+ public TransactionAwareHTable(Table hTable) {
+ this(hTable, false);
+ }
+
+ /**
+ * Create a transactional aware instance of the passed HTable
+ *
+ * @param hTable
+ * underlying HBase table to use
+ * @param conflictLevel
+ * level of conflict detection to perform (defaults to {@code COLUMN})
+ */
+ public TransactionAwareHTable(Table hTable, TxConstants.ConflictDetection conflictLevel) {
+ this(hTable, conflictLevel, false);
+ }
+
+ /**
+ * Create a transaction-aware instance of the passed HTable, with the option of allowing non-transactional
+ * operations.
+ *
+ * @param hTable
+ * underlying HBase table to use
+ * @param allowNonTransactional
+ * if true, additional operations (checkAndPut, increment, checkAndDelete) will be available, though
+ * non-transactional
+ */
+ public TransactionAwareHTable(Table hTable, boolean allowNonTransactional) {
+ this(hTable, TxConstants.ConflictDetection.COLUMN, allowNonTransactional);
+ }
+
+ /**
+ * Create a transaction-aware instance of the passed HTable, with the option of allowing non-transactional
+ * operations.
+ *
+ * @param hTable
+ * underlying HBase table to use
+ * @param conflictLevel
+ * level of conflict detection to perform (defaults to {@code COLUMN})
+ * @param allowNonTransactional
+ * if true, additional operations (checkAndPut, increment, checkAndDelete) will be available, though
+ * non-transactional
+ */
+ public TransactionAwareHTable(Table hTable, TxConstants.ConflictDetection conflictLevel,
+ boolean allowNonTransactional) {
+ super(conflictLevel, allowNonTransactional);
+ this.hTable = hTable;
+ }
+
+ /* AbstractTransactionAwareTable implementation */
+
+ @Override
+ protected byte[] getTableKey() {
+ return hTable.getName().getName();
+ }
+
+ @Override
+ protected boolean doCommit() throws IOException {
+ return true;
+ }
+
+ @Override
+ protected boolean doRollback() throws Exception {
+ try {
+ // pre-size arraylist of deletes
+ int size = 0;
+ for (Set<ActionChange> cs : changeSets.values()) {
+ size += cs.size();
+ }
+ List<Delete> rollbackDeletes = new ArrayList<>(size);
+ for (Map.Entry<Long, Set<ActionChange>> entry : changeSets.entrySet()) {
+ long transactionTimestamp = entry.getKey();
+ for (ActionChange change : entry.getValue()) {
+ byte[] row = change.getRow();
+ byte[] family = change.getFamily();
+ byte[] qualifier = change.getQualifier();
+ Delete rollbackDelete = new Delete(row);
+ makeRollbackOperation(rollbackDelete);
+ switch (conflictLevel) {
+ case ROW:
+ case NONE:
+ // issue family delete for the tx write pointer
+ rollbackDelete.addFamilyVersion(family, transactionTimestamp);
+ break;
+ case COLUMN:
+ if (family != null && qualifier == null) {
+ rollbackDelete.addFamilyVersion(family, transactionTimestamp);
+ } else if (family != null && qualifier != null) {
+ rollbackDelete.addColumn(family, qualifier, transactionTimestamp);
+ }
+ break;
+ default:
+ throw new IllegalStateException("Unknown conflict detection level: " + conflictLevel);
+ }
+ rollbackDeletes.add(rollbackDelete);
+ }
+ }
+ hTable.delete(rollbackDeletes);
+ return true;
+ } finally {
+ tx = null;
+ changeSets.clear();
+ }
+ }
+
+ /* Table implementation */
+
+ @Override
+ public TableName getName() {
+ return hTable.getName();
+ }
+
+ @Override
+ public Configuration getConfiguration() {
+ return hTable.getConfiguration();
+ }
+
+ @Override
+ public HTableDescriptor getTableDescriptor() throws IOException {
+ return hTable.getTableDescriptor();
+ }
+
+ @Override
+ public boolean exists(Get get) throws IOException {
+ if (tx == null) { throw new IOException("Transaction not started"); }
+ return hTable.exists(transactionalizeAction(get));
+ }
+
+ @Override
+ public void batch(List<? extends Row> actions, Object[] results) throws IOException, InterruptedException {
+ if (tx == null) { throw new IOException("Transaction not started"); }
+ hTable.batch(transactionalizeActions(actions), results);
+ }
+
+ @Override
+ public <R> void batchCallback(List<? extends Row> actions, Object[] results, Batch.Callback<R> callback)
+ throws IOException, InterruptedException {
+ if (tx == null) { throw new IOException("Transaction not started"); }
+ hTable.batchCallback(transactionalizeActions(actions), results, callback);
+ }
+
+ @Override
+ public Result get(Get get) throws IOException {
+ if (tx == null) { throw new IOException("Transaction not started"); }
+ return hTable.get(transactionalizeAction(get));
+ }
+
+ @Override
+ public Result[] get(List<Get> gets) throws IOException {
+ if (tx == null) { throw new IOException("Transaction not started"); }
+ ArrayList<Get> transactionalizedGets = new ArrayList<>();
+ for (Get get : gets) {
+ transactionalizedGets.add(transactionalizeAction(get));
+ }
+ return hTable.get(transactionalizedGets);
+ }
+
+ @Override
+ public ResultScanner getScanner(Scan scan) throws IOException {
+ if (tx == null) { throw new IOException("Transaction not started"); }
+ return hTable.getScanner(transactionalizeAction(scan));
+ }
+
+ @Override
+ public ResultScanner getScanner(byte[] family) throws IOException {
+ if (tx == null) { throw new IOException("Transaction not started"); }
+ Scan scan = new Scan();
+ scan.addFamily(family);
+ return hTable.getScanner(transactionalizeAction(scan));
+ }
+
+ @Override
+ public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException {
+ if (tx == null) { throw new IOException("Transaction not started"); }
+ Scan scan = new Scan();
+ scan.addColumn(family, qualifier);
+ return hTable.getScanner(transactionalizeAction(scan));
+ }
+
+ @Override
+ public void put(Put put) throws IOException {
+ if (tx == null) { throw new IOException("Transaction not started"); }
+ Put txPut = transactionalizeAction(put);
+ hTable.put(txPut);
+ }
+
+ @Override
+ public void put(List<Put> puts) throws IOException {
+ if (tx == null) { throw new IOException("Transaction not started"); }
+ List<Put> transactionalizedPuts = new ArrayList<>(puts.size());
+ for (Put put : puts) {
+ Put txPut = transactionalizeAction(put);
+ transactionalizedPuts.add(txPut);
+ }
+ hTable.put(transactionalizedPuts);
+ }
+
+ @Override
+ public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) throws IOException {
+ if (allowNonTransactional) {
+ return hTable.checkAndPut(row, family, qualifier, value, put);
+ } else {
+ throw new UnsupportedOperationException("Operation is not supported transactionally");
+ }
+ }
+
+ @Override
+ public void delete(Delete delete) throws IOException {
+ if (tx == null) { throw new IOException("Transaction not started"); }
+ hTable.delete(transactionalizeAction(delete));
+ }
+
+ @Override
+ public void delete(List<Delete> deletes) throws IOException {
+ if (tx == null) { throw new IOException("Transaction not started"); }
+ List<Delete> transactionalizedDeletes = new ArrayList<>(deletes.size());
+ for (Delete delete : deletes) {
+ Delete txDelete = transactionalizeAction(delete);
+ transactionalizedDeletes.add(txDelete);
+ }
+ hTable.delete(transactionalizedDeletes);
+ }
+
+ @Override
+ public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, Delete delete)
+ throws IOException {
+ if (allowNonTransactional) {
+ return hTable.checkAndDelete(row, family, qualifier, value, delete);
+ } else {
+ throw new UnsupportedOperationException("Operation is not supported transactionally");
+ }
+ }
+
+ @Override
+ public boolean checkAndDelete(byte[] bytes, byte[] bytes1, byte[] bytes2, CompareFilter.CompareOp compareOp,
+ byte[] bytes3, Delete delete) throws IOException {
+ if (allowNonTransactional) {
+ return hTable.checkAndDelete(bytes, bytes1, bytes2, compareOp, bytes3, delete);
+ } else {
+ throw new UnsupportedOperationException("Operation is not supported transactionally");
+ }
+ }
+
+ @Override
+ public boolean checkAndPut(byte[] bytes, byte[] bytes1, byte[] bytes2, CompareFilter.CompareOp compareOp,
+ byte[] bytes3, Put put) throws IOException {
+ if (allowNonTransactional) {
+ return hTable.checkAndPut(bytes, bytes1, bytes2, compareOp, bytes3, put);
+ } else {
+ throw new UnsupportedOperationException("Operation is not supported transactionally");
+ }
+ }
+
+ @Override
+ public boolean[] existsAll(List<Get> gets) throws IOException {
+ if (tx == null) { throw new IOException("Transaction not started"); }
+ List<Get> transactionalizedGets = new ArrayList<>(gets.size());
+ for (Get get : gets) {
+ transactionalizedGets.add(transactionalizeAction(get));
+ }
+ return hTable.existsAll(transactionalizedGets);
+ }
+
+ @Override
+ public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareFilter.CompareOp compareOp,
+ byte[] value, RowMutations rowMutations) throws IOException {
+ if (allowNonTransactional) {
+ return hTable.checkAndMutate(row, family, qualifier, compareOp, value, rowMutations);
+ } else {
+ throw new UnsupportedOperationException("checkAndMutate operation is not supported transactionally");
+ }
+ }
+
+ @Override
+ public void mutateRow(RowMutations rm) throws IOException {
+ if (tx == null) { throw new IOException("Transaction not started"); }
+ RowMutations transactionalMutations = new RowMutations(rm.getRow());
+ for (Mutation mutation : rm.getMutations()) {
+ if (mutation instanceof Put) {
+ transactionalMutations.add(transactionalizeAction((Put)mutation));
+ } else if (mutation instanceof Delete) {
+ transactionalMutations.add(transactionalizeAction((Delete)mutation));
+ }
+ }
+ hTable.mutateRow(transactionalMutations);
+ }
+
+ @Override
+ public Result append(Append append) throws IOException {
+ if (allowNonTransactional) {
+ return hTable.append(append);
+ } else {
+ throw new UnsupportedOperationException("Operation is not supported transactionally");
+ }
+ }
+
+ @Override
+ public Result increment(Increment increment) throws IOException {
+ if (allowNonTransactional) {
+ return hTable.increment(increment);
+ } else {
+ throw new UnsupportedOperationException("Operation is not supported transactionally");
+ }
+ }
+
+ @Override
+ public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) throws IOException {
+ if (allowNonTransactional) {
+ return hTable.incrementColumnValue(row, family, qualifier, amount);
+ } else {
+ throw new UnsupportedOperationException("Operation is not supported transactionally");
+ }
+ }
+
+ @Override
+ public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, Durability durability)
+ throws IOException {
+ if (allowNonTransactional) {
+ return hTable.incrementColumnValue(row, family, qualifier, amount, durability);
+ } else {
+ throw new UnsupportedOperationException("Operation is not supported transactionally");
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ hTable.close();
+ }
+
+ @Override
+ public CoprocessorRpcChannel coprocessorService(byte[] row) {
+ return hTable.coprocessorService(row);
+ }
+
+ @Override
+ public <T extends Service, R> Map<byte[], R> coprocessorService(Class<T> service, byte[] startKey, byte[] endKey,
+ Batch.Call<T, R> callable) throws ServiceException, Throwable {
+ return hTable.coprocessorService(service, startKey, endKey, callable);
+ }
+
+ @Override
+ public <T extends Service, R> void coprocessorService(Class<T> service, byte[] startKey, byte[] endKey,
+ Batch.Call<T, R> callable, Batch.Callback<R> callback) throws ServiceException, Throwable {
+ hTable.coprocessorService(service, startKey, endKey, callable, callback);
+ }
+
+ @Override
+ public <R extends Message> Map<byte[], R> batchCoprocessorService(MethodDescriptor methodDescriptor,
+ Message request, byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable {
+ return hTable.batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype);
+ }
+
+ @Override
+ public <R extends Message> void batchCoprocessorService(MethodDescriptor methodDescriptor, Message request,
+ byte[] startKey, byte[] endKey, R responsePrototype, Callback<R> callback)
+ throws ServiceException, Throwable {
+ hTable.batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype, callback);
+ }
+
+ // Helpers to get copies of objects with the timestamp set to the current transaction timestamp.
+
+ private Get transactionalizeAction(Get get) throws IOException {
+ addToOperation(get, tx);
+ return get;
+ }
+
+ private Scan transactionalizeAction(Scan scan) throws IOException {
+ addToOperation(scan, tx);
+ return scan;
+ }
+
+ private Put transactionalizeAction(Put put) throws IOException {
+ Put txPut = new Put(put.getRow(), tx.getWritePointer());
+ Set<Map.Entry<byte[], List<Cell>>> familyMap = put.getFamilyCellMap().entrySet();
+ if (!familyMap.isEmpty()) {
+ for (Map.Entry<byte[], List<Cell>> family : familyMap) {
+ List<Cell> familyValues = family.getValue();
+ if (!familyValues.isEmpty()) {
+ for (Cell value : familyValues) {
+ txPut.addColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), tx.getWritePointer(), CellUtil.cloneValue(value));
+ addToChangeSet(txPut.getRow(), CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value));
+ }
+ }
+ }
+ }
+ for (Map.Entry<String, byte[]> entry : put.getAttributesMap().entrySet()) {
+ txPut.setAttribute(entry.getKey(), entry.getValue());
+ }
+ txPut.setDurability(put.getDurability());
+ addToOperation(txPut, tx);
+ return txPut;
+ }
+
+ private Delete transactionalizeAction(Delete delete) throws IOException {
+ long transactionTimestamp = tx.getWritePointer();
+
+ byte[] deleteRow = delete.getRow();
+ Delete txDelete = new Delete(deleteRow, transactionTimestamp);
+
+ Map<byte[], List<Cell>> familyToDelete = delete.getFamilyCellMap();
+ if (familyToDelete.isEmpty()) {
+ // perform a row delete if we are using row-level conflict detection
+ if (conflictLevel == TxConstants.ConflictDetection.ROW
+ || conflictLevel == TxConstants.ConflictDetection.NONE) {
+ // Row delete leaves delete markers in all column families of the table
+ // Therefore get all the column families of the hTable from the HTableDescriptor and add them to the
+ // changeSet
+ for (HColumnDescriptor columnDescriptor : hTable.getTableDescriptor().getColumnFamilies()) {
+ // no need to identify individual columns deleted
+ addToChangeSet(deleteRow, columnDescriptor.getName(), null);
+ }
+ } else {
+ Result result = get(new Get(delete.getRow()));
+ // Delete everything
+ NavigableMap<byte[], NavigableMap<byte[], byte[]>> resultMap = result.getNoVersionMap();
+ for (Map.Entry<byte[], NavigableMap<byte[], byte[]>> familyEntry : resultMap.entrySet()) {
+ NavigableMap<byte[], byte[]> familyColumns = result.getFamilyMap(familyEntry.getKey());
+ for (Map.Entry<byte[], byte[]> column : familyColumns.entrySet()) {
+ txDelete.addColumns(familyEntry.getKey(), column.getKey(), transactionTimestamp);
+ addToChangeSet(deleteRow, familyEntry.getKey(), column.getKey());
+ }
+ }
+ }
+ } else {
+ for (Map.Entry<byte[], List<Cell>> familyEntry : familyToDelete.entrySet()) {
+ byte[] family = familyEntry.getKey();
+ List<Cell> entries = familyEntry.getValue();
+ boolean isFamilyDelete = false;
+ if (entries.size() == 1) {
+ Cell cell = entries.get(0);
+ isFamilyDelete = CellUtil.isDeleteFamily(cell);
+ }
+ if (isFamilyDelete) {
+ if (conflictLevel == TxConstants.ConflictDetection.ROW
+ || conflictLevel == TxConstants.ConflictDetection.NONE) {
+ // no need to identify individual columns deleted
+ txDelete.addFamily(family);
+ addToChangeSet(deleteRow, family, null);
+ } else {
+ Result result = get(new Get(delete.getRow()).addFamily(family));
+ // Delete entire family
+ NavigableMap<byte[], byte[]> familyColumns = result.getFamilyMap(family);
+ for (Map.Entry<byte[], byte[]> column : familyColumns.entrySet()) {
+ txDelete.addColumns(family, column.getKey(), transactionTimestamp);
+ addToChangeSet(deleteRow, family, column.getKey());
+ }
+ }
+ } else {
+ for (Cell value : entries) {
+ txDelete.addColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), transactionTimestamp);
+ addToChangeSet(deleteRow, CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value));
+ }
+ }
+ }
+ }
+ for (Map.Entry<String, byte[]> entry : delete.getAttributesMap().entrySet()) {
+ txDelete.setAttribute(entry.getKey(), entry.getValue());
+ }
+ txDelete.setDurability(delete.getDurability());
+ addToOperation(txDelete, tx);
+ return txDelete;
+ }
+
+ private List<? extends Row> transactionalizeActions(List<? extends Row> actions) throws IOException {
+ List<Row> transactionalizedActions = new ArrayList<>(actions.size());
+ for (Row action : actions) {
+ if (action instanceof Get) {
+ transactionalizedActions.add(transactionalizeAction((Get)action));
+ } else if (action instanceof Put) {
+ transactionalizedActions.add(transactionalizeAction((Put)action));
+ } else if (action instanceof Delete) {
+ transactionalizedActions.add(transactionalizeAction((Delete)action));
+ } else {
+ transactionalizedActions.add(action);
+ }
+ }
+ return transactionalizedActions;
+ }
+
+ public void addToOperation(OperationWithAttributes op, Transaction tx) throws IOException {
+ op.setAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY, txCodec.encode(tx));
+ }
+
+ protected void makeRollbackOperation(Delete delete) {
+ delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
+ }
+
+ @Override
+ public TableDescriptor getDescriptor() throws IOException {
+ return hTable.getDescriptor();
+ }
+
+ @Override
+ public boolean[] exists(List<Get> gets) throws IOException {
+ if (tx == null) { throw new IOException("Transaction not started"); }
+ List<Get> transactionalizedGets = new ArrayList<>(gets.size());
+ for (Get get : gets) {
+ transactionalizedGets.add(transactionalizeAction(get));
+ }
+ return hTable.exists(transactionalizedGets);
+ }
+
+ @Override
+ public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, Put put)
+ throws IOException {
+ if (allowNonTransactional) {
+ return hTable.checkAndPut(row, family, qualifier, op, value, put);
+ } else {
+ throw new UnsupportedOperationException("Operation is not supported transactionally");
+ }
+ }
+
+ @Override
+ public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value,
+ Delete delete) throws IOException {
+ if (allowNonTransactional) {
+ return hTable.checkAndDelete(row, family, qualifier, op, value, delete);
+ } else {
+ throw new UnsupportedOperationException("Operation is not supported transactionally");
+ }
+ }
+
+ @Override
+ public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value,
+ RowMutations mutation) throws IOException {
+ if (allowNonTransactional) {
+ return hTable.checkAndMutate(row, family, qualifier, op, value, mutation);
+ } else {
+ throw new UnsupportedOperationException("Operation is not supported transactionally");
+ }
+ }
+
+ @Override
+ public long getRpcTimeout(TimeUnit unit) {
+ return hTable.getRpcTimeout(unit);
+ }
+
+ @Override
+ public int getRpcTimeout() {
+ return hTable.getRpcTimeout();
+ }
+
+ @Override
+ public void setRpcTimeout(int rpcTimeout) {
+ hTable.setRpcTimeout(rpcTimeout);
+ }
+
+ @Override
+ public long getReadRpcTimeout(TimeUnit unit) {
+ return hTable.getReadRpcTimeout(unit);
+ }
+
+ @Override
+ public int getReadRpcTimeout() {
+ return hTable.getReadRpcTimeout();
+ }
+
+ @Override
+ public void setReadRpcTimeout(int readRpcTimeout) {
+ hTable.setReadRpcTimeout(readRpcTimeout);
+ }
+
+ @Override
+ public long getWriteRpcTimeout(TimeUnit unit) {
+ return hTable.getWriteRpcTimeout(unit);
+ }
+
+ @Override
+ public int getWriteRpcTimeout() {
+ return hTable.getWriteRpcTimeout();
+ }
+
+ @Override
+ public void setWriteRpcTimeout(int writeRpcTimeout) {
+ hTable.setWriteRpcTimeout(writeRpcTimeout);
+ }
+
+ @Override
+ public long getOperationTimeout(TimeUnit unit) {
+ return hTable.getOperationTimeout(unit);
+ }
+
+ @Override
+ public int getOperationTimeout() {
+ return hTable.getOperationTimeout();
+ }
+
+ @Override
+ public void setOperationTimeout(int operationTimeout) {
+ hTable.setOperationTimeout(operationTimeout);
+ }
+}
+
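
For context, here is a minimal usage sketch of the class above (not part of the
patch). It assumes a configured org.apache.tephra.TransactionSystemClient named
txClient and an open HBase client Connection named hbaseConn; both names are
illustrative.

    // Sketch only: wrap a Table and drive it through Tephra's TransactionContext.
    Table raw = hbaseConn.getTable(TableName.valueOf("MY_TABLE"));
    TransactionAwareHTable txTable = new TransactionAwareHTable(raw);
    TransactionContext context = new TransactionContext(txClient, txTable);
    context.start();                 // begin transaction; assigns the write pointer
    try {
        Put put = new Put(Bytes.toBytes("row1"));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        txTable.put(put);            // cell timestamp becomes tx.getWritePointer()
        context.finish();            // commit; conflict detection happens here
    } catch (Exception e) {
        context.abort();             // calls doRollback(), deleting cells written at the write pointer
        throw e;
    }
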
http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java
index 69ef0b5..047dcdb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ZKBasedMasterElectionUtil.java
@@ -25,7 +25,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs.Ids;
@@ -34,7 +34,7 @@ public class ZKBasedMasterElectionUtil {
private static final Log LOG = LogFactory.getLog(ZKBasedMasterElectionUtil.class);
- public static boolean acquireLock(ZooKeeperWatcher zooKeeperWatcher, String parentNode,
+ public static boolean acquireLock(ZKWatcher zooKeeperWatcher, String parentNode,
String lockName) throws KeeperException, InterruptedException {
// Create the parent node as Persistent
LOG.info("Creating the parent lock node:" + parentNode);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 47220db..e96eb7e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -66,7 +66,7 @@
<top.dir>${project.basedir}</top.dir>
<!-- Hadoop Versions -->
- <hbase.version>2.0.0-alpha4</hbase.version>
+ <hbase.version>2.0.0-beta-1-SNAPSHOT</hbase.version>
<hadoop-two.version>2.7.1</hadoop-two.version>
<!-- Dependency versions -->
@@ -100,7 +100,7 @@
<joni.version>2.1.2</joni.version>
<avatica.version>1.10.0</avatica.version>
<jettyVersion>8.1.7.v20120910</jettyVersion>
- <tephra.version>0.13.0-incubating</tephra.version>
+ <tephra.version>0.14.0-incubating-SNAPSHOT</tephra.version>
<spark.version>2.0.2</spark.version>
<scala.version>2.11.8</scala.version>
<scala.binary.version>2.11</scala.binary.version>
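
Both versions above are plain Maven properties, so a local build can be pointed at
other published artifacts without editing the pom, for example:
mvn clean install -DskipTests -Dhbase.version=2.0.0-beta-1-SNAPSHOT -Dtephra.version=0.14.0-incubating-SNAPSHOT
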
[8/8] phoenix git commit: PHOENIX-4404 Fix all unit test cases (not IT)
Posted by an...@apache.org.
PHOENIX-4404 Fix all unit test cases (not IT)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/37200681
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/37200681
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/37200681
Branch: refs/heads/5.x-HBase-2.0
Commit: 372006816245e683746787c1d1a7d6719c232ed3
Parents: 5b9a07d
Author: Ankit Singhal <an...@gmail.com>
Authored: Mon Nov 27 18:29:01 2017 +0530
Committer: Ankit Singhal <an...@gmail.com>
Committed: Mon Nov 27 18:29:01 2017 +0530
----------------------------------------------------------------------
.../phoenix/end2end/AggregateQueryIT.java | 2 -
.../end2end/ExplainPlanWithStatsEnabledIT.java | 7 +--
.../phoenix/end2end/FlappingLocalIndexIT.java | 3 +-
.../end2end/TableSnapshotReadsMapReduceIT.java | 4 +-
.../phoenix/end2end/index/ImmutableIndexIT.java | 11 ++--
.../end2end/index/MutableIndexFailureIT.java | 4 +-
.../end2end/index/PartialIndexRebuilderIT.java | 4 +-
.../end2end/index/txn/TxWriteFailureIT.java | 6 +--
.../apache/phoenix/execute/PartialCommitIT.java | 3 +-
.../SystemCatalogWALEntryFilterIT.java | 20 ++++---
.../TestPhoenixIndexRpcSchedulerFactory.java | 23 ++++++--
.../hbase/index/covered/data/IndexMemStore.java | 7 ++-
.../TrackingParallelWriterIndexCommitter.java | 5 +-
.../phoenix/util/PhoenixKeyValueUtil.java | 5 +-
.../phoenix/execute/UnnestArrayPlanTest.java | 3 +-
.../CoveredColumnIndexSpecifierBuilder.java | 7 ++-
.../index/covered/data/TestIndexMemStore.java | 8 ++-
.../hbase/index/write/TestIndexWriter.java | 56 ++++++++++----------
.../index/write/TestParalleIndexWriter.java | 11 ++--
.../write/TestParalleWriterIndexCommitter.java | 31 +++++------
.../index/write/TestWALRecoveryCaching.java | 6 ++-
.../query/ConnectionQueryServicesImplTest.java | 2 +-
.../schema/stats/StatisticsScannerTest.java | 37 ++++++++-----
23 files changed, 150 insertions(+), 115 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java
index cb892c6..bcfb922 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.util.ByteUtil;
@@ -105,7 +104,6 @@ public class AggregateQueryIT extends BaseQueryIT {
TableName tn =TableName.valueOf(tableName);
admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
- Table htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(tableNameBytes);
Configuration configuration = conn.unwrap(PhoenixConnection.class).getQueryServices().getConfiguration();
org.apache.hadoop.hbase.client.Connection hbaseConn = ConnectionFactory.createConnection(configuration);
((ClusterConnection)hbaseConn).clearRegionCache(TableName.valueOf(tableName));
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
index bfc6819..74f4f34 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExplainPlanWithStatsEnabledIT.java
@@ -30,7 +30,8 @@ import java.sql.SQLException;
import java.util.Collections;
import java.util.List;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixResultSet;
@@ -867,10 +868,10 @@ public class ExplainPlanWithStatsEnabledIT extends ParallelStatsEnabledIT {
createTestTable(getUrl(), ddl, null, null);
try (Connection conn = DriverManager.getConnection(getUrl())) {
// split such that some data for view2 resides on region of view1
- try (HBaseAdmin admin =
+ try (Admin admin =
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
byte[] splitKey = Bytes.toBytes("00Dabcdetenant200B");
- admin.split(Bytes.toBytes(multiTenantTable), splitKey);
+ admin.split(TableName.valueOf(multiTenantTable), splitKey);
}
/**
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java
index a5aa27e..6ebf344 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java
@@ -29,13 +29,14 @@ import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Pair;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
index 612bf3b..fcf89a0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
@@ -38,8 +38,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
@@ -200,7 +200,7 @@ public class TableSnapshotReadsMapReduceIT extends BaseUniqueNamesOwnClusterIT {
// call flush to create new files in the region
admin.flush(TableName.valueOf(tableName));
- List<HBaseProtos.SnapshotDescription> snapshots = admin.listSnapshots();
+ List<SnapshotDescription> snapshots = admin.listSnapshots();
Assert.assertEquals(tableName, snapshots.get(0).getTable());
// upsert data after snapshot
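
The client-side org.apache.hadoop.hbase.client.SnapshotDescription replaces the
protobuf HBaseProtos.SnapshotDescription here. An illustrative loop over the new
type (the snapshot workflow itself is unchanged):

    for (SnapshotDescription snapshot : admin.listSnapshots()) {
        String snapshotName = snapshot.getName();
        String tableAsString = snapshot.getTable();   // the accessor asserted on above
    }
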
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
index 9b06955..8894b84 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
@@ -40,13 +40,10 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
@@ -298,11 +295,9 @@ public class ImmutableIndexIT extends BaseUniqueNamesOwnClusterIT {
// used to create an index while a batch of rows are being written
public static class CreateIndexRegionObserver extends SimpleRegionObserver {
@Override
- public void postPut(ObserverContext<RegionCoprocessorEnvironment> c,
- Put put, WALEdit edit, final Durability durability)
- throws HBaseIOException {
- String tableName = c.getEnvironment().getRegion().getRegionInfo()
- .getTable().getNameAsString();
+ public void postPut(org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment> c,
+ Put put, org.apache.hadoop.hbase.wal.WALEdit edit, Durability durability) throws java.io.IOException {
+ String tableName = c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString();
if (tableName.equalsIgnoreCase(TABLE_NAME)
// create the index after the second batch
&& Bytes.startsWith(put.getRow(), Bytes.toBytes("varchar200_upsert_select"))) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index 1f4526d..8c7fccf 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -148,10 +148,10 @@ public class MutableIndexFailureIT extends BaseTest {
NUM_SLAVES_BASE = 4;
setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet().iterator()));
indexRebuildTaskRegionEnvironment =
- (RegionCoprocessorEnvironment) getUtility()
+ getUtility()
.getRSForFirstRegionInTable(
PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
- .getOnlineRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
+ .getRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
.get(0).getCoprocessorHost()
.findCoprocessorEnvironment(MetaDataRegionObserver.class.getName());
MetaDataRegionObserver.initRebuildIndexConnectionProps(
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
index c2ae7f2..21d1ff0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
@@ -96,10 +96,10 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME_ATTRIB, Long.toString(WAIT_AFTER_DISABLED));
setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), ReadOnlyProps.EMPTY_PROPS);
indexRebuildTaskRegionEnvironment =
- (RegionCoprocessorEnvironment) getUtility()
+ getUtility()
.getRSForFirstRegionInTable(
PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
- .getOnlineRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
+ .getRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
.get(0).getCoprocessorHost()
.findCoprocessorEnvironment(MetaDataRegionObserver.class.getName());
MetaDataRegionObserver.initRebuildIndexConnectionProps(
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/TxWriteFailureIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/TxWriteFailureIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/TxWriteFailureIT.java
index ec60151..5fae9fd 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/TxWriteFailureIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/txn/TxWriteFailureIT.java
@@ -33,13 +33,11 @@ import java.util.Map;
import java.util.Properties;
import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
import org.apache.phoenix.hbase.index.Indexer;
@@ -182,8 +180,8 @@ public class TxWriteFailureIT extends BaseUniqueNamesOwnClusterIT {
public static class FailingRegionObserver extends SimpleRegionObserver {
@Override
- public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put, WALEdit edit,
- final Durability durability) throws HBaseIOException {
+ public void prePut(org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment> c, Put put,
+ org.apache.hadoop.hbase.wal.WALEdit edit, Durability durability) throws java.io.IOException {
if (shouldFailUpsert(c, put)) {
// throwing anything other than instances of IOException result
// in this coprocessor being unloaded
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
index 10fd7f8..2ceac55 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
@@ -33,7 +33,6 @@ import java.sql.SQLException;
import java.sql.Statement;
import java.util.Arrays;
import java.util.Collection;
-import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
@@ -49,8 +48,8 @@ import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.phoenix.end2end.BaseOwnClusterIT;
import org.apache.phoenix.hbase.index.Indexer;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java b/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java
index 776e300..bb10ae0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/replication/SystemCatalogWALEntryFilterIT.java
@@ -17,14 +17,23 @@
*/
package org.apache.phoenix.replication;
+import java.io.IOException;
+import java.util.List;
+import java.util.Properties;
+import java.util.UUID;
+
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -39,11 +48,6 @@ import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
-import java.io.IOException;
-import java.util.List;
-import java.util.Properties;
-import java.util.UUID;
-
public class SystemCatalogWALEntryFilterIT extends ParallelStatsDisabledIT {
@@ -112,7 +116,7 @@ public class SystemCatalogWALEntryFilterIT extends ParallelStatsDisabledIT {
public void testOtherTablesAutoPass() throws Exception {
//Cell is nonsense but we should auto pass because the table name's not System.Catalog
WAL.Entry entry = new WAL.Entry(new WALKey(REGION,
- TableName.valueOf(TestUtil.ENTITY_HISTORY_TABLE_NAME)), new WALEdit());
+ TableName.valueOf(TestUtil.ENTITY_HISTORY_TABLE_NAME), System.currentTimeMillis()), new WALEdit());
entry.getEdit().add(CellUtil.createCell(Bytes.toBytes("foo")));
SystemCatalogWALEntryFilter filter = new SystemCatalogWALEntryFilter();
Assert.assertEquals(1, filter.filter(entry).getEdit().size());
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/it/java/org/apache/phoenix/rpc/TestPhoenixIndexRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/TestPhoenixIndexRpcSchedulerFactory.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/TestPhoenixIndexRpcSchedulerFactory.java
index fb29985..cc3c1d0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/TestPhoenixIndexRpcSchedulerFactory.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/TestPhoenixIndexRpcSchedulerFactory.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.rpc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ipc.BalancedQueueRpcExecutor;
import org.apache.hadoop.hbase.ipc.PhoenixRpcScheduler;
import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
@@ -28,11 +29,13 @@ import org.apache.hadoop.hbase.ipc.RpcScheduler;
import org.mockito.Mockito;
public class TestPhoenixIndexRpcSchedulerFactory extends PhoenixRpcSchedulerFactory {
-
+ private static Abortable abortable = new AbortServer();
+ private static final Configuration conf = HBaseConfiguration.create();
+ private static PriorityFunction qosFunction = Mockito.mock(PriorityFunction.class);
private static RpcExecutor indexRpcExecutor = Mockito.spy(new BalancedQueueRpcExecutor("test-index-queue", 30, 1,
- 300));
+ qosFunction, conf, abortable));
private static RpcExecutor metadataRpcExecutor = Mockito.spy(new BalancedQueueRpcExecutor("test-metataqueue", 30,
- 1, 300));
+ 1, qosFunction, conf, abortable));
@Override
public RpcScheduler create(Configuration conf, PriorityFunction priorityFunction, Abortable abortable) {
@@ -47,6 +50,20 @@ public class TestPhoenixIndexRpcSchedulerFactory extends PhoenixRpcSchedulerFact
return create(configuration, priorityFunction, null);
}
+ private static class AbortServer implements Abortable {
+ private boolean aborted = false;
+
+ @Override
+ public void abort(String why, Throwable e) {
+ aborted = true;
+ }
+
+ @Override
+ public boolean isAborted() {
+ return aborted;
+ }
+ }
+
public static RpcExecutor getIndexRpcExecutor() {
return indexRpcExecutor;
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
index dfd3774..8247496 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/data/IndexMemStore.java
@@ -79,7 +79,12 @@ public class IndexMemStore implements KeyValueStore {
private CellComparator comparator;
public IndexMemStore() {
- this(CellComparatorImpl.COMPARATOR);
+ this(new CellComparatorImpl(){
+ @Override
+ public int compare(Cell a, Cell b) {
+ return super.compare(a, b, true);
+ }
+ });
}
/**
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
index cf8279a..5fb8b1c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
@@ -189,8 +189,9 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
}
private void throwFailureIfDone() throws SingleIndexWriteFailureException {
- if (stopped.isStopped() || env.getConnection() == null || env.getConnection().isClosed()
- || env.getConnection().isAborted()
+ if (stopped.isStopped()
+ || (env != null && (env.getConnection() == null || env.getConnection().isClosed()
+ || env.getConnection().isAborted()))
|| Thread.currentThread().isInterrupted()) { throw new SingleIndexWriteFailureException(
"Pool closed, not attempting to write to the index!", null); }
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java
index e49865e..ed4104b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java
@@ -25,6 +25,7 @@ import java.util.Map;
import java.util.Map.Entry;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellBuilder;
import org.apache.hadoop.hbase.CellBuilder.DataType;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
@@ -76,11 +77,11 @@ public class PhoenixKeyValueUtil {
public static Cell newKeyValue(byte[] key, int keyOffset, int keyLength, byte[] cf,
int cfOffset, int cfLength, byte[] cq, int cqOffset, int cqLength, long ts, byte[] value,
- int valueOffset, int valueLength) {
+ int valueOffset, int valueLength, CellBuilder.DataType type) {
return CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
.setRow(key, keyOffset, keyLength).setFamily(cf, cfOffset, cfLength)
.setQualifier(cq, cqOffset, cqLength).setTimestamp(ts)
- .setValue(value, valueOffset, valueLength).build();
+ .setValue(value, valueOffset, valueLength).setType(type).build();
}
public static Cell newKeyValue(byte[] key, byte[] cf, byte[] cq, long ts, byte[] value) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
index 5383d9b..f2d9c03 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
@@ -31,6 +31,7 @@ import java.sql.SQLException;
import java.util.Arrays;
import java.util.List;
+import org.apache.hadoop.hbase.CellBuilder;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.compile.ColumnResolver;
@@ -159,7 +160,7 @@ public class UnnestArrayPlanTest {
for (Object[] array : arrays) {
PhoenixArray pArray = new PhoenixArray(baseType, array);
byte[] bytes = arrayType.toBytes(pArray);
- tuples.add(new SingleKeyValueTuple(PhoenixKeyValueUtil.newKeyValue(bytes, 0, bytes.length, bytes, 0, 0, bytes, 0, 0, 0, bytes, 0, 0)));
+ tuples.add(new SingleKeyValueTuple(PhoenixKeyValueUtil.newKeyValue(bytes, 0, bytes.length, bytes, 0, 0, bytes, 0, 0, 0, bytes, 0, 0, CellBuilder.DataType.Put)));
}
return tuples;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java
index 6494db2..6a54b93 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/CoveredColumnIndexSpecifierBuilder.java
@@ -28,10 +28,11 @@ import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.phoenix.hbase.index.Indexer;
/**
- * Helper to build the configuration for the {@link NonTxIndexBuilder}.
- * Helper to build the configuration for the {@link NonTxIndexBuilder}.
* <p>
* This class is NOT thread-safe; all concurrent access must be managed externally.
*/
@@ -135,7 +136,9 @@ public class CoveredColumnIndexSpecifierBuilder {
// add the codec for the index to the map of options
Map<String, String> opts = this.convertToMap();
opts.put(NonTxIndexBuilder.CODEC_CLASS_NAME_KEY, clazz.getName());
- return Indexer.enableIndexing(desc, NonTxIndexBuilder.class, opts, Coprocessor.PRIORITY_USER);
+ TableDescriptorBuilder newBuilder = TableDescriptorBuilder.newBuilder(desc);
+ Indexer.enableIndexing(newBuilder, NonTxIndexBuilder.class, opts, Coprocessor.PRIORITY_USER);
+ return newBuilder.build();
}
public static List<ColumnGroup> getColumns(Configuration conf) {
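
The builder round-trip above is the general HBase 2.0 pattern: TableDescriptor is
immutable, so any change is made on a TableDescriptorBuilder and finished with
build(). A small sketch, with the table name and option key as illustrative values:

    TableDescriptor original = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("T"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .build();
    // copy into a builder, mutate (as enableIndexing now does), then rebuild
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(original);
    builder.setValue("my.option.key", "my.option.value");
    TableDescriptor updated = builder.build();
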
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java
index bcd5666..0f5f995 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/data/TestIndexMemStore.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.hbase.index.covered.data;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
+import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
@@ -38,7 +39,12 @@ public class TestIndexMemStore {
@Test
public void testCorrectOverwritting() throws Exception {
- IndexMemStore store = new IndexMemStore(CellComparatorImpl.COMPARATOR);
+ IndexMemStore store = new IndexMemStore(new CellComparatorImpl(){
+ @Override
+ public int compare(Cell a, Cell b) {
+ return super.compare(a, b, true);
+ }
+ });
long ts = 10;
KeyValue kv = new KeyValue(row, family, qual, ts, Type.Put, val);
kv.setSequenceId(2);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
index 57e3ba1..af45dad 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestIndexWriter.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -111,24 +112,23 @@ public class TestIndexWriter {
Table table = Mockito.mock(Table.class);
final boolean[] completed = new boolean[] { false };
- Mockito.when(table.batch(Mockito.anyList(), Mockito.anyList())).thenAnswer(new Answer<Void>() {
-
- @Override
- public Void answer(InvocationOnMock invocation) throws Throwable {
- // just keep track that it was called
- completed[0] = true;
- return null;
- }
- });
+ Mockito.doAnswer(new Answer<Void>() {
+ @Override
+ public Void answer(InvocationOnMock invocation) throws Throwable {
+ // just keep track that it was called
+ completed[0] = true;
+ return null;
+ }
+ }).when(table).batch(Mockito.anyList(), Mockito.any());
Mockito.when(table.getName()).thenReturn(TableName.valueOf(testName.getTableName()));
// add the table to the set of tables, so its returned to the writer
tables.put(new ImmutableBytesPtr(tableName), table);
// setup the writer and failure policy
TrackingParallelWriterIndexCommitter committer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
- committer.setup(factory, exec, abort, stop, e);
+ committer.setup(factory, exec, stop, e);
KillServerOnFailurePolicy policy = new KillServerOnFailurePolicy();
- policy.setup(stop, abort);
+ policy.setup(stop, e);
IndexWriter writer = new IndexWriter(committer, policy);
writer.write(indexUpdates);
assertTrue("Writer returned before the table batch completed! Likely a race condition tripped",
@@ -163,21 +163,21 @@ public class TestIndexWriter {
final CountDownLatch writeStartedLatch = new CountDownLatch(1);
// latch never gets counted down, so we wait forever
final CountDownLatch waitOnAbortedLatch = new CountDownLatch(1);
- Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
- @Override
- public Void answer(InvocationOnMock invocation) throws Throwable {
- LOG.info("Write started");
- writeStartedLatch.countDown();
- // when we interrupt the thread for shutdown, we should see this throw an interrupt too
- try {
- waitOnAbortedLatch.await();
- } catch (InterruptedException e) {
- LOG.info("Correctly interrupted while writing!");
- throw e;
- }
- return null;
- }
- });
+ Mockito.doAnswer(new Answer<Void>() {
+ @Override
+ public Void answer(InvocationOnMock invocation) throws Throwable {
+ LOG.info("Write started");
+ writeStartedLatch.countDown();
+ // when we interrupt the thread for shutdown, we should see this throw an interrupt too
+ try {
+ waitOnAbortedLatch.await();
+ } catch (InterruptedException e) {
+ LOG.info("Correctly interrupted while writing!");
+ throw e;
+ }
+ return null;
+ }
+ }).when(table).batch(Mockito.anyListOf(Row.class), Mockito.any());
// add the tables to the set of tables, so its returned to the writer
tables.put(new ImmutableBytesPtr(tableName), table);
@@ -189,9 +189,9 @@ public class TestIndexWriter {
// setup the writer
TrackingParallelWriterIndexCommitter committer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
- committer.setup(factory, exec, abort, stop, e );
+ committer.setup(factory, exec, stop, e);
KillServerOnFailurePolicy policy = new KillServerOnFailurePolicy();
- policy.setup(stop, abort);
+ policy.setup(stop, e);
final IndexWriter writer = new IndexWriter(committer, policy);
final boolean[] failedWrite = new boolean[] { false };
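
The Mockito change in this test follows from the Table API itself: the old
HTableInterface had an Object[]-returning batch(List) that when(...).thenAnswer(...)
could wrap, while the HBase 2.0 Table keeps only the void batch(List, Object[]),
so the stubbing has to be declared up front:

    // void method, so the answer is attached before the call instead of around it
    Mockito.doAnswer(invocation -> {
        completed[0] = true;            // record that the batch was invoked
        return null;                    // Answer<Void>
    }).when(table).batch(Mockito.anyList(), Mockito.any());
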
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
index 1fe0342..cc185ce 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleIndexWriter.java
@@ -67,15 +67,14 @@ public class TestParalleIndexWriter {
FakeTableFactory factory = new FakeTableFactory(
Collections.<ImmutableBytesPtr, Table> emptyMap());
TrackingParallelWriterIndexCommitter writer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
- Abortable mockAbort = Mockito.mock(Abortable.class);
Stoppable mockStop = Mockito.mock(Stoppable.class);
// create a simple writer
- writer.setup(factory, exec, mockAbort, mockStop,e);
+ writer.setup(factory, exec, mockStop, e);
// stop the writer
writer.stop(this.test.getTableNameString() + " finished");
assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown());
- Mockito.verifyZeroInteractions(mockAbort, mockStop);
+ Mockito.verifyZeroInteractions(mockStop);
}
@SuppressWarnings({ "unchecked", "deprecation" })
@@ -102,7 +101,7 @@ public class TestParalleIndexWriter {
Table table = Mockito.mock(Table.class);
final boolean[] completed = new boolean[] { false };
- Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
+ Mockito.doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
@@ -110,14 +109,14 @@ public class TestParalleIndexWriter {
completed[0] = true;
return null;
}
- });
+ }).when(table).batch(Mockito.anyList(), Mockito.any());
Mockito.when(table.getName()).thenReturn(org.apache.hadoop.hbase.TableName.valueOf(test.getTableName()));
// add the table to the set of tables, so it's returned to the writer
tables.put(tableName, table);
// setup the writer and failure policy
TrackingParallelWriterIndexCommitter writer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
- writer.setup(factory, exec, abort, stop, e);
+ writer.setup(factory, exec, stop, e);
writer.write(indexUpdates, true);
assertTrue("Writer returned before the table batch completed! Likely a race condition tripped",
completed[0]);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
index 79bc295..d8dde82 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestParalleWriterIndexCommitter.java
@@ -29,7 +29,6 @@ import java.util.concurrent.Executors;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
@@ -38,7 +37,6 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.phoenix.hbase.index.IndexTableName;
-import org.apache.phoenix.hbase.index.StubAbortable;
import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.junit.Rule;
@@ -63,27 +61,25 @@ public class TestParalleWriterIndexCommitter {
FakeTableFactory factory = new FakeTableFactory(
Collections.<ImmutableBytesPtr, Table> emptyMap());
TrackingParallelWriterIndexCommitter writer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
- Abortable mockAbort = Mockito.mock(Abortable.class);
Stoppable mockStop = Mockito.mock(Stoppable.class);
RegionCoprocessorEnvironment e =Mockito.mock(RegionCoprocessorEnvironment.class);
Configuration conf =new Configuration();
Mockito.when(e.getConfiguration()).thenReturn(conf);
Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String,Object>());
// create a simple writer
- writer.setup(factory, exec, mockAbort, mockStop, e);
+ writer.setup(factory, exec, mockStop, e);
// stop the writer
writer.stop(this.test.getTableNameString() + " finished");
assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown());
- Mockito.verifyZeroInteractions(mockAbort, mockStop);
+ Mockito.verifyZeroInteractions(mockStop);
}
- @SuppressWarnings({ "unchecked", "deprecation" })
+ @SuppressWarnings({ "unchecked"})
@Test
public void testSynchronouslyCompletesAllWrites() throws Exception {
LOG.info("Starting " + test.getTableNameString());
LOG.info("Current thread is interrupted: " + Thread.interrupted());
- Abortable abort = new StubAbortable();
RegionCoprocessorEnvironment e =Mockito.mock(RegionCoprocessorEnvironment.class);
Configuration conf =new Configuration();
Mockito.when(e.getConfiguration()).thenReturn(conf);
@@ -103,22 +99,21 @@ public class TestParalleWriterIndexCommitter {
Table table = Mockito.mock(Table.class);
final boolean[] completed = new boolean[] { false };
- Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
-
- @Override
- public Void answer(InvocationOnMock invocation) throws Throwable {
- // just keep track that it was called
- completed[0] = true;
- return null;
- }
- });
- Mockito.when(table.getName()).thenReturn(org.apache.hadoop.hbase.TableName.valueOf(test.getTableName()));
+ Mockito.doAnswer(new Answer<Void>() {
+ @Override
+ public Void answer(InvocationOnMock invocation) throws Throwable {
+ // just keep track that it was called
+ completed[0] = true;
+ return null;
+ }
+ }).when(table).batch(Mockito.anyList(), Mockito.any());
+ Mockito.when(table.getName()).thenReturn(org.apache.hadoop.hbase.TableName.valueOf(test.getTableName()));
// add the table to the set of tables, so it's returned to the writer
tables.put(tableName, table);
// setup the writer and failure policy
TrackingParallelWriterIndexCommitter writer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
- writer.setup(factory, exec, abort, stop, e);
+ writer.setup(factory, exec, stop, e);
writer.write(indexUpdates, true);
assertTrue("Writer returned before the table batch completed! Likely a race condition tripped",
completed[0]);
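The RegionCoprocessorEnvironment wiring these committer tests now share, shown as a hedged standalone sketch (class and helper names are illustrative, not from the commit):

import java.util.concurrent.ConcurrentHashMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.mockito.Mockito;

public class EnvMockSketch {
    // setup() now reads its Configuration and shared data from the environment
    // instead of taking an Abortable, so the tests mock just this.
    static RegionCoprocessorEnvironment mockEnv() {
        RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
        Mockito.when(e.getConfiguration()).thenReturn(new Configuration());
        Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
        return e;
    }
}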
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
index 1ace4c5..f6dd85c 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/write/TestWALRecoveryCaching.java
@@ -47,9 +47,11 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.util.Bytes;
@@ -203,7 +205,7 @@ public class TestWALRecoveryCaching {
// kill the server where the tables live - this should trigger distributed log splitting
// find the regionserver that matches the passed server
- List<Region> online = new ArrayList<Region>();
+ List<HRegion> online = new ArrayList<HRegion>();
online.addAll(getRegionsFromServerForTable(util.getMiniHBaseCluster(), shared,
testTable.getTableName()));
online.addAll(getRegionsFromServerForTable(util.getMiniHBaseCluster(), shared,
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
index 0570826..8a2e2b7 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/ConnectionQueryServicesImplTest.java
@@ -49,7 +49,7 @@ public class ConnectionQueryServicesImplTest {
when(cqs.createSchema(any(List.class), anyString())).thenCallRealMethod();
doCallRealMethod().when(cqs).ensureSystemTablesMigratedToSystemNamespace(any(ReadOnlyProps.class));
// Do nothing for this method, just check that it was invoked later
- doNothing().when(cqs).createSysMutexTable(any(HBaseAdmin.class), any(ReadOnlyProps.class));
+ doNothing().when(cqs).createSysMutexTable(any(Admin.class), any(ReadOnlyProps.class));
// Spoof out this call so that ensureSystemTablesUpgrade() will return fast.
when(cqs.getSystemTableNames(any(Admin.class))).thenReturn(Collections.<TableName> emptyList());
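The any(HBaseAdmin.class) to any(Admin.class) change tracks HBase 2.0 dropping the HBaseAdmin class from the public API; callers obtain the Admin interface from a Connection instead. A hedged sketch (class name illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AdminSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                Admin admin = conn.getAdmin()) {
            for (TableName tn : admin.listTableNames()) {
                System.out.println(tn); // enumerate tables via the 2.0 Admin interface
            }
        }
    }
}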
http://git-wip-us.apache.org/repos/asf/phoenix/blob/37200681/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java
index 177aff3..7ccaa8e 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/stats/StatisticsScannerTest.java
@@ -28,7 +28,9 @@ import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
@@ -37,6 +39,7 @@ import org.apache.phoenix.schema.stats.StatisticsScanner.StatisticsScannerCallab
import org.junit.Before;
import org.junit.Test;
+
/**
* Test to verify that we don't try to update stats when a RS is stopping.
*/
@@ -53,6 +56,8 @@ public class StatisticsScannerTest {
private RegionInfo regionInfo;
private Configuration config;
+ private RegionCoprocessorEnvironment env;
+ private Connection conn;
@Before
public void setupMocks() throws Exception {
@@ -68,6 +73,8 @@ public class StatisticsScannerTest {
this.tracker = mock(StatisticsCollector.class);
this.delegate = mock(InternalScanner.class);
this.regionInfo = mock(RegionInfo.class);
+ this.env = mock(RegionCoprocessorEnvironment.class);
+ this.conn = mock(Connection.class);
// Wire up the mocks to the mock StatisticsScanner
when(mockScanner.getStatisticsWriter()).thenReturn(statsWriter);
@@ -77,6 +84,8 @@ public class StatisticsScannerTest {
when(mockScanner.getConfig()).thenReturn(config);
when(mockScanner.getTracker()).thenReturn(tracker);
when(mockScanner.getDelegate()).thenReturn(delegate);
+ when(env.getConnection()).thenReturn(conn);
+ when(mockScanner.getConnection()).thenReturn(conn);
// Wire up the RegionInfo mock to the Region mock
when(region.getRegionInfo()).thenReturn(regionInfo);
@@ -87,25 +96,25 @@ public class StatisticsScannerTest {
@Test
public void testCheckRegionServerStoppingOnClose() throws Exception {
- when(rsServices.isStopping()).thenReturn(true);
- when(rsServices.isStopped()).thenReturn(false);
+ when(conn.isClosed()).thenReturn(true);
+ when(conn.isAborted()).thenReturn(false);
mockScanner.close();
- verify(rsServices).isStopping();
+ verify(conn).isClosed();
verify(callable, never()).call();
verify(runTracker, never()).runTask(callable);
}
@Test
public void testCheckRegionServerStoppedOnClose() throws Exception {
- when(rsServices.isStopping()).thenReturn(false);
- when(rsServices.isStopped()).thenReturn(true);
+ when(conn.isClosed()).thenReturn(false);
+ when(conn.isAborted()).thenReturn(true);
mockScanner.close();
- verify(rsServices).isStopping();
- verify(rsServices).isStopped();
+ verify(conn).isClosed();
+ verify(conn).isAborted();
verify(callable, never()).call();
verify(runTracker, never()).runTask(callable);
}
@@ -116,13 +125,13 @@ public class StatisticsScannerTest {
StatisticsScannerCallable realCallable = mockScanner.new StatisticsScannerCallable();
doThrow(new IOException()).when(statsWriter).deleteStatsForRegion(any(Region.class), any(StatisticsCollector.class),
any(ImmutableBytesPtr.class), any(List.class));
- when(rsServices.isStopping()).thenReturn(true);
- when(rsServices.isStopped()).thenReturn(false);
+ when(conn.isClosed()).thenReturn(true);
+ when(conn.isAborted()).thenReturn(false);
// Should not throw an exception
realCallable.call();
- verify(rsServices).isStopping();
+ verify(conn).isClosed();
}
@SuppressWarnings("unchecked")
@@ -131,13 +140,13 @@ public class StatisticsScannerTest {
StatisticsScannerCallable realCallable = mockScanner.new StatisticsScannerCallable();
doThrow(new IOException()).when(statsWriter).deleteStatsForRegion(any(Region.class), any(StatisticsCollector.class),
any(ImmutableBytesPtr.class), any(List.class));
- when(rsServices.isStopping()).thenReturn(false);
- when(rsServices.isStopped()).thenReturn(true);
+ when(conn.isClosed()).thenReturn(false);
+ when(conn.isAborted()).thenReturn(true);
// Should not throw an exception
realCallable.call();
- verify(rsServices).isStopping();
- verify(rsServices).isStopped();
+ verify(conn).isClosed();
+ verify(conn).isAborted();
}
}
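The test changes above imply the scanner's stop check now consults the server-side Connection rather than RegionServerServices. A hedged sketch of that guard (names illustrative, not the exact Phoenix code):

import java.io.IOException;

import org.apache.hadoop.hbase.client.Connection;

public class StopGuardSketch {
    // mirrors the old isStopping()/isStopped() checks on RegionServerServices
    static void checkNotStopping(Connection conn) throws IOException {
        if (conn.isClosed() || conn.isAborted()) {
            throw new IOException("Not updating statistics: region server is shutting down");
        }
    }
}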
[6/8] phoenix git commit: PHOENIX-4403 Workaround Tephra issues and
fix all left over compilation issues in phoenix-core
Posted by an...@apache.org.
PHOENIX-4403 Workaround Tephra issues and fix all left over compilation issues in phoenix-core
- Update the HBase dependency to 2.0.0-beta-1-SNAPSHOT
- Update classes to include the new APIs from HBase beta-1
- Remove the unused CoprocessorHTableFactory
- Use the environment's createConnection() API to initiate shortCircuitConnection with a custom conf
- Change usage of ZooKeeperWatcher to ZKWatcher
- Update PhoenixTransactionalTable, TephraTransactionTable, and OmidTransactionTable with the interfaces of Table
- Work around Tephra dependencies by copying TransactionAwareHTable into the Phoenix workspace, porting it to HBase 2.0, and creating a BaseRegionObserver for TransactionContext
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/488b5281
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/488b5281
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/488b5281
Branch: refs/heads/5.x-HBase-2.0
Commit: 488b5281f23604252c6b9731fab3e031c24ce742
Parents: c3ec80d
Author: Ankit Singhal <an...@gmail.com>
Authored: Mon Nov 27 15:46:00 2017 +0530
Committer: Ankit Singhal <an...@gmail.com>
Committed: Mon Nov 27 15:46:00 2017 +0530
----------------------------------------------------------------------
.../hbase/coprocessor/BaseRegionObserver.java | 22 +
.../DelegateRegionCoprocessorEnvironment.java | 12 +
.../apache/phoenix/execute/MutationState.java | 4 +-
.../org/apache/phoenix/hbase/index/Indexer.java | 3 +-
.../index/table/CoprocessorHTableFactory.java | 50 --
.../hbase/index/write/IndexWriterUtils.java | 21 +-
.../phoenix/index/PhoenixIndexMetaData.java | 2 +-
.../apache/phoenix/iterate/SnapshotScanner.java | 11 +
.../index/automation/PhoenixMRJobSubmitter.java | 8 +-
.../transaction/OmidTransactionTable.java | 32 +-
.../transaction/PhoenixTransactionalTable.java | 770 +++++++++++++++++--
.../transaction/TephraTransactionTable.java | 40 +-
.../transaction/TransactionAwareHTable.java | 680 ++++++++++++++++
.../phoenix/util/ZKBasedMasterElectionUtil.java | 4 +-
pom.xml | 4 +-
15 files changed, 1459 insertions(+), 204 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
new file mode 100644
index 0000000..fa206bb
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.coprocessor;
+
+public class BaseRegionObserver implements RegionObserver {
+
+}
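In HBase 2.0 RegionObserver became an interface of default methods, so this empty class restores the 1.x-style BaseRegionObserver extension point that the ported Tephra code extends. A hedged usage sketch (MyObserver is illustrative):

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;

public class MyObserver extends BaseRegionObserver implements RegionCoprocessor {
    @Override
    public Optional<RegionObserver> getRegionObserver() {
        return Optional.of(this); // HBase 2.0 discovers observers through this hook
    }

    @Override
    public void preOpen(ObserverContext<RegionCoprocessorEnvironment> c) throws IOException {
        // default methods make every override optional
    }
}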
http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
index 00f3316..4e6bb2e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
@@ -17,9 +17,11 @@
*/
package org.apache.phoenix.coprocessor;
+import java.io.IOException;
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ExtendedCellBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -113,6 +115,16 @@ public class DelegateRegionCoprocessorEnvironment implements RegionCoprocessorEn
public MetricRegistry getMetricRegistryForRegionServer() {
return delegate.getMetricRegistryForRegionServer();
}
+
+ @Override
+ public Connection createConnection(Configuration conf) throws IOException {
+ return delegate.createConnection(conf);
+ }
+
+ @Override
+ public ExtendedCellBuilder getCellBuilder() {
+ return delegate.getCellBuilder();
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index eab64f1..e9547f2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -90,8 +90,8 @@ import org.apache.phoenix.transaction.PhoenixTransactionalTable;
import org.apache.phoenix.transaction.TransactionFactory;
import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.IndexUtil;
-import org.apache.phoenix.util.PhoenixKeyValueUtil;
import org.apache.phoenix.util.LogUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.SQLCloseable;
import org.apache.phoenix.util.SQLCloseables;
@@ -195,7 +195,7 @@ public class MutationState implements SQLCloseable {
this.mutations.put(table, mutations);
}
this.numRows = mutations.size();
- this.estimatedSize = KeyValueUtil.getEstimatedRowSize(table, mutations);
+ this.estimatedSize = PhoenixKeyValueUtil.getEstimatedRowSize(table, mutations);
throwIfTooBig();
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index cb8accf..f9b882c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
+import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.client.Delete;
@@ -47,7 +48,6 @@ import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
@@ -209,7 +209,6 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
String errormsg = Indexer.validateVersion(env.getHBaseVersion(), env.getConfiguration());
if (errormsg != null) {
IOException ioe = new IOException(errormsg);
- env.getRegionServerServices().abort(errormsg, ioe);
throw ioe;
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java
deleted file mode 100644
index 7ca43ea..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/table/CoprocessorHTableFactory.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.table;
-
-import java.io.IOException;
-import java.util.concurrent.ExecutorService;
-
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
-
-public class CoprocessorHTableFactory implements HTableFactory {
-
- private RegionCoprocessorEnvironment e;
-
- public CoprocessorHTableFactory(RegionCoprocessorEnvironment e) {
- this.e = e;
- }
-
- @Override
- public Table getTable(ImmutableBytesPtr tablename) throws IOException {
- return this.e.getConnection().getTable(TableName.valueOf(tablename.copyBytesIfNecessary()));
- }
-
- @Override
- public Table getTable(ImmutableBytesPtr tablename,ExecutorService pool) throws IOException {
- return this.e.getConnection().getTable(TableName.valueOf(tablename.copyBytesIfNecessary()), pool);
- }
-
- @Override
- public void shutdown() {
- // noop
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
index 0ef7e18..bfdcbd8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/IndexWriterUtils.java
@@ -27,12 +27,8 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.CoprocessorHConnection;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-import org.apache.phoenix.hbase.index.table.CoprocessorHTableFactory;
import org.apache.phoenix.hbase.index.table.HTableFactory;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.hbase.index.util.IndexManagementUtil;
@@ -93,14 +89,7 @@ public class IndexWriterUtils {
IndexWriterUtils.DEFAULT_NUM_PER_TABLE_THREADS);
LOG.trace("Creating HTableFactory with " + htableThreads + " threads for each HTable.");
IndexManagementUtil.setIfNotSet(conf, HTABLE_THREAD_KEY, htableThreads);
- if (env instanceof RegionCoprocessorEnvironment) {
- RegionCoprocessorEnvironment e = (RegionCoprocessorEnvironment) env;
- RegionServerServices services =e.getRegionServerServices();
- if (services instanceof HRegionServer) {
- return new CoprocessorHConnectionTableFactory(conf, (HRegionServer) services);
- }
- }
- return new CoprocessorHTableFactory(env);
+ return new CoprocessorHConnectionTableFactory(conf, env);
}
/**
@@ -112,16 +101,16 @@ public class IndexWriterUtils {
@GuardedBy("CoprocessorHConnectionTableFactory.this")
private Connection connection;
private final Configuration conf;
- private final HRegionServer server;
+ private RegionCoprocessorEnvironment env;
- CoprocessorHConnectionTableFactory(Configuration conf, HRegionServer server) {
+ CoprocessorHConnectionTableFactory(Configuration conf, RegionCoprocessorEnvironment env) {
this.conf = conf;
- this.server = server;
+ this.env = env;
}
private synchronized Connection getConnection(Configuration conf) throws IOException {
if (connection == null || connection.isClosed()) {
- connection = new CoprocessorHConnection(conf, server);
+ connection = env.createConnection(conf);
}
return connection;
}
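The factory above now leans on the environment to create its short-circuit Connection. A condensed hedged sketch of the caching pattern (class and method names illustrative):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

class ConnectionCachingSketch {
    private final Configuration conf;
    private final RegionCoprocessorEnvironment env;
    private Connection connection;

    ConnectionCachingSketch(Configuration conf, RegionCoprocessorEnvironment env) {
        this.conf = conf;
        this.env = env;
    }

    synchronized Table getTable(TableName name) throws IOException {
        if (connection == null || connection.isClosed()) {
            connection = env.createConnection(conf); // replaces new CoprocessorHConnection(...)
        }
        return connection.getTable(name);
    }
}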
http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
index 05371a6..14c66b0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexMetaData.java
@@ -83,7 +83,7 @@ public class PhoenixIndexMetaData implements IndexMetaData {
IndexMetaDataCache indexCache = (IndexMetaDataCache)cache.getServerCache(new ImmutableBytesPtr(uuid));
if (indexCache == null) {
String msg = "key=" + ServerCacheClient.idToString(uuid) + " region=" + env.getRegion() + "host="
- + env.getRegionServerServices().getServerName();
+ + env.getServerName().getServerName();
SQLException e = new SQLExceptionInfo.Builder(SQLExceptionCode.INDEX_METADATA_NOT_FOUND).setMessage(msg)
.build().buildException();
ServerUtil.throwIOException("Index update failed", e); // will not return
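Here the host for the error message comes straight from the environment, since coprocessors can no longer reach RegionServerServices in HBase 2.0. A hedged sketch (names illustrative):

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

public class ServerNameSketch {
    static String host(RegionCoprocessorEnvironment env) {
        ServerName sn = env.getServerName(); // replaces env.getRegionServerServices().getServerName()
        return sn.getServerName();
    }
}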
http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
index 35f40ac..cfb3149 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCellBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AbstractClientScanner;
import org.apache.hadoop.hbase.client.Connection;
@@ -192,6 +193,16 @@ public class SnapshotScanner extends AbstractClientScanner {
public MetricRegistry getMetricRegistryForRegionServer() {
throw new UnsupportedOperationException();
}
+
+ @Override
+ public Connection createConnection(Configuration conf) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ExtendedCellBuilder getCellBuilder() {
+ throw new UnsupportedOperationException();
+ }
};
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
index 9c447e8..662f0c9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
@@ -44,7 +44,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.mapreduce.index.IndexTool;
@@ -53,8 +53,8 @@ import org.apache.phoenix.schema.PTable.IndexType;
import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.types.PDate;
import org.apache.phoenix.util.PhoenixMRJobUtil;
-import org.apache.phoenix.util.UpgradeUtil;
import org.apache.phoenix.util.PhoenixMRJobUtil.MR_SCHEDULER_TYPE;
+import org.apache.phoenix.util.UpgradeUtil;
import org.apache.phoenix.util.ZKBasedMasterElectionUtil;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONObject;
@@ -182,8 +182,8 @@ public class PhoenixMRJobSubmitter {
public int scheduleIndexBuilds() throws Exception {
- ZooKeeperWatcher zookeeperWatcher =
- new ZooKeeperWatcher(conf, "phoenixAutomatedMRIndexBuild", null);
+ ZKWatcher zookeeperWatcher =
+ new ZKWatcher(conf, "phoenixAutomatedMRIndexBuild", null);
if (!ZKBasedMasterElectionUtil.acquireLock(zookeeperWatcher, PHOENIX_LOCKS_PARENT,
AUTO_INDEX_BUILD_LOCK_NAME)) {
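The rename is mechanical: ZooKeeperWatcher became ZKWatcher in HBase 2.0 with the same constructor shape. A hedged sketch (identifier string reused from the diff above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

public class ZkWatcherSketch {
    public static ZKWatcher open() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // (conf, identifier, abortable) mirrors the old ZooKeeperWatcher constructor
        return new ZKWatcher(conf, "phoenixAutomatedMRIndexBuild", null);
    }
}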
http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
index 047ccf6..78d7e4c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
@@ -79,11 +79,6 @@ public class OmidTransactionTable implements PhoenixTransactionalTable {
return null;
}
- @Override
- public byte[] getTableName() {
- // TODO Auto-generated method stub
- return null;
- }
@Override
public Configuration getConfiguration() {
@@ -132,32 +127,7 @@ public class OmidTransactionTable implements PhoenixTransactionalTable {
// TODO Auto-generated method stub
}
- @Override
- public void setAutoFlush(boolean autoFlush) {
- // TODO Auto-generated method stub
- }
-
- @Override
- public boolean isAutoFlush() {
- // TODO Auto-generated method stub
- return false;
- }
-
- @Override
- public long getWriteBufferSize() {
- // TODO Auto-generated method stub
- return 0;
- }
-
- @Override
- public void setWriteBufferSize(long writeBufferSize) throws IOException {
- // TODO Auto-generated method stub
- }
-
- @Override
- public void flushCommits() throws IOException {
- // TODO Auto-generated method stub
- }
+
@Override
public void close() throws IOException {
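The deleted methods (getTableName, setAutoFlush, getWriteBufferSize, flushCommits) come from the retired HTableInterface. A hedged sketch of their HBase 2.0 equivalents (names illustrative):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedWriteSketch {
    static void write(Connection conn, Table table) throws Exception {
        TableName name = table.getName(); // replaces byte[] getTableName()
        try (BufferedMutator mutator = conn.getBufferedMutator(name)) {
            Put put = new Put(Bytes.toBytes("row"));
            put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
            mutator.mutate(put); // buffered, like the old autoFlush=false mode
            mutator.flush();     // replaces flushCommits()
        }
    }
}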
http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
index aced376..deceac6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/PhoenixTransactionalTable.java
@@ -17,132 +17,782 @@
*/
package org.apache.phoenix.transaction;
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CompareOperator;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
+import org.apache.hadoop.hbase.client.Row;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.TableBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-import java.io.IOException;
-import java.util.List;
+import com.google.protobuf.Descriptors;
+import com.google.protobuf.Message;
+import com.google.protobuf.Service;
+import com.google.protobuf.ServiceException;
public interface PhoenixTransactionalTable extends Table {
+ /**
+ * Gets the fully qualified table name instance of this table.
+ */
+ TableName getName();
/**
- * Transaction version of {@link Table#get(Get get)}
- * @param get
- * @throws IOException
+ * Returns the {@link org.apache.hadoop.conf.Configuration} object used by this instance.
+ * <p>
+ * The reference returned is not a copy, so any change made to it will
+ * affect this instance.
*/
- public Result get(Get get) throws IOException;
+ Configuration getConfiguration();
/**
- * Transactional version of {@link Table#put(Put put)}
- * @param put
- * @throws IOException
+ * Gets the {@link org.apache.hadoop.hbase.HTableDescriptor table descriptor} for this table.
+ * @throws java.io.IOException if a remote or network exception occurs.
+ * @deprecated since 2.0 version and will be removed in 3.0 version.
+ * use {@link #getDescriptor()}
+ */
+ @Deprecated
+ HTableDescriptor getTableDescriptor() throws IOException;
+
+ /**
+ * Gets the {@link org.apache.hadoop.hbase.client.TableDescriptor table descriptor} for this table.
+ * @throws java.io.IOException if a remote or network exception occurs.
*/
- public void put(Put put) throws IOException;
+ TableDescriptor getDescriptor() throws IOException;
/**
- * Transactional version of {@link Table#delete(Delete delete)}
+ * Test for the existence of columns in the table, as specified by the Get.
+ * <p>
*
- * @param delete
- * @throws IOException
+ * This will return true if the Get matches one or more keys, false if not.
+ * <p>
+ *
+ * This is a server-side call so it prevents any data from being transferred to
+ * the client.
+ *
+ * @param get the Get
+ * @return true if the specified Get matches one or more keys, false if not
+ * @throws IOException e
*/
- public void delete(Delete delete) throws IOException;
+ boolean exists(Get get) throws IOException;
/**
- * Transactional version of {@link Table#getScanner(Scan scan)}
+ * Test for the existence of columns in the table, as specified by the Gets.
+ * <p>
+ *
+ * This will return an array of booleans. Each value will be true if the related Get matches
+ * one or more keys, false if not.
+ * <p>
+ *
+ * This is a server-side call so it prevents any data from being transferred to
+ * the client.
*
- * @param scan
- * @return ResultScanner
+ * @param gets the Gets
+ * @return Array of boolean. True if the specified Get matches one or more keys, false if not.
+ * @throws IOException e
+ */
+ boolean[] exists(List<Get> gets) throws IOException;
+
+ /**
+ * Test for the existence of columns in the table, as specified by the Gets.
+ * This will return an array of booleans. Each value will be true if the related Get matches
+ * one or more keys, false if not.
+ * This is a server-side call so it prevents any data from being transferred to
+ * the client.
+ *
+ * @param gets the Gets
+ * @return Array of boolean. True if the specified Get matches one or more keys, false if not.
+ * @throws IOException e
+ * @deprecated since 2.0 version and will be removed in 3.0 version.
+ * use {@link #exists(List)}
+ */
+ @Deprecated
+ default boolean[] existsAll(List<Get> gets) throws IOException {
+ return exists(gets);
+ }
+
+ /**
+ * Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations.
+ * The ordering of execution of the actions is not defined. Meaning if you do a Put and a
+ * Get in the same {@link #batch} call, you will not necessarily be
+ * guaranteed that the Get returns what the Put had put.
+ *
+ * @param actions list of Get, Put, Delete, Increment, Append, RowMutations.
+ * @param results Empty Object[], same size as actions. Provides access to partial
+ * results, in case an exception is thrown. A null in the result array means that
+ * the call for that action failed, even after retries. The order of the objects
+ * in the results array corresponds to the order of actions in the request list.
* @throws IOException
+ * @since 0.90.0
*/
- public ResultScanner getScanner(Scan scan) throws IOException;
+ void batch(final List<? extends Row> actions, final Object[] results) throws IOException,
+ InterruptedException;
/**
- * Returns Htable name
+ * Same as {@link #batch(List, Object[])}, but with a callback.
+ * @since 0.96.0
*/
- public byte[] getTableName();
+ <R> void batchCallback(
+ final List<? extends Row> actions, final Object[] results, final Batch.Callback<R> callback
+ ) throws IOException, InterruptedException;
/**
- * Returns Htable configuration object
+ * Extracts certain cells from a given row.
+ * @param get The object that specifies what data to fetch and from which row.
+ * @return The data coming from the specified row, if it exists. If the row
+ * specified doesn't exist, the {@link Result} instance returned won't
+ * contain any {@link org.apache.hadoop.hbase.KeyValue}, as indicated by {@link Result#isEmpty()}.
+ * @throws IOException if a remote or network exception occurs.
+ * @since 0.20.0
*/
- public Configuration getConfiguration();
+ Result get(Get get) throws IOException;
/**
- * Returns HTableDescriptor of Htable
- * @throws IOException
+ * Extracts specified cells from the given rows, as a batch.
+ *
+ * @param gets The objects that specify what data to fetch and from which rows.
+ * @return The data coming from the specified rows, if it exists. If the row specified doesn't
+ * exist, the {@link Result} instance returned won't contain any {@link
+ * org.apache.hadoop.hbase.Cell}s, as indicated by {@link Result#isEmpty()}. If there are any
+ * failures even after retries, there will be a <code>null</code> in the results' array for those
+ * Gets, AND an exception will be thrown. The ordering of the Result array corresponds to the order
+ * of the list of passed in Gets.
+ * @throws IOException if a remote or network exception occurs.
+ * @since 0.90.0
+ * @apiNote {@link #put(List)} runs pre-flight validations on the input list on the client.
+ * Currently {@link #get(List)} doesn't run any validations on the client side; there
+ * is no need at present, but this may change in the future. An
+ * {@link IllegalArgumentException} will be thrown in this case.
*/
- public HTableDescriptor getTableDescriptor() throws IOException;
+ Result[] get(List<Get> gets) throws IOException;
/**
- * Checks if cell exists
- * @throws IOException
+ * Returns a scanner on the current table as specified by the {@link Scan}
+ * object.
+ * Note that the passed {@link Scan}'s start row and caching properties
+ * may be changed.
+ *
+ * @param scan A configured {@link Scan} object.
+ * @return A scanner.
+ * @throws IOException if a remote or network exception occurs.
+ * @since 0.20.0
*/
- public boolean exists(Get get) throws IOException;
+ ResultScanner getScanner(Scan scan) throws IOException;
/**
- * Transactional version of {@link Table#get(List gets)}
- * @throws IOException
+ * Gets a scanner on the current table for the given family.
+ *
+ * @param family The column family to scan.
+ * @return A scanner.
+ * @throws IOException if a remote or network exception occurs.
+ * @since 0.20.0
*/
- public Result[] get(List<Get> gets) throws IOException;
+ ResultScanner getScanner(byte[] family) throws IOException;
/**
- * Transactional version of {@link Table#getScanner(byte[] family)}
- * @throws IOException
+ * Gets a scanner on the current table for the given family and qualifier.
+ *
+ * @param family The column family to scan.
+ * @param qualifier The column qualifier to scan.
+ * @return A scanner.
+ * @throws IOException if a remote or network exception occurs.
+ * @since 0.20.0
*/
- public ResultScanner getScanner(byte[] family) throws IOException;
+ ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException;
+
/**
- * Transactional version of {@link Table#getScanner(byte[] family, byte[] qualifier)}
- * @throws IOException
+ * Puts some data in the table.
+ *
+ * @param put The data to put.
+ * @throws IOException if a remote or network exception occurs.
+ * @since 0.20.0
*/
- public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException;
+ void put(Put put) throws IOException;
/**
- * Transactional version of {@link Table#put(List puts)}
- * @throws IOException
+ * Batch puts the specified data into the table.
+ * <p>
+ * This can be used for group commit, or for submitting user defined batches. Before sending
+ * a batch of mutations to the server, the client runs a few validations on the input list. If an
+ * error is found, for example, a mutation was supplied but was missing it's column an
+ * {@link IllegalArgumentException} will be thrown and no mutations will be applied. If there
+ * are any failures even after retries, a {@link RetriesExhaustedWithDetailsException} will be
+ * thrown. RetriesExhaustedWithDetailsException contains lists of failed mutations and
+ * corresponding remote exceptions. The ordering of mutations and exceptions in the
+ * encapsulating exception corresponds to the order of the input list of Put requests.
+ *
+ * @param puts The list of mutations to apply.
+ * @throws IOException if a remote or network exception occurs.
+ * @since 0.20.0
*/
- public void put(List<Put> puts) throws IOException;
+ void put(List<Put> puts) throws IOException;
/**
- * Transactional version of {@link Table#delete(List deletes)}
- * @throws IOException
+ * Atomically checks if a row/family/qualifier value matches the expected
+ * value. If it does, it adds the put. If the passed value is null, the check
+ * is for the lack of column (ie: non-existence)
+ *
+ * @param row to check
+ * @param family column family to check
+ * @param qualifier column qualifier to check
+ * @param value the expected value
+ * @param put data to put if check succeeds
+ * @throws IOException e
+ * @return true if the new put was executed, false otherwise
*/
- public void delete(List<Delete> deletes) throws IOException;
+ boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
+ byte[] value, Put put) throws IOException;
/**
- * Delegates to {@link Table#setAutoFlush(boolean autoFlush)}
+ * Atomically checks if a row/family/qualifier value matches the expected
+ * value. If it does, it adds the put. If the passed value is null, the check
+ * is for the lack of column (ie: non-existence)
+ *
+ * The expected value argument of this call is on the left and the current
+ * value of the cell is on the right side of the comparison operator.
+ *
+ * Ie. eg. GREATER operator means expected value > existing <=> add the put.
+ *
+ * @param row to check
+ * @param family column family to check
+ * @param qualifier column qualifier to check
+ * @param compareOp comparison operator to use
+ * @param value the expected value
+ * @param put data to put if check succeeds
+ * @throws IOException e
+ * @return true if the new put was executed, false otherwise
+ * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use
+ * {@link #checkAndPut(byte[], byte[], byte[], CompareOperator, byte[], Put)}}
+ */
+ @Deprecated
+ boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
+ CompareFilter.CompareOp compareOp, byte[] value, Put put) throws IOException;
+
+ /**
+ * Atomically checks if a row/family/qualifier value matches the expected
+ * value. If it does, it adds the put. If the passed value is null, the check
+ * is for the lack of column (ie: non-existence)
+ *
+ * The expected value argument of this call is on the left and the current
+ * value of the cell is on the right side of the comparison operator.
+ *
+ * Ie. eg. GREATER operator means expected value > existing <=> add the put.
+ *
+ * @param row to check
+ * @param family column family to check
+ * @param qualifier column qualifier to check
+ * @param op comparison operator to use
+ * @param value the expected value
+ * @param put data to put if check succeeds
+ * @throws IOException e
+ * @return true if the new put was executed, false otherwise
+ */
+ boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
+ CompareOperator op, byte[] value, Put put) throws IOException;
+
+ /**
+ * Deletes the specified cells/row.
+ *
+ * @param delete The object that specifies what to delete.
+ * @throws IOException if a remote or network exception occurs.
+ * @since 0.20.0
*/
- public void setAutoFlush(boolean autoFlush);
+ void delete(Delete delete) throws IOException;
/**
- * Delegates to {@link Table#isAutoFlush()}
+ * Batch Deletes the specified cells/rows from the table.
+ * <p>
+ * If a specified row does not exist, {@link Delete} will report as though a successful
+ * delete; no exception will be thrown. If there are any failures even after retries,
+ * a {@link RetriesExhaustedWithDetailsException} will be thrown.
+ * RetriesExhaustedWithDetailsException contains lists of failed {@link Delete}s and
+ * corresponding remote exceptions.
+ *
+ * @param deletes List of things to delete. The input list gets modified by this
+ * method. All successfully applied {@link Delete}s in the list are removed (in particular it
+ * gets re-ordered, so the order in which the elements are inserted in the list gives no
+ * guarantee as to the order in which the {@link Delete}s are executed).
+ * @throws IOException if a remote or network exception occurs. In that case
+ * the {@code deletes} argument will contain the {@link Delete} instances
+ * that have not be successfully applied.
+ * @since 0.20.1
+ * @apiNote In version 3.0.0, the input list {@code deletes} will no longer be modified. Also,
+ * {@link #put(List)} runs pre-flight validations on the input list on the client. Currently
+ * {@link #delete(List)} doesn't run validations on the client; there is no need currently,
+ * but this may change in the future. An {@link IllegalArgumentException} will be thrown
+ * in this case.
*/
- public boolean isAutoFlush();
+ void delete(List<Delete> deletes) throws IOException;
/**
- * Delegates to see Table.getWriteBufferSize()
+ * Atomically checks if a row/family/qualifier value matches the expected
+ * value. If it does, it adds the delete. If the passed value is null, the
+ * check is for the lack of column (ie: non-existence)
+ *
+ * @param row to check
+ * @param family column family to check
+ * @param qualifier column qualifier to check
+ * @param value the expected value
+ * @param delete data to delete if check succeeds
+ * @throws IOException e
+ * @return true if the new delete was executed, false otherwise
*/
- public long getWriteBufferSize();
+ boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
+ byte[] value, Delete delete) throws IOException;
/**
- * Delegates to see Table.setWriteBufferSize()
+ * Atomically checks if a row/family/qualifier value matches the expected
+ * value. If it does, it adds the delete. If the passed value is null, the
+ * check is for the lack of column (ie: non-existence)
+ *
+ * The expected value argument of this call is on the left and the current
+ * value of the cell is on the right side of the comparison operator.
+ *
+ * Ie. eg. GREATER operator means expected value > existing <=> add the delete.
+ *
+ * @param row to check
+ * @param family column family to check
+ * @param qualifier column qualifier to check
+ * @param compareOp comparison operator to use
+ * @param value the expected value
+ * @param delete data to delete if check succeeds
+ * @throws IOException e
+ * @return true if the new delete was executed, false otherwise
+ * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use
+ * {@link #checkAndDelete(byte[], byte[], byte[], byte[], Delete)}
*/
- public void setWriteBufferSize(long writeBufferSize) throws IOException;
+ @Deprecated
+ boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
+ CompareFilter.CompareOp compareOp, byte[] value, Delete delete) throws IOException;
/**
- * Delegates to see Table.flushCommits()
+ * Atomically checks if a row/family/qualifier value matches the expected
+ * value. If it does, it adds the delete. If the passed value is null, the
+ * check is for the lack of column (ie: non-existence)
+ *
+ * The expected value argument of this call is on the left and the current
+ * value of the cell is on the right side of the comparison operator.
+ *
+ * Ie. eg. GREATER operator means expected value > existing <=> add the delete.
+ *
+ * @param row to check
+ * @param family column family to check
+ * @param qualifier column qualifier to check
+ * @param op comparison operator to use
+ * @param value the expected value
+ * @param delete data to delete if check succeeds
+ * @throws IOException e
+ * @return true if the new delete was executed, false otherwise
*/
- public void flushCommits() throws IOException;
+ boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
+ CompareOperator op, byte[] value, Delete delete) throws IOException;
/**
- * Releases resources
+ * Performs multiple mutations atomically on a single row. Currently
+ * {@link Put} and {@link Delete} are supported.
+ *
+ * @param rm object that specifies the set of mutations to perform atomically
* @throws IOException
*/
- public void close() throws IOException;
+ void mutateRow(final RowMutations rm) throws IOException;
+
+ /**
+ * Appends values to one or more columns within a single row.
+ * <p>
+ * This operation guarantees atomicity to readers. Appends are done
+ * under a single row lock, so write operations to a row are synchronized, and
+ * readers are guaranteed to see this operation fully completed.
+ *
+ * @param append object that specifies the columns and amounts to be used
+ * for the increment operations
+ * @throws IOException e
+ * @return values of columns after the append operation (maybe null)
+ */
+ Result append(final Append append) throws IOException;
+
+ /**
+ * Increments one or more columns within a single row.
+ * <p>
+ * This operation ensures atomicity to readers. Increments are done
+ * under a single row lock, so write operations to a row are synchronized, and
+ * readers are guaranteed to see this operation fully completed.
+ *
+ * @param increment object that specifies the columns and amounts to be used
+ * for the increment operations
+ * @throws IOException e
+ * @return values of columns after the increment
+ */
+ Result increment(final Increment increment) throws IOException;
+
+ /**
+ * See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}
+ * <p>
+ * The {@link Durability} is defaulted to {@link Durability#SYNC_WAL}.
+ * @param row The row that contains the cell to increment.
+ * @param family The column family of the cell to increment.
+ * @param qualifier The column qualifier of the cell to increment.
+ * @param amount The amount to increment the cell with (or decrement, if the
+ * amount is negative).
+ * @return The new value, post increment.
+ * @throws IOException if a remote or network exception occurs.
+ */
+ long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
+ long amount) throws IOException;
+
+ /**
+ * Atomically increments a column value. If the column value already exists
+ * and is not a big-endian long, this could throw an exception. If the column
+ * value does not yet exist it is initialized to <code>amount</code> and
+ * written to the specified column.
+ *
+ * <p>Setting durability to {@link Durability#SKIP_WAL} means that in a fail
+ * scenario you will lose any increments that have not been flushed.
+ * @param row The row that contains the cell to increment.
+ * @param family The column family of the cell to increment.
+ * @param qualifier The column qualifier of the cell to increment.
+ * @param amount The amount to increment the cell with (or decrement, if the
+ * amount is negative).
+ * @param durability The persistence guarantee for this increment.
+ * @return The new value, post increment.
+ * @throws IOException if a remote or network exception occurs.
+ */
+ long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
+ long amount, Durability durability) throws IOException;
+
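
Both incrementColumnValue overloads in one hedged sketch (hypothetical names):

    // defaults to Durability.SYNC_WAL
    long v1 = table.incrementColumnValue(row, family, qualifier, 1L);
    // SKIP_WAL trades durability for speed: unflushed increments are lost on failure
    long v2 = table.incrementColumnValue(row, family, qualifier, -1L,
        Durability.SKIP_WAL);
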
+ /**
+ * Releases any resources held and flushes any pending changes in internal buffers.
+ *
+ * @throws IOException if a remote or network exception occurs.
+ */
+ @Override
+ void close() throws IOException;
+
+ /**
+ * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the
+ * table region containing the specified row. The row given does not actually have
+ * to exist. Whichever region would contain the row based on start and end keys will
+ * be used. Note that the {@code row} parameter is also not passed to the
+ * coprocessor handler registered for this protocol, unless the {@code row}
+ * is separately passed as an argument in the service request. The parameter
+ * here is only used to locate the region used to handle the call.
+ *
+ * <p>
+ * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
+ * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
+ * </p>
+ *
+ * <div style="background-color: #cccccc; padding: 2px">
+ * <blockquote><pre>
+ * CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
+ * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
+ * MyCallRequest request = MyCallRequest.newBuilder()
+ * ...
+ * .build();
+ * MyCallResponse response = service.myCall(null, request);
+ * </pre></blockquote></div>
+ *
+ * @param row The row key used to identify the remote region location
+ * @return A CoprocessorRpcChannel instance
+ */
+ CoprocessorRpcChannel coprocessorService(byte[] row);
+
+ /**
+ * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
+ * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and
+ * invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
+ * with each {@link com.google.protobuf.Service} instance.
+ *
+ * @param service the protocol buffer {@code Service} implementation to call
+ * @param startKey start region selection with region containing this row. If {@code null}, the
+ * selection will start with the first table region.
+ * @param endKey select regions up to and including the region containing this row. If {@code
+ * null}, selection will continue through the last table region.
+ * @param callable this instance's
+ * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will be invoked
+ * once per table region, using the {@link com.google.protobuf.Service} instance connected
+ * to that region.
+ * @param <T> the {@link com.google.protobuf.Service} subclass to connect to
+ * @param <R> Return type for the {@code callable} parameter's {@link
+ * org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
+ * @return a map of result values keyed by region name
+ */
+ <T extends Service, R> Map<byte[],R> coprocessorService(final Class<T> service,
+ byte[] startKey, byte[] endKey, final Batch.Call<T,R> callable)
+ throws ServiceException, Throwable;
+
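
A sketch of the batched coprocessor call, where MyService, CountRequest and
CountResponse stand in for a hypothetical protobuf-generated service (they are
not part of HBase or of this patch):

    Map<byte[], Long> counts = table.coprocessorService(MyService.class,
        null, null,   // null start/end keys select every region
        new Batch.Call<MyService, Long>() {
          @Override
          public Long call(MyService service) throws IOException {
            CoprocessorRpcUtils.BlockingRpcCallback<CountResponse> cb =
                new CoprocessorRpcUtils.BlockingRpcCallback<>();
            service.count(null, CountRequest.getDefaultInstance(), cb);
            return cb.get().getCount();
          }
        });
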
+ /**
+ * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
+ * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), and
+ * invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
+ * with each {@link Service} instance.
+ *
+ * <p> The given {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],
+ * byte[], Object)} method will be called with the return value from each region's {@link
+ * org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation. </p>
+ *
+ * @param service the protocol buffer {@code Service} implementation to call
+ * @param startKey start region selection with region containing this row. If {@code null}, the
+ * selection will start with the first table region.
+ * @param endKey select regions up to and including the region containing this row. If {@code
+ * null}, selection will continue through the last table region.
+ * @param callable this instance's
+ * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will be invoked
+ * once per table region, using the {@link Service} instance connected to that region.
+ * @param callback the callback to invoke with the result from each region
+ * @param <T> the {@link Service} subclass to connect to
+ * @param <R> Return type for the {@code callable} parameter's {@link
+ * org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
+ */
+ <T extends Service, R> void coprocessorService(final Class<T> service,
+ byte[] startKey, byte[] endKey, final Batch.Call<T,R> callable,
+ final Batch.Callback<R> callback) throws ServiceException, Throwable;
+
+ /**
+ * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
+ * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive); all
+ * invocations to the same region server are batched into one call. The coprocessor
+ * service is invoked according to the service instance, method name and parameters.
+ *
+ * @param methodDescriptor
+ * the descriptor for the protobuf service method to call.
+ * @param request
+ * the method call parameters
+ * @param startKey
+ * start region selection with region containing this row. If {@code null}, the
+ * selection will start with the first table region.
+ * @param endKey
+ * select regions up to and including the region containing this row. If {@code null},
+ * selection will continue through the last table region.
+ * @param responsePrototype
+ * the proto type of the response of the method in Service.
+ * @param <R>
+ * the response type for the coprocessor Service method
+ * @throws ServiceException
+ * @throws Throwable
+ * @return a map of result values keyed by region name
+ */
+ <R extends Message> Map<byte[], R> batchCoprocessorService(
+ Descriptors.MethodDescriptor methodDescriptor, Message request,
+ byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable;
+
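
The same hypothetical MyService in a sketch of this single-RPC-per-server form:

    Map<byte[], CountResponse> perRegion = table.batchCoprocessorService(
        MyService.getDescriptor().findMethodByName("count"),
        CountRequest.getDefaultInstance(),
        null, null,   // all regions
        CountResponse.getDefaultInstance());
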
+ /**
+ * Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
+ * region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive); all
+ * invocations to the same region server are batched into one call. The coprocessor
+ * service is invoked according to the service instance, method name and parameters.
+ *
+ * <p>
+ * The given
+ * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
+ * method will be called with the return value from each region's invocation.
+ * </p>
+ *
+ * @param methodDescriptor
+ * the descriptor for the protobuf service method to call.
+ * @param request
+ * the method call parameters
+ * @param startKey
+ * start region selection with region containing this row. If {@code null}, the
+ * selection will start with the first table region.
+ * @param endKey
+ * select regions up to and including the region containing this row. If {@code null},
+ * selection will continue through the last table region.
+ * @param responsePrototype
+ * the proto type of the response of the method in Service.
+ * @param callback
+ * callback to invoke with the response for each region
+ * @param <R>
+ * the response type for the coprocessor Service method
+ * @throws ServiceException
+ * @throws Throwable
+ */
+ <R extends Message> void batchCoprocessorService(Descriptors.MethodDescriptor methodDescriptor,
+ Message request, byte[] startKey, byte[] endKey, R responsePrototype,
+ Batch.Callback<R> callback) throws ServiceException, Throwable;
+
+ /**
+ * Atomically checks if a row/family/qualifier value matches the expected value.
+ * If it does, it performs the row mutations. If the passed value is null, the check
+ * is for the lack of column (i.e. non-existence).
+ *
+ * The expected value argument of this call is on the left and the current
+ * value of the cell is on the right side of the comparison operator.
+ *
+ * For example, the GREATER operator means: perform the row mutations only if expected value > existing value.
+ *
+ * @param row to check
+ * @param family column family to check
+ * @param qualifier column qualifier to check
+ * @param compareOp the comparison operator
+ * @param value the expected value
+ * @param mutation mutations to perform if check succeeds
+ * @throws IOException e
+ * @return true if the new put was executed, false otherwise
+ * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use
+ * {@link #checkAndMutate(byte[], byte[], byte[], CompareOperator, byte[], RowMutations)}
+ */
+ @Deprecated
+ boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier,
+ CompareFilter.CompareOp compareOp, byte[] value, RowMutations mutation) throws IOException;
+
+ /**
+ * Atomically checks if a row/family/qualifier value matches the expected value.
+ * If it does, it performs the row mutations. If the passed value is null, the check
+ * is for the lack of column (i.e. non-existence).
+ *
+ * The expected value argument of this call is on the left and the current
+ * value of the cell is on the right side of the comparison operator.
+ *
+ * For example, the GREATER operator means: perform the row mutations only if expected value > existing value.
+ *
+ * @param row to check
+ * @param family column family to check
+ * @param qualifier column qualifier to check
+ * @param op the comparison operator
+ * @param value the expected value
+ * @param mutation mutations to perform if check succeeds
+ * @throws IOException e
+ * @return true if the new put was executed, false otherwise
+ */
+ boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
+ byte[] value, RowMutations mutation) throws IOException;
+
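
Given the left/right convention above, a sketch (hypothetical names) in which
the mutations are applied only when expectedValue > current cell value:

    RowMutations rm = new RowMutations(row);
    rm.add(new Put(row).addColumn(family, Bytes.toBytes("state"), newState));
    boolean applied = table.checkAndMutate(row, family, qualifier,
        CompareOperator.GREATER, expectedValue, rm);
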
+ /**
+ * Get timeout of each rpc request in this Table instance. It will be overridden by a more
+ * specific rpc timeout config such as readRpcTimeout or writeRpcTimeout.
+ * @see #getReadRpcTimeout(TimeUnit)
+ * @see #getWriteRpcTimeout(TimeUnit)
+ * @param unit the unit of time the timeout to be represented in
+ * @return rpc timeout in the specified time unit
+ */
+ long getRpcTimeout(TimeUnit unit);
+
+ /**
+ * Get the timeout (in milliseconds) of each rpc request in this Table instance.
+ *
+ * @return Currently configured read timeout
+ * @deprecated use {@link #getReadRpcTimeout(TimeUnit)} or
+ * {@link #getWriteRpcTimeout(TimeUnit)} instead
+ */
+ @Deprecated
+ int getRpcTimeout();
+
+ /**
+ * Set the timeout (in milliseconds) of each rpc request in operations of this Table instance;
+ * it overrides the value of hbase.rpc.timeout in the configuration.
+ * If an rpc request waits too long, the client stops waiting and sends a new request to
+ * retry, until the retries are exhausted or the operation timeout is reached.
+ * <p>
+ * NOTE: This will set both the read and write timeout settings to the provided value.
+ *
+ * @param rpcTimeout the timeout of each rpc request in millisecond.
+ *
+ * @deprecated Use setReadRpcTimeout or setWriteRpcTimeout instead
+ */
+ @Deprecated
+ void setRpcTimeout(int rpcTimeout);
+
+ /**
+ * Get timeout of each rpc read request in this Table instance.
+ * @param unit the unit of time the timeout to be represented in
+ * @return read rpc timeout in the specified time unit
+ */
+ long getReadRpcTimeout(TimeUnit unit);
+
+ /**
+ * Get the timeout (in milliseconds) of each rpc read request in this Table instance.
+ * @deprecated since 2.0 and will be removed in 3.0;
+ * use {@link #getReadRpcTimeout(TimeUnit)} instead
+ */
+ @Deprecated
+ int getReadRpcTimeout();
+
+ /**
+ * Set the timeout (in milliseconds) of each rpc read request in operations of this Table
+ * instance; it overrides the value of hbase.rpc.read.timeout in the configuration.
+ * If an rpc read request waits too long, the client stops waiting and sends a new request
+ * to retry, until the retries are exhausted or the operation timeout is reached.
+ *
+ * @param readRpcTimeout the timeout for read rpc request in milliseconds
+ * @deprecated since 2.0.0, use {@link TableBuilder#setReadRpcTimeout} instead
+ */
+ @Deprecated
+ void setReadRpcTimeout(int readRpcTimeout);
+
+ /**
+ * Get timeout of each rpc write request in this Table instance.
+ * @param unit the unit of time the timeout to be represented in
+ * @return write rpc timeout in the specified time unit
+ */
+ long getWriteRpcTimeout(TimeUnit unit);
+
+ /**
+ * Get the timeout (in milliseconds) of each rpc write request in this Table instance.
+ * @deprecated since 2.0 and will be removed in 3.0;
+ * use {@link #getWriteRpcTimeout(TimeUnit)} instead
+ */
+ @Deprecated
+ int getWriteRpcTimeout();
+
+ /**
+ * Set the timeout (in milliseconds) of each rpc write request in operations of this Table
+ * instance; it overrides the value of hbase.rpc.write.timeout in the configuration.
+ * If an rpc write request waits too long, the client stops waiting and sends a new request
+ * to retry, until the retries are exhausted or the operation timeout is reached.
+ *
+ * @param writeRpcTimeout the timeout for write rpc request in milliseconds
+ * @deprecated since 2.0.0, use {@link TableBuilder#setWriteRpcTimeout} instead
+ */
+ @Deprecated
+ void setWriteRpcTimeout(int writeRpcTimeout);
+
+ /**
+ * Get timeout of each operation in Table instance.
+ * @param unit the unit of time the timeout to be represented in
+ * @return operation rpc timeout in the specified time unit
+ */
+ long getOperationTimeout(TimeUnit unit);
+
+ /**
+ * Get the timeout (in milliseconds) of each operation in this Table instance.
+ * @deprecated since 2.0 and will be removed in 3.0;
+ * use {@link #getOperationTimeout(TimeUnit)} instead
+ */
+ @Deprecated
+ int getOperationTimeout();
+
+ /**
+ * Set the timeout (in milliseconds) of each operation in this Table instance; it overrides
+ * the value of hbase.client.operation.timeout in the configuration.
+ * The operation timeout is a top-level restriction ensuring a blocking method is not blocked
+ * longer than this. Within each operation, if an rpc request fails because of a timeout or
+ * another reason, the client retries until it succeeds or throws a RetriesExhaustedException.
+ * But if the total blocked time reaches the operation timeout before the retries are
+ * exhausted, the operation breaks out early and throws a SocketTimeoutException.
+ * @param operationTimeout the total timeout of each operation in millisecond.
+ * @deprecated since 2.0.0, use {@link TableBuilder#setOperationTimeout} instead
+ */
+ @Deprecated
+ void setOperationTimeout(int operationTimeout);
}
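
Since the deprecated int-based setters above point at TableBuilder, a hedged
configuration sketch (connection and pool are an illustrative open Connection
and ExecutorService, not part of this patch):

    Table t = connection.getTableBuilder(TableName.valueOf("MY_TABLE"), pool)
        .setOperationTimeout(60000)
        .setReadRpcTimeout(10000)
        .setWriteRpcTimeout(10000)
        .build();
    long readMs = t.getReadRpcTimeout(TimeUnit.MILLISECONDS);
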
http://git-wip-us.apache.org/repos/asf/phoenix/blob/488b5281/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
index e248f33..f9de869 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionTable.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTableType;
import org.apache.tephra.TxConstants;
-import org.apache.tephra.hbase.TransactionAwareHTable;
import com.google.protobuf.Descriptors.MethodDescriptor;
import com.google.protobuf.Message;
@@ -69,7 +68,8 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
tephraTransactionContext = (TephraTransactionContext) ctx;
- transactionAwareHTable = new TransactionAwareHTable(hTable, (pTable != null && pTable.isImmutableRows()) ? TxConstants.ConflictDetection.NONE : TxConstants.ConflictDetection.ROW);
+ transactionAwareHTable = new TransactionAwareHTable(hTable, (pTable != null && pTable.isImmutableRows())
+ ? TxConstants.ConflictDetection.NONE : TxConstants.ConflictDetection.ROW);
tephraTransactionContext.addTransactionAware(transactionAwareHTable);
@@ -98,10 +98,6 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
return transactionAwareHTable.getScanner(scan);
}
- @Override
- public byte[] getTableName() {
- return transactionAwareHTable.getTableName();
- }
@Override
public Configuration getConfiguration() {
@@ -145,31 +141,6 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
}
@Override
- public void setAutoFlush(boolean autoFlush) {
- transactionAwareHTable.setAutoFlush(autoFlush);
- }
-
- @Override
- public boolean isAutoFlush() {
- return transactionAwareHTable.isAutoFlush();
- }
-
- @Override
- public long getWriteBufferSize() {
- return transactionAwareHTable.getWriteBufferSize();
- }
-
- @Override
- public void setWriteBufferSize(long writeBufferSize) throws IOException {
- transactionAwareHTable.setWriteBufferSize(writeBufferSize);
- }
-
- @Override
- public void flushCommits() throws IOException {
- transactionAwareHTable.flushCommits();
- }
-
- @Override
public void close() throws IOException {
transactionAwareHTable.close();
}
@@ -333,7 +304,7 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
@Override
public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
byte[] value, RowMutations mutation) throws IOException {
- return transactionAwareHTable.checkAndMutate(row, family, qualifier, op, value, mutations);
+ return transactionAwareHTable.checkAndMutate(row, family, qualifier, op, value, mutation);
}
@Override
@@ -353,7 +324,7 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
@Override
public void setWriteRpcTimeout(int writeRpcTimeout) {
- return transactionAwareHTable.setWriteRpcTimeout(writeRpcTimeout);
+ transactionAwareHTable.setWriteRpcTimeout(writeRpcTimeout);
}
@Override
@@ -380,4 +351,5 @@ public class TephraTransactionTable implements PhoenixTransactionalTable {
public long getOperationTimeout(TimeUnit unit) {
return transactionAwareHTable.getOperationTimeout(unit);
}
-}
+
+}
\ No newline at end of file
[7/8] phoenix git commit: PHOENIX-4399 Remove explicit abort on
RegionServerServices
Posted by an...@apache.org.
PHOENIX-4399 Remove explicit abort on RegionServerServices
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5b9a07db
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5b9a07db
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5b9a07db
Branch: refs/heads/5.x-HBase-2.0
Commit: 5b9a07db16d033df2a5ed1a00c4783494f38afa6
Parents: 488b528
Author: Ankit Singhal <an...@gmail.com>
Authored: Mon Nov 27 15:47:45 2017 +0530
Committer: Ankit Singhal <an...@gmail.com>
Committed: Mon Nov 27 15:47:45 2017 +0530
----------------------------------------------------------------------
.../org/apache/phoenix/hbase/index/Indexer.java | 3 +--
.../builder/IndexBuildingFailureException.java | 4 +---
.../index/write/KillServerOnFailurePolicy.java | 17 ++++-------------
3 files changed, 6 insertions(+), 18 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b9a07db/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index f9b882c..d6f1bcc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -208,8 +208,7 @@ public class Indexer implements RegionObserver, RegionCoprocessor {
// make sure the right version <-> combinations are allowed.
String errormsg = Indexer.validateVersion(env.getHBaseVersion(), env.getConfiguration());
if (errormsg != null) {
- IOException ioe = new IOException(errormsg);
- throw ioe;
+ throw new RuntimeException(errormsg);
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b9a07db/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildingFailureException.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildingFailureException.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildingFailureException.java
index cc7cc35..1a1aef4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildingFailureException.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/builder/IndexBuildingFailureException.java
@@ -19,15 +19,13 @@ package org.apache.phoenix.hbase.index.builder;
import java.io.IOException;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-
/**
* Unexpected failure while building index updates that wasn't caused by an {@link IOException}.
* This should be used if there is some basic issue with indexing - and no matter of retries will
* fix it.
*/
@SuppressWarnings("serial")
-public class IndexBuildingFailureException extends DoNotRetryIOException {
+public class IndexBuildingFailureException extends RuntimeException {
/**
* Constructor for over the wire propagation. Generally, shouldn't be used since index failure
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b9a07db/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java
index cba2459..f9c6f3f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/KillServerOnFailurePolicy.java
@@ -21,10 +21,10 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.phoenix.hbase.index.builder.IndexBuildingFailureException;
import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
import com.google.common.collect.Multimap;
@@ -35,17 +35,15 @@ import com.google.common.collect.Multimap;
public class KillServerOnFailurePolicy implements IndexFailurePolicy {
private static final Log LOG = LogFactory.getLog(KillServerOnFailurePolicy.class);
- private Abortable abortable;
private Stoppable stoppable;
@Override
public void setup(Stoppable parent, RegionCoprocessorEnvironment env) {
- setup(parent, env.getRegionServerServices());
+ setup(parent);
}
- public void setup(Stoppable parent, Abortable abort) {
+ public void setup(Stoppable parent) {
this.stoppable = parent;
- this.abortable = abort;
}
@Override
@@ -67,14 +65,7 @@ public class KillServerOnFailurePolicy implements IndexFailurePolicy {
String msg =
"Could not update the index table, killing server region because couldn't write to an index table";
LOG.error(msg, cause);
- try {
- this.abortable.abort(msg, cause);
- } catch (Exception e) {
- LOG.fatal("Couldn't abort this server to preserve index writes, "
- + "attempting to hard kill the server");
- System.exit(1);
- }
-
+ throw new IndexBuildingFailureException(msg, cause);
}
}
[4/8] phoenix git commit: PHOENIX-4304 Refactoring to avoid using
deprecated HTableDescriptor, HColumnDescriptor,
HRegionInfo( Rajeshbabu Chintaguntla)
Posted by an...@apache.org.
PHOENIX-4304 Refactoring to avoid using deprecated HTableDescriptor, HColumnDescriptor, HRegionInfo( Rajeshbabu Chintaguntla)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c3ec80d3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c3ec80d3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c3ec80d3
Branch: refs/heads/5.x-HBase-2.0
Commit: c3ec80d3166d8b365ae39a69bd57d24d8971c63d
Parents: 1beac27
Author: Ankit Singhal <an...@gmail.com>
Authored: Mon Nov 27 15:31:57 2017 +0530
Committer: Ankit Singhal <an...@gmail.com>
Committed: Mon Nov 27 15:31:57 2017 +0530
----------------------------------------------------------------------
.../wal/WALRecoveryRegionPostOpenIT.java | 8 +-
...ReplayWithIndexWritesAndCompressedWALIT.java | 41 +-
.../StatisticsCollectionRunTrackerIT.java | 18 +-
.../AlterMultiTenantTableWithViewsIT.java | 15 +-
.../apache/phoenix/end2end/AlterTableIT.java | 10 +-
.../end2end/ColumnProjectionOptimizationIT.java | 13 +-
.../apache/phoenix/end2end/CreateTableIT.java | 50 ++-
.../apache/phoenix/end2end/DynamicColumnIT.java | 14 +-
.../phoenix/end2end/FlappingAlterTableIT.java | 15 +-
.../phoenix/end2end/FlappingLocalIndexIT.java | 8 +-
.../end2end/IndexToolForPartialBuildIT.java | 2 +-
.../phoenix/end2end/LocalIndexSplitMergeIT.java | 28 +-
.../phoenix/end2end/MappingTableDataTypeIT.java | 14 +-
.../end2end/NamespaceSchemaMappingIT.java | 13 +-
.../phoenix/end2end/NativeHBaseTypesIT.java | 22 +-
.../end2end/QueryDatabaseMetaDataIT.java | 41 +-
.../apache/phoenix/end2end/SetPropertyIT.java | 89 ++--
.../end2end/SkipScanAfterManualSplitIT.java | 4 +-
.../org/apache/phoenix/end2end/UseSchemaIT.java | 10 +-
.../java/org/apache/phoenix/end2end/ViewIT.java | 22 +-
.../phoenix/end2end/index/BaseIndexIT.java | 12 +-
.../phoenix/end2end/index/DropMetadataIT.java | 20 +-
.../phoenix/end2end/index/LocalIndexIT.java | 27 +-
.../phoenix/end2end/index/MutableIndexIT.java | 96 +----
.../index/MutableIndexReplicationIT.java | 38 +-
.../end2end/index/MutableIndexSplitIT.java | 16 +-
.../UpsertSelectOverlappingBatchesIT.java | 24 +-
.../FailForUnsupportedHBaseVersionsIT.java | 18 +-
.../iterate/RoundRobinResultIteratorIT.java | 5 +-
.../apache/phoenix/rpc/PhoenixServerRpcIT.java | 18 +-
.../phoenix/schema/stats/StatsCollectorIT.java | 5 +-
.../phoenix/tx/ParameterizedTransactionIT.java | 29 +-
.../org/apache/phoenix/tx/TransactionIT.java | 51 +--
.../IndexHalfStoreFileReaderGenerator.java | 1 -
.../apache/phoenix/cache/ServerCacheClient.java | 8 +-
.../apache/phoenix/compile/UpsertCompiler.java | 4 +-
.../UngroupedAggregateRegionObserver.java | 4 +-
.../phoenix/execute/ClientAggregatePlan.java | 5 +-
.../org/apache/phoenix/hbase/index/Indexer.java | 12 +-
.../hbase/index/covered/NonTxIndexBuilder.java | 2 -
.../write/ParallelWriterIndexCommitter.java | 4 +-
.../TrackingParallelWriterIndexCommitter.java | 4 +-
.../apache/phoenix/index/PhoenixIndexCodec.java | 9 +-
.../index/PhoenixIndexFailurePolicy.java | 12 +-
.../phoenix/iterate/BaseResultIterators.java | 12 +-
.../iterate/MapReduceParallelScanGrouper.java | 15 +-
.../phoenix/iterate/RegionScannerFactory.java | 5 +-
.../apache/phoenix/iterate/SnapshotScanner.java | 76 ++--
.../iterate/TableSnapshotResultIterator.java | 28 +-
.../phoenix/parse/CreateTableStatement.java | 4 -
.../phoenix/query/ConnectionQueryServices.java | 4 +-
.../query/ConnectionQueryServicesImpl.java | 405 +++++++++----------
.../query/ConnectionlessQueryServicesImpl.java | 45 ++-
.../query/DelegateConnectionQueryServices.java | 4 +-
.../apache/phoenix/query/GuidePostsCache.java | 6 +-
.../apache/phoenix/query/QueryConstants.java | 18 +-
.../apache/phoenix/schema/MetaDataClient.java | 22 +-
.../transaction/OmidTransactionTable.java | 98 ++++-
.../transaction/PhoenixTransactionContext.java | 2 +
.../transaction/TephraTransactionTable.java | 30 +-
.../java/org/apache/phoenix/util/IndexUtil.java | 8 +-
.../org/apache/phoenix/util/MetaDataUtil.java | 49 +--
.../org/apache/phoenix/util/RepairUtil.java | 6 +-
.../java/org/apache/phoenix/util/ScanUtil.java | 4 +-
.../org/apache/phoenix/util/UpgradeUtil.java | 45 ++-
.../phoenix/compile/QueryCompilerTest.java | 4 +-
.../CoveredColumnIndexSpecifierBuilder.java | 12 +-
.../index/covered/NonTxIndexBuilderTest.java | 16 +-
.../index/util/TestIndexManagementUtil.java | 20 +-
.../hbase/index/write/TestIndexWriter.java | 16 +-
.../index/write/TestWALRecoveryCaching.java | 33 +-
.../recovery/TestPerRegionIndexWriteCache.java | 19 +-
.../java/org/apache/phoenix/query/BaseTest.java | 8 +-
.../schema/stats/StatisticsScannerTest.java | 7 +-
.../java/org/apache/phoenix/util/TestUtil.java | 25 +-
75 files changed, 967 insertions(+), 940 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java
index 20d59a7..674c70c 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALRecoveryRegionPostOpenIT.java
@@ -125,7 +125,7 @@ public class WALRecoveryRegionPostOpenIT extends BaseTest {
throw new DoNotRetryIOException();
}
Mutation operation = miniBatchOp.getOperation(0);
- Set<byte[]> keySet = operation.getFamilyMap().keySet();
+ Set<byte[]> keySet = operation.getFamilyCellMap().keySet();
for(byte[] family: keySet) {
if(Bytes.toString(family).startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX) && failIndexTableWrite) {
throw new DoNotRetryIOException();
@@ -232,17 +232,17 @@ public class WALRecoveryRegionPostOpenIT extends BaseTest {
assertTrue(!Arrays.equals(mutations[0].getRow(),Bytes.toBytes("a")));
//wait for data table region reopen.
- List<Region> dataTableRegions=null;
+ List<HRegion> dataTableRegions=null;
for(int i=1;i<=200;i++) {
- dataTableRegions=liveRegionServer.getOnlineRegions(TableName.valueOf(DATA_TABLE_NAME));
+ dataTableRegions=liveRegionServer.getRegions(TableName.valueOf(DATA_TABLE_NAME));
if(dataTableRegions.size() > 0) {
break;
}
Thread.sleep(ONE_SEC);
}
- dataTableRegions=liveRegionServer.getOnlineRegions(TableName.valueOf(DATA_TABLE_NAME));
+ dataTableRegions=liveRegionServer.getRegions(TableName.valueOf(DATA_TABLE_NAME));
assertTrue(dataTableRegions.size()==1);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index 67b7df3..2c6467a 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -33,20 +33,23 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.RegionServerAccounting;
@@ -100,7 +103,6 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
@Before
public void setUp() throws Exception {
setupCluster();
- Path hbaseRootDir = UTIL.getDataTestDir();
this.conf = HBaseConfiguration.create(UTIL.getConfiguration());
this.fs = UTIL.getDFSCluster().getFileSystem();
this.hbaseRootDir = new Path(this.conf.get(HConstants.HBASE_DIR));
@@ -167,15 +169,13 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
* seqids.
* @throws Exception on failure
*/
- @SuppressWarnings("deprecation")
@Test
public void testReplayEditsWrittenViaHRegion() throws Exception {
final String tableNameStr = "testReplayEditsWrittenViaHRegion";
- final HRegionInfo hri = new HRegionInfo(org.apache.hadoop.hbase.TableName.valueOf(tableNameStr),
- null, null, false);
+ final RegionInfo hri = RegionInfoBuilder.newBuilder(org.apache.hadoop.hbase.TableName.valueOf(tableNameStr)).setSplit(false).build();
final Path basedir = FSUtils.getTableDir(hbaseRootDir, org.apache.hadoop.hbase.TableName.valueOf(tableNameStr));
deleteDir(basedir);
- final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
+ final TableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
//setup basic indexing for the table
// enable indexing to a non-existent index table
@@ -216,7 +216,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
Mockito.any(Exception.class));
// then create the index table so we are successful on WAL replay
- TestIndexManagementUtil.createIndexTable(UTIL.getHBaseAdmin(), INDEX_TABLE_NAME);
+ TestIndexManagementUtil.createIndexTable(UTIL.getAdmin(), INDEX_TABLE_NAME);
// run the WAL split and setup the region
runWALSplit(this.conf, walFactory);
@@ -237,7 +237,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
assertEquals("Primary region wasn't updated from WAL replay!", 1, result.size());
// cleanup the index table
- Admin admin = UTIL.getHBaseAdmin();
+ Admin admin = UTIL.getAdmin();
admin.disableTable(TableName.valueOf(INDEX_TABLE_NAME));
admin.deleteTable(TableName.valueOf(INDEX_TABLE_NAME));
admin.close();
@@ -248,16 +248,15 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
* @param tableName name of the table descriptor
* @return
*/
- private HTableDescriptor createBasic3FamilyHTD(final String tableName) {
- @SuppressWarnings("deprecation")
- HTableDescriptor htd = new HTableDescriptor(tableName);
- HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
- htd.addFamily(a);
- HColumnDescriptor b = new HColumnDescriptor(Bytes.toBytes("b"));
- htd.addFamily(b);
- HColumnDescriptor c = new HColumnDescriptor(Bytes.toBytes("c"));
- htd.addFamily(c);
- return htd;
+ private TableDescriptor createBasic3FamilyHTD(final String tableName) {
+ TableDescriptorBuilder tableBuilder = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName));
+ ColumnFamilyDescriptor a = ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("a"));
+ tableBuilder.addColumnFamily(a);
+ ColumnFamilyDescriptor b = ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("b"));
+ tableBuilder.addColumnFamily(b);
+ ColumnFamilyDescriptor c = ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("c"));
+ tableBuilder.addColumnFamily(c);
+ return tableBuilder.build();
}
/*
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
index 27ebec0..71c9e01 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
@@ -28,9 +28,9 @@ import java.sql.PreparedStatement;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.phoenix.end2end.ParallelStatsEnabledIT;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.stats.StatisticsCollectionRunTracker;
@@ -56,7 +56,7 @@ public class StatisticsCollectionRunTrackerIT extends ParallelStatsEnabledIT {
@Test
public void testStateBeforeAndAfterUpdateStatsCommand() throws Exception {
String tableName = fullTableName;
- HRegionInfo regionInfo = createTableAndGetRegion(tableName);
+ RegionInfo regionInfo = createTableAndGetRegion(tableName);
StatisticsCollectionRunTracker tracker =
StatisticsCollectionRunTracker.getInstance(new Configuration());
// assert that the region wasn't added to the tracker
@@ -71,7 +71,7 @@ public class StatisticsCollectionRunTrackerIT extends ParallelStatsEnabledIT {
@Test
public void testStateBeforeAndAfterMajorCompaction() throws Exception {
String tableName = fullTableName;
- HRegionInfo regionInfo = createTableAndGetRegion(tableName);
+ RegionInfo regionInfo = createTableAndGetRegion(tableName);
StatisticsCollectionRunTracker tracker =
StatisticsCollectionRunTracker.getInstance(new Configuration());
// Upsert values in the table.
@@ -99,7 +99,7 @@ public class StatisticsCollectionRunTrackerIT extends ParallelStatsEnabledIT {
@Test
public void testMajorCompactionPreventsUpdateStatsFromRunning() throws Exception {
String tableName = fullTableName;
- HRegionInfo regionInfo = createTableAndGetRegion(tableName);
+ RegionInfo regionInfo = createTableAndGetRegion(tableName);
// simulate stats collection via major compaction by marking the region as compacting in the tracker
markRegionAsCompacting(regionInfo);
Assert.assertEquals("Row count didn't match", COMPACTION_UPDATE_STATS_ROW_COUNT, runUpdateStats(tableName));
@@ -112,7 +112,7 @@ public class StatisticsCollectionRunTrackerIT extends ParallelStatsEnabledIT {
@Test
public void testUpdateStatsPreventsAnotherUpdateStatsFromRunning() throws Exception {
String tableName = fullTableName;
- HRegionInfo regionInfo = createTableAndGetRegion(tableName);
+ RegionInfo regionInfo = createTableAndGetRegion(tableName);
markRunningUpdateStats(regionInfo);
Assert.assertEquals("Row count didn't match", CONCURRENT_UPDATE_STATS_ROW_COUNT,
runUpdateStats(tableName));
@@ -123,26 +123,26 @@ public class StatisticsCollectionRunTrackerIT extends ParallelStatsEnabledIT {
assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo));
}
- private void markRegionAsCompacting(HRegionInfo regionInfo) {
+ private void markRegionAsCompacting(RegionInfo regionInfo) {
StatisticsCollectionRunTracker tracker =
StatisticsCollectionRunTracker.getInstance(new Configuration());
tracker.addCompactingRegion(regionInfo);
}
- private void markRunningUpdateStats(HRegionInfo regionInfo) {
+ private void markRunningUpdateStats(RegionInfo regionInfo) {
StatisticsCollectionRunTracker tracker =
StatisticsCollectionRunTracker.getInstance(new Configuration());
tracker.addUpdateStatsCommandRegion(regionInfo);
}
- private HRegionInfo createTableAndGetRegion(String tableName) throws Exception {
+ private RegionInfo createTableAndGetRegion(String tableName) throws Exception {
TableName tn = TableName.valueOf(tableName);
String ddl = "CREATE TABLE " + tableName + " (PK1 VARCHAR PRIMARY KEY, KV1 VARCHAR)";
try (Connection conn = DriverManager.getConnection(getUrl())) {
conn.createStatement().execute(ddl);
PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
try (Admin admin = phxConn.getQueryServices().getAdmin()) {
- List<HRegionInfo> tableRegions = admin.getTableRegions(tn);
+ List<RegionInfo> tableRegions = admin.getRegions(tn);
return tableRegions.get(0);
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterMultiTenantTableWithViewsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterMultiTenantTableWithViewsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterMultiTenantTableWithViewsIT.java
index 7b4ff68..60dbb44 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterMultiTenantTableWithViewsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterMultiTenantTableWithViewsIT.java
@@ -35,7 +35,8 @@ import java.sql.Statement;
import java.util.List;
import java.util.Properties;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.exception.SQLExceptionCode;
@@ -565,14 +566,14 @@ public class AlterMultiTenantTableWithViewsIT extends ParallelStatsDisabledIT {
try (Connection conn = DriverManager.getConnection(getUrl())) {
String baseTableDDL = "CREATE TABLE " + baseTable + " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR, V3 VARCHAR CONSTRAINT NAME_PK PRIMARY KEY(TENANT_ID, PK1)) MULTI_TENANT = true ";
conn.createStatement().execute(baseTableDDL);
- HTableDescriptor tableDesc1 = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin().getTableDescriptor(Bytes.toBytes(baseTable));
+ TableDescriptor tableDesc1 = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin().getDescriptor(TableName.valueOf(baseTable));
try (Connection tenant1Conn = getTenantConnection("tenant1")) {
String view1DDL = "CREATE VIEW " + view1 + " ( VIEW_COL1 DECIMAL(10,2), VIEW_COL2 CHAR(256)) AS SELECT * FROM " + baseTable;
tenant1Conn.createStatement().execute(view1DDL);
// This should not modify the base table
String alterView = "ALTER VIEW " + view1 + " ADD NEWCOL1 VARCHAR";
tenant1Conn.createStatement().execute(alterView);
- HTableDescriptor tableDesc2 = tenant1Conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin().getTableDescriptor(Bytes.toBytes(baseTable));
+ TableDescriptor tableDesc2 = tenant1Conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin().getDescriptor(TableName.valueOf(baseTable));
assertEquals(tableDesc1, tableDesc2);
// Add a new column family that doesn't already exist in the base table
@@ -580,16 +581,16 @@ public class AlterMultiTenantTableWithViewsIT extends ParallelStatsDisabledIT {
tenant1Conn.createStatement().execute(alterView);
// Verify that the column family now shows up in the base table descriptor
- tableDesc2 = tenant1Conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin().getTableDescriptor(Bytes.toBytes(baseTable));
+ tableDesc2 = tenant1Conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin().getDescriptor(TableName.valueOf(baseTable));
assertFalse(tableDesc2.equals(tableDesc1));
- assertNotNull(tableDesc2.getFamily(Bytes.toBytes("CF")));
+ assertNotNull(tableDesc2.getColumnFamily(Bytes.toBytes("CF")));
// Add a column with an existing column family. This shouldn't modify the base table.
alterView = "ALTER VIEW " + view1 + " ADD CF.NEWCOL3 VARCHAR";
tenant1Conn.createStatement().execute(alterView);
- HTableDescriptor tableDesc3 = tenant1Conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin().getTableDescriptor(Bytes.toBytes(baseTable));
+ TableDescriptor tableDesc3 = tenant1Conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin().getDescriptor(TableName.valueOf(baseTable));
assertTrue(tableDesc3.equals(tableDesc2));
- assertNotNull(tableDesc3.getFamily(Bytes.toBytes("CF")));
+ assertNotNull(tableDesc3.getColumnFamily(Bytes.toBytes("CF")));
}
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index 903fddc..34186bb 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -46,10 +46,10 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.Properties;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -233,7 +233,7 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
conn.createStatement().execute(ddl);
conn.createStatement().execute("ALTER TABLE " + dataTableFullName + " ADD CF.col2 integer CF.IN_MEMORY=true");
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(TableName.valueOf(dataTableFullName)).getColumnFamilies();
+ ColumnFamilyDescriptor[] columnFamilies = admin.getDescriptor(TableName.valueOf(dataTableFullName)).getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
assertFalse(columnFamilies[0].isInMemory());
@@ -938,8 +938,8 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
assertFalse(rs.next());
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
- HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
+ TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
+ ColumnFamilyDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
assertEquals(true, columnFamilies[0].isInMemory());
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
index 56947bb..c399caf 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ColumnProjectionOptimizationIT.java
@@ -42,12 +42,13 @@ import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.util.Properties;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.types.PInteger;
@@ -219,13 +220,11 @@ public class ColumnProjectionOptimizationIT extends ParallelStatsDisabledIT {
byte[] htableName = SchemaUtil.getTableNameAsBytes("", table);
Admin admin = pconn.getQueryServices().getAdmin();
- @SuppressWarnings("deprecation")
- HTableDescriptor descriptor = new HTableDescriptor(htableName);
+ TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(htableName));
for (byte[] familyName : familyNames) {
- HColumnDescriptor columnDescriptor = new HColumnDescriptor(familyName);
- descriptor.addFamily(columnDescriptor);
+ builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(familyName));
}
- admin.createTable(descriptor);
+ admin.createTable(builder.build());
Properties props = new Properties();
Connection conn1 = DriverManager.getConnection(getUrl(), props);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
index 866bd85..b7b0957 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
@@ -17,7 +17,6 @@
*/
package org.apache.phoenix.end2end;
-import static org.apache.hadoop.hbase.HColumnDescriptor.DEFAULT_REPLICATION_SCOPE;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
@@ -34,12 +33,11 @@ import java.sql.Statement;
import java.util.List;
import java.util.Properties;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.GlobalPermissionOrBuilder;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixStatement;
@@ -116,9 +114,9 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
conn.createStatement().execute(ddl);
}
Admin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
- assertNotNull(admin.getTableDescriptor(TableName.valueOf(tableName)));
- HColumnDescriptor[] columnFamilies =
- admin.getTableDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
+ assertNotNull(admin.getDescriptor(TableName.valueOf(tableName)));
+ ColumnFamilyDescriptor[] columnFamilies =
+ admin.getDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
assertEquals(BloomType.NONE, columnFamilies[0].getBloomFilterType());
try (Connection conn = DriverManager.getConnection(getUrl(), props);) {
@@ -137,7 +135,7 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
}
try (Connection conn = DriverManager.getConnection(getUrl(), props);) {
conn.createStatement().execute(ddl);
- assertNotEquals(null, admin.getTableDescriptor(TableName.valueOf(
+ assertNotEquals(null, admin.getDescriptor(TableName.valueOf(
SchemaUtil.getPhysicalTableName(tableName.getBytes(), true).getName())));
} finally {
admin.close();
@@ -187,8 +185,8 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.createStatement().execute(ddl);
Admin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
- HColumnDescriptor[] columnFamilies =
- admin.getTableDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
+ ColumnFamilyDescriptor[] columnFamilies =
+ admin.getDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals(86400, columnFamilies[0].getTimeToLive());
}
@@ -240,8 +238,8 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.createStatement().execute(ddl);
Admin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
- HColumnDescriptor[] columnFamilies =
- admin.getTableDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
+ ColumnFamilyDescriptor[] columnFamilies =
+ admin.getDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals(86400, columnFamilies[0].getTimeToLive());
assertEquals("B", columnFamilies[0].getNameAsString());
@@ -266,8 +264,8 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.createStatement().execute(ddl);
Admin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
- HColumnDescriptor[] columnFamilies =
- admin.getTableDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
+ ColumnFamilyDescriptor[] columnFamilies =
+ admin.getDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
assertEquals(86400, columnFamilies[0].getTimeToLive());
@@ -294,11 +292,11 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.createStatement().execute(ddl);
Admin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
- HColumnDescriptor[] columnFamilies =
- admin.getTableDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
+ ColumnFamilyDescriptor[] columnFamilies =
+ admin.getDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
- assertEquals(DEFAULT_REPLICATION_SCOPE, columnFamilies[0].getScope());
+ assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_REPLICATION_SCOPE, columnFamilies[0].getScope());
assertEquals("B", columnFamilies[1].getNameAsString());
assertEquals(1, columnFamilies[1].getScope());
}
@@ -321,8 +319,8 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.createStatement().execute(ddl);
Admin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
- HColumnDescriptor[] columnFamilies =
- admin.getTableDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
+ ColumnFamilyDescriptor[] columnFamilies =
+ admin.getDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("B", columnFamilies[0].getNameAsString());
assertEquals(0, columnFamilies[0].getScope());
@@ -346,8 +344,8 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.createStatement().execute(ddl);
Admin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
- HColumnDescriptor[] columnFamilies =
- admin.getTableDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
+ ColumnFamilyDescriptor[] columnFamilies =
+ admin.getDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals("a", columnFamilies[0].getNameAsString());
assertEquals(10000, columnFamilies[0].getTimeToLive());
@@ -368,8 +366,8 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.createStatement().execute(ddl);
Admin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
- HColumnDescriptor[] columnFamilies =
- admin.getTableDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
+ ColumnFamilyDescriptor[] columnFamilies =
+ admin.getDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals("a", columnFamilies[0].getNameAsString());
assertEquals(10000, columnFamilies[0].getTimeToLive());
@@ -387,8 +385,8 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
Connection conn = DriverManager.getConnection(getUrl(), props);
conn.createStatement().execute(ddl);
Admin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
- HColumnDescriptor[] columnFamilies =
- admin.getTableDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
+ ColumnFamilyDescriptor[] columnFamilies =
+ admin.getDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
assertEquals(BloomType.ROW, columnFamilies[0].getBloomFilterType());
}
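The mechanical change running through this file: Admin.getTableDescriptor(TableName) becomes Admin.getDescriptor(TableName), which returns the read-only TableDescriptor interface instead of the mutable HTableDescriptor class. A hedged sketch of the new read path, with admin and tableName as assumed placeholders:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    class DescriptorReadSketch {
        // HBase 2.0 equivalent of admin.getTableDescriptor(...).getColumnFamilies().
        static ColumnFamilyDescriptor[] families(Admin admin, String tableName)
                throws Exception {
            TableDescriptor td = admin.getDescriptor(TableName.valueOf(tableName));
            // TableDescriptor is a read-only view; changes go through builders.
            return td.getColumnFamilies();
        }
    }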
@@ -722,7 +720,7 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
}
@Test
- public void testSetHTableDescriptorPropertyOnView() throws Exception {
+ public void testSetTableDescriptorPropertyOnView() throws Exception {
Properties props = new Properties();
final String dataTableFullName = generateUniqueName();
String ddl =
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java
index 6a53906..747107a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DynamicColumnIT.java
@@ -31,13 +31,13 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;
@@ -69,11 +69,11 @@ public class DynamicColumnIT extends ParallelStatsDisabledIT {
try (PhoenixConnection pconn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class)) {
ConnectionQueryServices services = pconn.getQueryServices();
try (Admin admin = services.getAdmin()) {
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
- htd.addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES));
- htd.addFamily(new HColumnDescriptor(FAMILY_NAME_A));
- htd.addFamily(new HColumnDescriptor(FAMILY_NAME_B));
- admin.createTable(htd);
+ TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName));
+ builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES));
+ builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_NAME_A));
+ builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_NAME_B));
+ admin.createTable(builder.build());
}
try (Table hTable = services.getTable(Bytes.toBytes(tableName))) {
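The creation pattern in this hunk generalizes as follows; a minimal sketch assuming an open Admin and placeholder table/family names:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    class CreateTableSketch {
        // Replaces new HTableDescriptor(...) plus addFamily(new HColumnDescriptor(...)).
        static void createWithTwoFamilies(Admin admin, String name) throws Exception {
            TableDescriptorBuilder builder =
                    TableDescriptorBuilder.newBuilder(TableName.valueOf(name));
            // of() yields a family descriptor with all-default settings.
            builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf1")));
            builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf2")));
            admin.createTable(builder.build());
        }
    }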
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingAlterTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingAlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingAlterTableIT.java
index 0e0e555..27285e3 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingAlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingAlterTableIT.java
@@ -24,10 +24,11 @@ import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.SchemaUtil;
@@ -59,12 +60,12 @@ public class FlappingAlterTableIT extends ParallelStatsDisabledIT {
ddl = "ALTER TABLE " + dataTableFullName + " ADD CF.STRING VARCHAR";
conn1.createStatement().execute(ddl);
try (Admin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(TableName.valueOf(dataTableFullName)).getColumnFamilies();
+ ColumnFamilyDescriptor[] columnFamilies = admin.getDescriptor(TableName.valueOf(dataTableFullName)).getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
- assertEquals(HColumnDescriptor.DEFAULT_TTL, columnFamilies[0].getTimeToLive());
+ assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_TTL, columnFamilies[0].getTimeToLive());
assertEquals("CF", columnFamilies[1].getNameAsString());
- assertEquals(HColumnDescriptor.DEFAULT_TTL, columnFamilies[1].getTimeToLive());
+ assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_TTL, columnFamilies[1].getTimeToLive());
}
}
@@ -83,8 +84,8 @@ public class FlappingAlterTableIT extends ParallelStatsDisabledIT {
ddl = "ALTER TABLE " + dataTableFullName + " ADD CF.STRING VARCHAR";
conn1.createStatement().execute(ddl);
try (Admin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
- HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
+ TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
+ ColumnFamilyDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
assertEquals(1000, columnFamilies[0].getTimeToLive());
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java
index 7e769ba..a5aa27e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/FlappingLocalIndexIT.java
@@ -328,11 +328,11 @@ public class FlappingLocalIndexIT extends BaseLocalIndexIT {
conn1.createStatement().execute("UPSERT INTO "+tableName+" values('q',3,1,1,'c')");
conn1.commit();
Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
- HTableDescriptor tableDescriptor = admin.getTableDescriptor(physicalTableName);
- tableDescriptor.addCoprocessor(DeleyOpenRegionObserver.class.getName(), null,
- QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY - 1, null);
+ TableDescriptor tableDescriptor = admin.getDescriptor(physicalTableName);
+ tableDescriptor = TableDescriptorBuilder.newBuilder(tableDescriptor)
+ .addCoprocessor(DeleyOpenRegionObserver.class.getName(), null,
+ QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY - 1, null).build();
admin.disableTable(physicalTableName);
- admin.modifyTable(physicalTableName, tableDescriptor);
+ admin.modifyTable(tableDescriptor);
admin.enableTable(physicalTableName);
DeleyOpenRegionObserver.DELAY_OPEN = true;
conn1.createStatement().execute(
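Worth noting in the hunk above: TableDescriptor is immutable, so the coprocessor can no longer be attached to the fetched descriptor in place; a modified copy is built and handed to the one-argument modifyTable. A sketch of that round trip, assuming an Admin handle and a placeholder observer class name:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    class AddCoprocessorSketch {
        static void addObserver(Admin admin, TableName table, String observerClass)
                throws Exception {
            // Copy the existing descriptor, attach the coprocessor, rebuild.
            TableDescriptor updated =
                    TableDescriptorBuilder.newBuilder(admin.getDescriptor(table))
                            .addCoprocessor(observerClass)
                            .build();
            admin.disableTable(table);
            admin.modifyTable(updated); // the table name travels with the descriptor
            admin.enableTable(table);
        }
    }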
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java
index 19ffe1a..70812c3 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java
@@ -270,7 +270,7 @@ public class IndexToolForPartialBuildIT extends BaseOwnClusterIT {
throw new DoNotRetryIOException();
}
Mutation operation = miniBatchOp.getOperation(0);
- Set<byte[]> keySet = operation.getFamilyMap().keySet();
+ Set<byte[]> keySet = operation.getFamilyCellMap().keySet();
for(byte[] family: keySet) {
if(Bytes.toString(family).startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX) && FAIL_WRITE) {
throw new DoNotRetryIOException();
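The one-line change above swaps the deprecated Mutation.getFamilyMap() for getFamilyCellMap(), which keys the same families to List&lt;Cell&gt;. A minimal sketch of the family-prefix check, with the prefix as an assumed parameter:

    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.util.Bytes;

    class FamilyScanSketch {
        // True if any column family touched by the mutation starts with prefix.
        static boolean touchesFamilyWithPrefix(Mutation m, String prefix) {
            for (byte[] family : m.getFamilyCellMap().keySet()) {
                if (Bytes.toString(family).startsWith(prefix)) {
                    return true;
                }
            }
            return false;
        }
    }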
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java
index 0781097..ee8063c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java
@@ -29,10 +29,10 @@ import java.util.List;
import java.util.Map;
import java.util.Properties;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.BaseTest;
@@ -114,15 +114,15 @@ public class LocalIndexSplitMergeIT extends BaseTest {
Admin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
for (int i = 1; i < 5; i++) {
admin.split(physicalTableName, ByteUtil.concat(Bytes.toBytes(strings[3 * i])));
- List<HRegionInfo> regionsOfUserTable =
- MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
- admin.getConnection(), physicalTableName, false);
+ List<RegionInfo> regionsOfUserTable =
+ MetaTableAccessor.getTableRegions(admin.getConnection(), physicalTableName,
+ false);
while (regionsOfUserTable.size() != (4 + i)) {
Thread.sleep(100);
regionsOfUserTable =
- MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
- admin.getConnection(), physicalTableName, false);
+ MetaTableAccessor.getTableRegions(admin.getConnection(),
+ physicalTableName, false);
}
assertEquals(4 + i, regionsOfUserTable.size());
String[] tIdColumnValues = new String[26];
@@ -213,20 +213,20 @@ public class LocalIndexSplitMergeIT extends BaseTest {
assertTrue(rs.next());
Admin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
- List<HRegionInfo> regionsOfUserTable =
- MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
- admin.getConnection(), physicalTableName, false);
- admin.mergeRegions(regionsOfUserTable.get(0).getEncodedNameAsBytes(),
+ List<RegionInfo> regionsOfUserTable =
+ MetaTableAccessor.getTableRegions(admin.getConnection(), physicalTableName,
+ false);
+ admin.mergeRegionsAsync(regionsOfUserTable.get(0).getEncodedNameAsBytes(),
regionsOfUserTable.get(1).getEncodedNameAsBytes(), false);
regionsOfUserTable =
- MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
- admin.getConnection(), physicalTableName, false);
+ MetaTableAccessor.getTableRegions(admin.getConnection(), physicalTableName,
+ false);
while (regionsOfUserTable.size() != 3) {
Thread.sleep(100);
regionsOfUserTable =
- MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
- admin.getConnection(), physicalTableName, false);
+ MetaTableAccessor.getTableRegions(admin.getConnection(), physicalTableName,
+ false);
}
String query = "SELECT t_id,k1,v1 FROM " + tableName;
rs = conn1.createStatement().executeQuery(query);
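Two API shifts meet in this file: MetaTableAccessor.getTableRegions now takes a Connection rather than a ZooKeeperWatcher, and the blocking Admin.mergeRegions is replaced by mergeRegionsAsync, which returns a Future. A sketch that merges the first two regions and waits for completion, assuming a reachable cluster and an arbitrary 60-second timeout:

    import java.util.List;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.RegionInfo;

    class MergeSketch {
        static void mergeFirstTwoRegions(Admin admin, TableName table) throws Exception {
            List<RegionInfo> regions =
                    MetaTableAccessor.getTableRegions(admin.getConnection(), table, false);
            // get() blocks until the asynchronous merge finishes (or times out).
            admin.mergeRegionsAsync(regions.get(0).getEncodedNameAsBytes(),
                    regions.get(1).getEncodedNameAsBytes(), false)
                    .get(60, TimeUnit.SECONDS);
        }
    }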
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
index e8a4f80..f064730 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
@@ -34,15 +34,15 @@ import java.util.List;
import java.util.Properties;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.util.PropertiesUtil;
@@ -60,12 +60,10 @@ public class MappingTableDataTypeIT extends ParallelStatsDisabledIT {
Admin admin = conn.getQueryServices().getAdmin();
try {
// Create table then get the single region for our new table.
- HTableDescriptor descriptor = new HTableDescriptor(tableName);
- HColumnDescriptor columnDescriptor1 = new HColumnDescriptor(Bytes.toBytes("cf1"));
- HColumnDescriptor columnDescriptor2 = new HColumnDescriptor(Bytes.toBytes("cf2"));
- descriptor.addFamily(columnDescriptor1);
- descriptor.addFamily(columnDescriptor2);
- admin.createTable(descriptor);
+ TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
+ builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf1")))
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf2")));
+ admin.createTable(builder.build());
Table t = conn.getQueryServices().getTable(Bytes.toBytes(mtest));
insertData(tableName.getName(), admin, t);
t.close();
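ColumnFamilyDescriptorBuilder offers two entry points, both used in this patch: of() for a family with default attributes, and newBuilder()...build() when attributes must be set first. A short sketch contrasting the two, with the family name and TTL as placeholders:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    class FamilySketch {
        static ColumnFamilyDescriptor defaults() {
            // Shorthand: every attribute keeps its default value.
            return ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf1"));
        }

        static ColumnFamilyDescriptor withTtl(int ttlSeconds) {
            // Full builder when attributes need customizing before build().
            return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf1"))
                    .setTimeToLive(ttlSeconds)
                    .build();
        }
    }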
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/NamespaceSchemaMappingIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NamespaceSchemaMappingIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NamespaceSchemaMappingIT.java
index b0c681e..7d24cdd 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NamespaceSchemaMappingIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NamespaceSchemaMappingIT.java
@@ -25,13 +25,13 @@ import java.sql.DriverManager;
import java.sql.ResultSet;
import java.util.Properties;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.query.QueryConstants;
@@ -52,7 +52,6 @@ public class NamespaceSchemaMappingIT extends ParallelStatsDisabledIT {
* namespace or not
*/
@Test
- @SuppressWarnings("deprecation")
public void testBackWardCompatibility() throws Exception {
String namespace = "TEST_SCHEMA";
@@ -63,10 +62,10 @@ public class NamespaceSchemaMappingIT extends ParallelStatsDisabledIT {
String hbaseFullTableName = schemaName + ":" + tableName;
Admin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
admin.createNamespace(NamespaceDescriptor.create(namespace).build());
- admin.createTable(new HTableDescriptor(TableName.valueOf(namespace, tableName))
- .addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)));
- admin.createTable(new HTableDescriptor(TableName.valueOf(phoenixFullTableName))
- .addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)));
+ admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf(namespace, tableName))
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.of(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)).build());
+ admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf(phoenixFullTableName))
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.of(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)).build());
Put put = new Put(PVarchar.INSTANCE.toBytes(phoenixFullTableName));
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES,
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java
index 5ece0bd..ebe5425 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/NativeHBaseTypesIT.java
@@ -32,17 +32,18 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeepDeletedCells;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -67,7 +68,6 @@ import org.junit.Test;
public class NativeHBaseTypesIT extends ParallelStatsDisabledIT {
- @SuppressWarnings("deprecation")
private String initTableValues() throws Exception {
final String tableName = SchemaUtil.getTableName(generateUniqueName(), generateUniqueName());
final byte[] tableBytes = tableName.getBytes();
@@ -75,11 +75,10 @@ public class NativeHBaseTypesIT extends ParallelStatsDisabledIT {
final byte[][] splits = new byte[][] {Bytes.toBytes(20), Bytes.toBytes(30)};
Admin admin = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).getAdmin();
try {
- HTableDescriptor descriptor = new HTableDescriptor(tableBytes);
- HColumnDescriptor columnDescriptor = new HColumnDescriptor(familyName);
- columnDescriptor.setKeepDeletedCells(true);
- descriptor.addFamily(columnDescriptor);
- admin.createTable(descriptor, splits);
+ admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf(tableBytes))
+ .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(familyName)
+ .setKeepDeletedCells(KeepDeletedCells.TRUE).build())
+ .build(), splits);
} finally {
admin.close();
}
@@ -149,7 +148,7 @@ public class NativeHBaseTypesIT extends ParallelStatsDisabledIT {
" \"1\".uint_col unsigned_int," +
" \"1\".ulong_col unsigned_long" +
" CONSTRAINT pk PRIMARY KEY (uint_key, ulong_key, string_key))\n" +
- HColumnDescriptor.DATA_BLOCK_ENCODING + "='" + DataBlockEncoding.NONE + "'";
+ ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING + "='" + DataBlockEncoding.NONE + "'";
try (Connection conn = DriverManager.getConnection(url)) {
conn.createStatement().execute(ddl);
@@ -162,7 +161,6 @@ public class NativeHBaseTypesIT extends ParallelStatsDisabledIT {
public void testRangeQuery1() throws Exception {
String tableName = initTableValues();
String query = "SELECT uint_key, ulong_key, string_key FROM " + tableName + " WHERE uint_key > 20 and ulong_key >= 400";
- Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(getUrl());
try {
PreparedStatement statement = conn.prepareStatement(query);
@@ -181,7 +179,6 @@ public class NativeHBaseTypesIT extends ParallelStatsDisabledIT {
public void testRangeQuery2() throws Exception {
String tableName = initTableValues();
String query = "SELECT uint_key, ulong_key, string_key FROM " + tableName + " WHERE uint_key > 20 and uint_key < 40";
- Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(getUrl());
try {
PreparedStatement statement = conn.prepareStatement(query);
@@ -268,7 +265,6 @@ public class NativeHBaseTypesIT extends ParallelStatsDisabledIT {
}
}
- @SuppressWarnings("deprecation")
@Test
public void testNegativeCompareNegativeValue() throws Exception {
String tableName = initTableValues();
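Another rename visible above: the boolean HColumnDescriptor.setKeepDeletedCells(true) gives way to the KeepDeletedCells enum (FALSE, TRUE, TTL) on the builder. A sketch of the replacement, with the family name as a placeholder:

    import org.apache.hadoop.hbase.KeepDeletedCells;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    class KeepDeletedSketch {
        static ColumnFamilyDescriptor keepDeleted() {
            return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("1"))
                    // The enum replaces the old boolean; TTL is a third mode that
                    // keeps deleted cells only until their time-to-live expires.
                    .setKeepDeletedCells(KeepDeletedCells.TRUE)
                    .build();
        }
    }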
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
index c65ca63..63b7c36 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
@@ -44,13 +44,15 @@ import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Properties;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.coprocessor.GroupedAggregateRegionObserver;
@@ -83,7 +85,7 @@ public class QueryDatabaseMetaDataIT extends ParallelStatsDisabledIT {
+ " a.col1 integer,\n" + " b.col2 bigint,\n" + " b.col3 decimal,\n"
+ " b.col4 decimal(5),\n" + " b.col5 decimal(6,3))\n" + " a."
+ HConstants.VERSIONS + "=" + 1 + "," + "a."
- + HColumnDescriptor.DATA_BLOCK_ENCODING + "='" + DataBlockEncoding.NONE
+ + ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING + "='" + DataBlockEncoding.NONE
+ "'";
if (extraProps != null && extraProps.length() > 0) {
ddl += "," + extraProps;
@@ -716,32 +718,30 @@ public class QueryDatabaseMetaDataIT extends ParallelStatsDisabledIT {
} catch (org.apache.hadoop.hbase.TableNotFoundException e) {
}
- @SuppressWarnings("deprecation")
- HTableDescriptor descriptor = new HTableDescriptor(htableName);
+ TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(htableName));
for (byte[] familyName : familyNames) {
- HColumnDescriptor columnDescriptor = new HColumnDescriptor(familyName);
- descriptor.addFamily(columnDescriptor);
+ builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(familyName));
}
- admin.createTable(descriptor);
+ admin.createTable(builder.build());
createMDTestTable(pconn, tableName,
- "a." + HColumnDescriptor.KEEP_DELETED_CELLS + "=" + Boolean.TRUE);
+ "a." + ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS + "=" + Boolean.TRUE);
- descriptor = admin.getTableDescriptor(TableName.valueOf(htableName));
+ TableDescriptor descriptor = admin.getDescriptor(TableName.valueOf(htableName));
assertEquals(3, descriptor.getColumnFamilies().length);
- HColumnDescriptor cdA = descriptor.getFamily(cfA);
- assertNotEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdA.getKeepDeletedCells());
+ ColumnFamilyDescriptor cdA = descriptor.getColumnFamily(cfA);
+ assertNotEquals(ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED, cdA.getKeepDeletedCells());
assertEquals(DataBlockEncoding.NONE, cdA.getDataBlockEncoding()); // Overridden using
// WITH
assertEquals(1, cdA.getMaxVersions()); // Overridden using WITH
- HColumnDescriptor cdB = descriptor.getFamily(cfB);
+ ColumnFamilyDescriptor cdB = descriptor.getColumnFamily(cfB);
// Allow KEEP_DELETED_CELLS to be false for VIEW
- assertEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdB.getKeepDeletedCells());
+ assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED, cdB.getKeepDeletedCells());
assertEquals(DataBlockEncoding.NONE, cdB.getDataBlockEncoding()); // Should keep the
// original value.
// CF c should stay the same since it's not a Phoenix cf.
- HColumnDescriptor cdC = descriptor.getFamily(cfC);
+ ColumnFamilyDescriptor cdC = descriptor.getColumnFamily(cfC);
assertNotNull("Column family not found", cdC);
- assertEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdC.getKeepDeletedCells());
+ assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED, cdC.getKeepDeletedCells());
assertFalse(SchemaUtil.DEFAULT_DATA_BLOCK_ENCODING == cdC.getDataBlockEncoding());
assertTrue(descriptor.hasCoprocessor(UngroupedAggregateRegionObserver.class.getName()));
assertTrue(descriptor.hasCoprocessor(GroupedAggregateRegionObserver.class.getName()));
@@ -794,12 +794,11 @@ public class QueryDatabaseMetaDataIT extends ParallelStatsDisabledIT {
} catch (org.apache.hadoop.hbase.TableNotFoundException e) {
}
- HTableDescriptor descriptor = new HTableDescriptor(htableName);
+ TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(htableName));
for (byte[] familyName : familyNames) {
- HColumnDescriptor columnDescriptor = new HColumnDescriptor(familyName);
- descriptor.addFamily(columnDescriptor);
+ builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(familyName));
}
- admin.createTable(descriptor);
+ admin.createTable(builder.build());
}
String createStmt =
"create view " + generateUniqueName() + " (id char(1) not null primary key,\n"
@@ -866,7 +865,7 @@ public class QueryDatabaseMetaDataIT extends ParallelStatsDisabledIT {
Table htable =
pconn.getQueryServices()
- .getTable(TableName.valueOf(SchemaUtil.getTableNameAsBytes(schemaName, tableName)));
+ .getTable(SchemaUtil.getTableNameAsBytes(schemaName, tableName));
Put put = new Put(Bytes.toBytes("0"));
put.addColumn(cfB, Bytes.toBytes("COL1"), PInteger.INSTANCE.toBytes(1));
put.addColumn(cfC, Bytes.toBytes("COL2"), PLong.INSTANCE.toBytes(2));
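Per-family lookup also renames in this hunk: HTableDescriptor.getFamily(byte[]) becomes TableDescriptor.getColumnFamily(byte[]), which returns null for a missing family. A sketch of the lookup-and-check pattern, with the family name assumed:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    class FamilyLookupSketch {
        static boolean familyKeepsDefaultDeletes(Admin admin, TableName table)
                throws Exception {
            TableDescriptor td = admin.getDescriptor(table);
            ColumnFamilyDescriptor cf = td.getColumnFamily(Bytes.toBytes("a"));
            // null means the family does not exist on the table.
            return cf != null && cf.getKeepDeletedCells()
                    == ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED;
        }
    }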
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3ec80d3/phoenix-core/src/it/java/org/apache/phoenix/end2end/SetPropertyIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SetPropertyIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SetPropertyIT.java
index d785063..fc8c474 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SetPropertyIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SetPropertyIT.java
@@ -17,7 +17,6 @@
*/
package org.apache.phoenix.end2end;
-import static org.apache.hadoop.hbase.HColumnDescriptor.DEFAULT_REPLICATION_SCOPE;
import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -29,11 +28,13 @@ import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeepDeletedCells;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.PTable;
@@ -95,7 +96,7 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
ddl = "ALTER TABLE " + dataTableFullName + " SET REPLICATION_SCOPE=1";
conn1.createStatement().execute(ddl);
try (Admin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(TableName.valueOf(dataTableFullName))
+ ColumnFamilyDescriptor[] columnFamilies = admin.getDescriptor(TableName.valueOf(dataTableFullName))
.getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
@@ -118,10 +119,10 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
ddl = "ALTER TABLE " + dataTableFullName + " SET COMPACTION_ENABLED=FALSE";
conn1.createStatement().execute(ddl);
try (Admin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
+ TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
assertEquals(1, tableDesc.getColumnFamilies().length);
assertEquals("0", tableDesc.getColumnFamilies()[0].getNameAsString());
- assertEquals(Boolean.toString(false), tableDesc.getValue(HTableDescriptor.COMPACTION_ENABLED));
+ assertEquals(Boolean.toString(false), tableDesc.getValue(TableDescriptorBuilder.COMPACTION_ENABLED));
}
}
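The COMPACTION_ENABLED property key moves from HTableDescriptor to TableDescriptorBuilder, while the string-valued getValue lookup stays on the descriptor. A minimal sketch of the check performed above, assuming a fetched descriptor:

    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    class CompactionFlagSketch {
        static boolean compactionDisabled(TableDescriptor td) {
            // The key constant now lives on the builder; the descriptor's
            // isCompactionEnabled() getter returns the same answer as a boolean.
            return Boolean.toString(false)
                    .equals(td.getValue(TableDescriptorBuilder.COMPACTION_ENABLED));
        }
    }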
@@ -140,8 +141,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
ddl = "ALTER TABLE " + dataTableFullName + " SET COMPACTION_ENABLED = FALSE, REPLICATION_SCOPE = 1";
conn1.createStatement().execute(ddl);
try (Admin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
- HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
+ TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
+ ColumnFamilyDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
assertEquals(1, columnFamilies[0].getScope());
@@ -169,8 +170,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
conn.createStatement().execute(ddl);
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
- HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
+ TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
+ ColumnFamilyDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(3, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
@@ -188,7 +189,7 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
assertEquals(10, columnFamilies[2].getMaxVersions());
assertEquals(KeepDeletedCells.FALSE, columnFamilies[2].getKeepDeletedCells());
- assertEquals(Boolean.toString(false), tableDesc.getValue(HTableDescriptor.COMPACTION_ENABLED));
+ assertEquals(Boolean.toString(false), tableDesc.getValue(TableDescriptorBuilder.COMPACTION_ENABLED));
}
}
@@ -388,16 +389,16 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
conn.createStatement().execute(ddl);
assertImmutableRows(conn, dataTableFullName, true);
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
- HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
+ TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
+ ColumnFamilyDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("CF", columnFamilies[0].getNameAsString());
assertEquals(1, columnFamilies[0].getScope());
assertEquals(1000, columnFamilies[0].getTimeToLive());
assertEquals("XYZ", columnFamilies[1].getNameAsString());
- assertEquals(DEFAULT_REPLICATION_SCOPE, columnFamilies[1].getScope());
+ assertEquals(ColumnFamilyDescriptorBuilder.DEFAULT_REPLICATION_SCOPE, columnFamilies[1].getScope());
assertEquals(1000, columnFamilies[1].getTimeToLive());
- assertEquals(Boolean.toString(false), tableDesc.getValue(HTableDescriptor.COMPACTION_ENABLED));
+ assertEquals(Boolean.toString(false), tableDesc.getValue(TableDescriptorBuilder.COMPACTION_ENABLED));
}
}
@@ -419,7 +420,7 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
conn.createStatement().execute(
"ALTER TABLE " + dataTableFullName + " ADD CF.col3 integer CF.IN_MEMORY=true");
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(TableName.valueOf(dataTableFullName))
+ ColumnFamilyDescriptor[] columnFamilies = admin.getDescriptor(TableName.valueOf(dataTableFullName))
.getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
@@ -448,7 +449,7 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
+ dataTableFullName
+ " ADD col4 integer, CF1.col5 integer, CF2.col6 integer IN_MEMORY=true, CF1.REPLICATION_SCOPE=1, CF2.IN_MEMORY=false ");
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(TableName.valueOf(dataTableFullName))
+ ColumnFamilyDescriptor[] columnFamilies = admin.getDescriptor(TableName.valueOf(dataTableFullName))
.getColumnFamilies();
assertEquals(3, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
@@ -482,7 +483,7 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
+ dataTableFullName
+ " ADD col4 integer, CF1.col5 integer, CF2.col6 integer IN_MEMORY=true, CF1.REPLICATION_SCOPE=1, CF2.IN_MEMORY=false, XYZ.REPLICATION_SCOPE=1 ");
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(TableName.valueOf(dataTableFullName))
+ ColumnFamilyDescriptor[] columnFamilies = admin.getDescriptor(TableName.valueOf(dataTableFullName))
.getColumnFamilies();
assertEquals(3, columnFamilies.length);
assertEquals("CF1", columnFamilies[0].getNameAsString());
@@ -539,7 +540,7 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
+ dataTableFullName
+ " ADD col4 integer, CF1.col5 integer, CF2.col6 integer, CF3.col7 integer CF1.REPLICATION_SCOPE=1, CF1.IN_MEMORY=false, IN_MEMORY=true ");
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(TableName.valueOf(dataTableFullName))
+ ColumnFamilyDescriptor[] columnFamilies = admin.getDescriptor(TableName.valueOf(dataTableFullName))
.getColumnFamilies();
assertEquals(4, columnFamilies.length);
assertEquals("CF1", columnFamilies[0].getNameAsString());
@@ -575,7 +576,7 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
conn.createStatement()
.execute("ALTER TABLE " + dataTableFullName + " ADD XYZ.col5 integer IN_MEMORY=true ");
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(TableName.valueOf(dataTableFullName))
+ ColumnFamilyDescriptor[] columnFamilies = admin.getDescriptor(TableName.valueOf(dataTableFullName))
.getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("CF1", columnFamilies[0].getNameAsString());
@@ -602,7 +603,7 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
conn.createStatement().execute(ddl);
conn.createStatement().execute("ALTER TABLE " + dataTableFullName + " ADD col2 integer IN_MEMORY=true");
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HColumnDescriptor[] columnFamilies = admin.getTableDescriptor(TableName.valueOf(dataTableFullName))
+ ColumnFamilyDescriptor[] columnFamilies = admin.getDescriptor(TableName.valueOf(dataTableFullName))
.getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
@@ -632,9 +633,9 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
conn.createStatement().execute(ddl);
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
+ TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
assertTrue(tableDesc.isCompactionEnabled());
- HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
+ ColumnFamilyDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(5, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
assertFalse(columnFamilies[0].isInMemory());
@@ -671,9 +672,9 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
conn.createStatement().execute(ddl);
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
+ TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
assertTrue(tableDesc.isCompactionEnabled());
- HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
+ ColumnFamilyDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(3, columnFamilies.length);
assertEquals("0", columnFamilies[0].getNameAsString());
assertFalse(columnFamilies[0].isInMemory());
@@ -748,8 +749,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
+ " ) " + generateDDLOptions("TTL=86400, SALT_BUCKETS = 4, DEFAULT_COLUMN_FAMILY='XYZ'");
conn.createStatement().execute(ddl);
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
- HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
+ TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
+ ColumnFamilyDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals("XYZ", columnFamilies[0].getNameAsString());
assertEquals(86400, columnFamilies[0].getTimeToLive());
@@ -758,8 +759,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
conn.createStatement().execute(ddl);
conn.commit();
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
- HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
+ TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
+ ColumnFamilyDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals(30, columnFamilies[0].getTimeToLive());
assertEquals("XYZ", columnFamilies[0].getNameAsString());
@@ -786,8 +787,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
conn.createStatement().execute(ddl);
conn.commit();
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
- HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
+ TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
+ ColumnFamilyDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals(true, columnFamilies[0].isInMemory());
assertEquals("XYZ", columnFamilies[0].getNameAsString());
@@ -814,8 +815,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
conn.createStatement().execute(ddl);
conn.commit();
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
- HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
+ TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
+ ColumnFamilyDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals(true, columnFamilies[0].isInMemory());
assertEquals("0", columnFamilies[0].getNameAsString());
@@ -842,8 +843,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
conn.createStatement().execute(ddl);
conn.commit();
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
- HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
+ TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
+ ColumnFamilyDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(1, columnFamilies.length);
assertEquals(true, columnFamilies[0].isInMemory());
assertEquals("XYZ", columnFamilies[0].getNameAsString());
@@ -870,8 +871,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
conn.createStatement().execute(ddl);
conn.commit();
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
- HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
+ TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
+ ColumnFamilyDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("NEWCF", columnFamilies[0].getNameAsString());
assertEquals(true, columnFamilies[0].isInMemory());
@@ -900,8 +901,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
conn.createStatement().execute(ddl);
conn.commit();
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
- HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
+ TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
+ ColumnFamilyDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("NEWCF", columnFamilies[0].getNameAsString());
assertEquals(true, columnFamilies[0].isInMemory());
@@ -915,8 +916,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
conn.createStatement().execute(ddl);
conn.commit();
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
- HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
+ TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
+ ColumnFamilyDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("NEWCF", columnFamilies[0].getNameAsString());
assertEquals(true, columnFamilies[0].isInMemory());
@@ -933,8 +934,8 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
conn.createStatement().execute(ddl);
conn.commit();
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
- HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
+ TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
+ ColumnFamilyDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
assertEquals(2, columnFamilies.length);
assertEquals("NEWCF", columnFamilies[0].getNameAsString());
assertEquals(true, columnFamilies[0].isInMemory());
@@ -970,7 +971,7 @@ public abstract class SetPropertyIT extends ParallelStatsDisabledIT {
ddl = "ALTER TABLE " + dataTableFullName + " SET UNKNOWN_PROP='ABC'";
conn.createStatement().execute(ddl);
try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(dataTableFullName));
+ TableDescriptor tableDesc = admin.getDescriptor(TableName.valueOf(dataTableFullName));
assertEquals("ABC", tableDesc.getValue("UNKNOWN_PROP"));
}
} finally {