You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@phoenix.apache.org by ra...@apache.org on 2017/11/14 04:30:36 UTC
[4/4] phoenix git commit: PHOENIX-4305 Make use of Cell interface
APIs wherever possible. (Rajeshbabu)
PHOENIX-4305 Make use of Cell interface APIs wherever possible. (Rajeshbabu)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c82cc18d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c82cc18d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c82cc18d
Branch: refs/heads/5.x-HBase-2.0
Commit: c82cc18d8432baba8e2cbd10af121cd39f83ca05
Parents: 0454e42
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Tue Nov 14 10:00:00 2017 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Tue Nov 14 10:00:00 2017 +0530
----------------------------------------------------------------------
...ReplayWithIndexWritesAndCompressedWALIT.java | 2 +-
.../phoenix/end2end/MappingTableDataTypeIT.java | 6 +-
.../apache/phoenix/end2end/RowTimestampIT.java | 8 +-
.../phoenix/end2end/index/DropColumnIT.java | 6 +-
.../phoenix/end2end/index/ImmutableIndexIT.java | 4 +-
.../phoenix/end2end/index/IndexTestUtil.java | 4 +-
.../DataTableLocalIndexRegionScanner.java | 3 +-
.../regionserver/IndexHalfStoreFileReader.java | 4 +-
.../IndexHalfStoreFileReaderGenerator.java | 5 +-
.../regionserver/IndexKeyValueSkipListSet.java | 16 +-
.../hbase/regionserver/KeyValueSkipListSet.java | 57 ++---
.../LocalIndexStoreFileScanner.java | 24 +-
.../regionserver/wal/IndexedWALEditCodec.java | 6 +-
.../phoenix/cache/aggcache/SpillManager.java | 7 +-
.../cache/aggcache/SpillableGroupByCache.java | 4 +-
.../phoenix/compile/ListJarsQueryPlan.java | 7 +-
.../MutatingParallelIteratorFactory.java | 6 +-
.../apache/phoenix/compile/TraceQueryPlan.java | 11 +-
.../GroupedAggregateRegionObserver.java | 13 +-
.../coprocessor/MetaDataEndpointImpl.java | 215 +++++++++-------
.../coprocessor/SequenceRegionObserver.java | 46 ++--
.../UngroupedAggregateRegionObserver.java | 16 +-
.../apache/phoenix/execute/MutationState.java | 9 +-
.../phoenix/execute/SortMergeJoinPlan.java | 6 +-
.../apache/phoenix/execute/TupleProjector.java | 17 +-
.../phoenix/filter/DistinctPrefixFilter.java | 4 +-
.../hbase/index/builder/BaseIndexBuilder.java | 4 +-
.../hbase/index/builder/IndexBuilder.java | 4 +-
.../phoenix/hbase/index/covered/Batch.java | 9 +-
.../hbase/index/covered/KeyValueStore.java | 6 +-
.../hbase/index/covered/LocalTableState.java | 29 +--
.../hbase/index/covered/NonTxIndexBuilder.java | 5 +-
.../phoenix/hbase/index/covered/TableState.java | 4 +-
.../hbase/index/covered/data/IndexMemStore.java | 53 ++--
.../index/covered/data/LocalHBaseState.java | 4 +-
.../filter/ApplyAndFilterDeletesFilter.java | 21 +-
.../covered/filter/MaxTimestampFilter.java | 11 +-
.../index/covered/update/ColumnReference.java | 3 +-
.../index/scanner/FilteredKeyValueScanner.java | 4 +-
.../index/util/GenericKeyValueBuilder.java | 7 +-
.../hbase/index/util/IndexManagementUtil.java | 10 +-
.../hbase/index/util/KeyValueBuilder.java | 6 +-
.../hbase/index/wal/IndexedKeyValue.java | 10 +-
.../phoenix/hbase/index/wal/KeyValueCodec.java | 2 -
.../apache/phoenix/index/IndexMaintainer.java | 9 +-
.../phoenix/index/PhoenixIndexBuilder.java | 6 +-
.../index/PhoenixTransactionalIndexer.java | 8 +-
.../BaseGroupedAggregatingResultIterator.java | 8 +-
.../GroupedAggregatingResultIterator.java | 4 +-
.../iterate/MappedByteBufferSortedQueue.java | 5 +-
.../NonAggregateRegionScannerFactory.java | 4 +-
.../RowKeyOrderedAggregateResultIterator.java | 4 +-
.../UngroupedAggregatingResultIterator.java | 4 +-
.../phoenix/jdbc/PhoenixDatabaseMetaData.java | 7 +-
.../apache/phoenix/jdbc/PhoenixStatement.java | 10 +-
.../mapreduce/FormatToBytesWritableMapper.java | 24 +-
.../ImportPreUpsertKeyValueProcessor.java | 3 +-
.../mapreduce/MultiHfileOutputFormat.java | 3 +-
.../index/PhoenixIndexImportMapper.java | 12 +-
.../index/PhoenixIndexPartialBuildMapper.java | 2 +-
.../org/apache/phoenix/schema/PTableImpl.java | 1 -
.../org/apache/phoenix/schema/Sequence.java | 92 +++----
.../stats/DefaultStatisticsCollector.java | 10 +-
.../schema/tuple/MultiKeyValueTuple.java | 4 +-
.../schema/tuple/PositionBasedResultTuple.java | 10 +-
.../phoenix/schema/tuple/ResultTuple.java | 12 +-
.../java/org/apache/phoenix/util/IndexUtil.java | 31 +--
.../org/apache/phoenix/util/KeyValueUtil.java | 238 ------------------
.../org/apache/phoenix/util/MetaDataUtil.java | 2 +-
.../phoenix/util/PhoenixKeyValueUtil.java | 245 +++++++++++++++++++
.../org/apache/phoenix/util/PhoenixRuntime.java | 19 +-
.../org/apache/phoenix/util/ResultUtil.java | 12 +-
.../java/org/apache/phoenix/util/TupleUtil.java | 5 +-
.../org/apache/phoenix/util/UpgradeUtil.java | 8 +-
.../wal/ReadWriteKeyValuesWithCodecTest.java | 5 +-
.../phoenix/execute/MutationStateTest.java | 11 +-
.../phoenix/execute/UnnestArrayPlanTest.java | 4 +-
.../phoenix/filter/SkipScanFilterTest.java | 9 +-
.../phoenix/hbase/index/IndexTestingUtils.java | 7 +-
.../index/covered/CoveredColumnIndexCodec.java | 22 +-
.../index/covered/LocalTableStateTest.java | 9 +-
.../covered/TestCoveredColumnIndexCodec.java | 10 +-
.../index/covered/data/TestIndexMemStore.java | 8 +-
.../filter/TestApplyAndFilterDeletesFilter.java | 5 +-
.../phoenix/index/IndexMaintainerTest.java | 19 +-
.../mapreduce/CsvBulkImportUtilTest.java | 4 +-
.../FormatToBytesWritableMapperTest.java | 3 +-
.../phoenix/query/ConnectionlessTest.java | 39 +--
.../EncodedColumnQualifierCellsListTest.java | 71 ++----
.../org/apache/phoenix/query/OrderByTest.java | 41 ++--
.../apache/phoenix/schema/RowKeySchemaTest.java | 8 +-
.../phoenix/schema/RowKeyValueAccessorTest.java | 8 +-
92 files changed, 885 insertions(+), 898 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index b504acd..9566e48 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -300,7 +300,7 @@ private int getKeyValueCount(Table table) throws IOException {
ResultScanner results = table.getScanner(scan);
int count = 0;
for (Result res : results) {
- count += res.list().size();
+ count += res.listCells().size();
LOG.debug(count + ") " + res);
}
results.close();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
index 5173fe4..fb78e1c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MappingTableDataTypeIT.java
@@ -33,9 +33,9 @@ import java.sql.SQLException;
import java.util.List;
import java.util.Properties;
+import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Put;
@@ -95,9 +95,9 @@ public class MappingTableDataTypeIT extends ParallelStatsDisabledIT {
ResultScanner results = t.getScanner(scan);
Result result = results.next();
assertNotNull("Expected single row", result);
- List<KeyValue> kvs = result.getColumn(Bytes.toBytes("cf2"), Bytes.toBytes("q2"));
+ List<Cell> kvs = result.getColumnCells(Bytes.toBytes("cf2"), Bytes.toBytes("q2"));
assertEquals("Expected single value ", 1, kvs.size());
- assertEquals("Column Value", "value2", Bytes.toString(kvs.get(0).getValue()));
+ assertEquals("Column Value", "value2", Bytes.toString(kvs.get(0).getValueArray(), kvs.get(0).getValueOffset(), kvs.get(0).getValueLength()));
assertNull("Expected single row", results.next());
} finally {
admin.close();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowTimestampIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowTimestampIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowTimestampIT.java
index 930092d..509e305 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowTimestampIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowTimestampIT.java
@@ -149,14 +149,14 @@ public class RowTimestampIT extends ParallelStatsDisabledIT {
Table hTable = hbaseConn.getTable(TableName.valueOf(tableName));
ResultScanner resultScanner = hTable.getScanner(scan);
for (Result result : resultScanner) {
- long timeStamp = result.getColumnLatest(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, emptyKVQualifier).getTimestamp();
+ long timeStamp = result.getColumnLatestCell(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, emptyKVQualifier).getTimestamp();
assertEquals(rowTimestampDate.getTime(), timeStamp);
}
if (!mutable) {
hTable = hbaseConn.getTable(TableName.valueOf(indexName));
resultScanner = hTable.getScanner(scan);
for (Result result : resultScanner) {
- long timeStamp = result.getColumnLatest(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, emptyKVQualifier).getTimestamp();
+ long timeStamp = result.getColumnLatestCell(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, emptyKVQualifier).getTimestamp();
assertEquals(rowTimestampDate.getTime(), timeStamp);
}
}
@@ -260,14 +260,14 @@ public class RowTimestampIT extends ParallelStatsDisabledIT {
Table hTable = hbaseConn.getTable(TableName.valueOf(tableName));
ResultScanner resultScanner = hTable.getScanner(scan);
for (Result result : resultScanner) {
- long timeStamp = result.getColumnLatest(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, emptyKVQualifier).getTimestamp();
+ long timeStamp = result.getColumnLatestCell(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, emptyKVQualifier).getTimestamp();
assertEquals(rowTimestampDate.getTime(), timeStamp);
}
if (!mutable) {
hTable = hbaseConn.getTable(TableName.valueOf(indexName));
resultScanner = hTable.getScanner(scan);
for (Result result : resultScanner) {
- long timeStamp = result.getColumnLatest(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, emptyKVQualifier).getTimestamp();
+ long timeStamp = result.getColumnLatestCell(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, emptyKVQualifier).getTimestamp();
assertEquals(rowTimestampDate.getTime(), timeStamp);
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropColumnIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropColumnIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropColumnIT.java
index 766e924..db445f1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropColumnIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropColumnIT.java
@@ -203,7 +203,7 @@ public class DropColumnIT extends ParallelStatsDisabledIT {
Result result = results.next();
assertNotNull(result);
- assertEquals("data table column value should have been deleted", KeyValue.Type.DeleteColumn.getCode(), result.getColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dataCq).get(0).getTypeByte());
+ assertEquals("data table column value should have been deleted", KeyValue.Type.DeleteColumn.getCode(), result.getColumnCells(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dataCq).get(0).getTypeByte());
assertNull(results.next());
// key value for v2 should have been deleted from the global index table
@@ -213,7 +213,7 @@ public class DropColumnIT extends ParallelStatsDisabledIT {
results = table.getScanner(scan);
result = results.next();
assertNotNull(result);
- assertEquals("data table column value should have been deleted", KeyValue.Type.DeleteColumn.getCode(), result.getColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, globalIndexCq).get(0).getTypeByte());
+ assertEquals("data table column value should have been deleted", KeyValue.Type.DeleteColumn.getCode(), result.getColumnCells(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, globalIndexCq).get(0).getTypeByte());
assertNull(results.next());
// key value for v2 should have been deleted from the local index table
@@ -225,7 +225,7 @@ public class DropColumnIT extends ParallelStatsDisabledIT {
result = results.next();
assertNotNull(result);
assertEquals("data table col"
- + "umn value should have been deleted", KeyValue.Type.DeleteColumn.getCode(), result.getColumn(QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES, localIndexCq).get(0).getTypeByte());
+ + "umn value should have been deleted", KeyValue.Type.DeleteColumn.getCode(), result.getColumnCells(QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES, localIndexCq).get(0).getTypeByte());
assertNull(results.next());
}
else {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
index e0398c7..9b06955 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ImmutableIndexIT.java
@@ -39,8 +39,8 @@ import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -247,7 +247,7 @@ public class ImmutableIndexIT extends BaseUniqueNamesOwnClusterIT {
}
private void assertIndexMutations(Connection conn) throws SQLException {
- Iterator<Pair<byte[], List<KeyValue>>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn);
+ Iterator<Pair<byte[], List<Cell>>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn);
assertTrue(iterator.hasNext());
iterator.next();
assertEquals(!localIndex, iterator.hasNext());
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTestUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTestUtil.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTestUtil.java
index 52af966..888ff45 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTestUtil.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTestUtil.java
@@ -34,6 +34,7 @@ import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -133,8 +134,7 @@ public class IndexTestUtil {
for (Map.Entry<byte[],List<Cell>> entry : dataMutation.getFamilyCellMap().entrySet()) {
PColumnFamily family = dataTable.getColumnFamily(entry.getKey());
for (Cell kv : entry.getValue()) {
- @SuppressWarnings("deprecation")
- byte[] cq = kv.getQualifier();
+ byte[] cq = CellUtil.cloneQualifier(kv);
byte[] emptyKVQualifier = EncodedColumnsUtil.getEmptyKeyValueInfo(dataTable).getFirst();
if (Bytes.compareTo(emptyKVQualifier, cq) != 0) {
try {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java
index eee6c93..859b9ba 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java
@@ -27,7 +27,6 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
@@ -124,7 +123,7 @@ public class DataTableLocalIndexRegionScanner extends DelegateRegionScanner {
del = new Delete(CellUtil.cloneRow(cell));
mutationList.add(del);
}
- del.addDeleteMarker(cell);
+ del.add(cell);
}
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index d1d12fb..4b6b7e2 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.Reference;
@@ -84,7 +84,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
this.splitkey = splitKey == null ? r.getSplitKey() : splitKey;
// Is it top or bottom half?
this.top = Reference.isTopFileRegion(r.getFileRegion());
- this.splitRow = CellUtil.cloneRow(KeyValue.createKeyValueFromKey(splitkey));
+ this.splitRow = CellUtil.cloneRow(KeyValueUtil.createKeyValueFromKey(splitkey));
this.indexMaintainers = indexMaintainers;
this.viewConstants = viewConstants;
this.regionInfo = regionInfo;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index a50d5ce..6e0bbcb 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
@@ -119,7 +120,7 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
region.getRegionInfo().getRegionName());
if (mergeRegions == null || mergeRegions.getFirst() == null) return reader;
byte[] splitRow =
- CellUtil.cloneRow(KeyValue.createKeyValueFromKey(r.getSplitKey()));
+ CellUtil.cloneRow(KeyValueUtil.createKeyValueFromKey(r.getSplitKey()));
// We need not change any thing in first region data because first region start key
// is equal to merged region start key. So returning same reader.
if (Bytes.compareTo(mergeRegions.getFirst().getStartKey(), splitRow) == 0) {
@@ -138,7 +139,7 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
childRegion = mergeRegions.getSecond();
regionStartKeyInHFile = mergeRegions.getSecond().getStartKey();
}
- splitKey = KeyValue.createFirstOnRow(region.getRegionInfo().getStartKey().length == 0 ?
+ splitKey = KeyValueUtil.createFirstOnRow(region.getRegionInfo().getStartKey().length == 0 ?
new byte[region.getRegionInfo().getEndKey().length] :
region.getRegionInfo().getStartKey()).getKey();
} else {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexKeyValueSkipListSet.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexKeyValueSkipListSet.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexKeyValueSkipListSet.java
index c322cb4..0d2de89 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexKeyValueSkipListSet.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexKeyValueSkipListSet.java
@@ -17,10 +17,10 @@
*/
package org.apache.hadoop.hbase.regionserver;
-import java.util.Comparator;
import java.util.concurrent.ConcurrentSkipListMap;
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
/**
* Like a {@link KeyValueSkipListSet}, but also exposes useful, atomic methods (e.g.
@@ -30,7 +30,7 @@ public class IndexKeyValueSkipListSet extends KeyValueSkipListSet {
// this is annoying that we need to keep this extra pointer around here, but its pretty minimal
// and means we don't need to change the HBase code.
- private ConcurrentSkipListMap<KeyValue, KeyValue> delegate;
+ private ConcurrentSkipListMap<Cell, Cell> delegate;
/**
* Create a new {@link IndexKeyValueSkipListSet} based on the passed comparator.
@@ -38,9 +38,9 @@ public class IndexKeyValueSkipListSet extends KeyValueSkipListSet {
* well as object equality in the map.
* @return a map that uses the passed comparator
*/
- public static IndexKeyValueSkipListSet create(Comparator<KeyValue> comparator) {
- ConcurrentSkipListMap<KeyValue, KeyValue> delegate =
- new ConcurrentSkipListMap<KeyValue, KeyValue>(comparator);
+ public static IndexKeyValueSkipListSet create(CellComparator comparator) {
+ ConcurrentSkipListMap<Cell, Cell> delegate =
+ new ConcurrentSkipListMap<Cell, Cell>(comparator);
IndexKeyValueSkipListSet ret = new IndexKeyValueSkipListSet(delegate);
return ret;
}
@@ -48,7 +48,7 @@ public class IndexKeyValueSkipListSet extends KeyValueSkipListSet {
/**
* @param delegate map to which to delegate all calls
*/
- public IndexKeyValueSkipListSet(ConcurrentSkipListMap<KeyValue, KeyValue> delegate) {
+ public IndexKeyValueSkipListSet(ConcurrentSkipListMap<Cell, Cell> delegate) {
super(delegate);
this.delegate = delegate;
}
@@ -70,7 +70,7 @@ public class IndexKeyValueSkipListSet extends KeyValueSkipListSet {
* the map
* @throws NullPointerException if the specified key is null
*/
- public KeyValue putIfAbsent(KeyValue kv) {
+ public Cell putIfAbsent(Cell kv) {
return this.delegate.putIfAbsent(kv, kv);
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java
index 211aa10..b68abd9 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueSkipListSet.java
@@ -18,7 +18,8 @@
*/
package org.apache.hadoop.hbase.regionserver;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
import java.util.Collection;
@@ -43,96 +44,96 @@ import java.util.concurrent.ConcurrentSkipListMap;
* has same attributes as ConcurrentSkipListSet: e.g. tolerant of concurrent
* get and set and won't throw ConcurrentModificationException when iterating.
*/
-public class KeyValueSkipListSet implements NavigableSet<KeyValue> {
- private final ConcurrentNavigableMap<KeyValue, KeyValue> delegatee;
+public class KeyValueSkipListSet implements NavigableSet<Cell> {
+ private final ConcurrentNavigableMap<Cell, Cell> delegatee;
- KeyValueSkipListSet(final KeyValue.KVComparator c) {
- this.delegatee = new ConcurrentSkipListMap<KeyValue, KeyValue>(c);
+ KeyValueSkipListSet(final CellComparator c) {
+ this.delegatee = new ConcurrentSkipListMap<Cell, Cell>(c);
}
- KeyValueSkipListSet(final ConcurrentNavigableMap<KeyValue, KeyValue> m) {
+ KeyValueSkipListSet(final ConcurrentNavigableMap<Cell, Cell> m) {
this.delegatee = m;
}
- public KeyValue ceiling(KeyValue e) {
+ public Cell ceiling(Cell e) {
throw new UnsupportedOperationException("Not implemented");
}
- public Iterator<KeyValue> descendingIterator() {
+ public Iterator<Cell> descendingIterator() {
return this.delegatee.descendingMap().values().iterator();
}
- public NavigableSet<KeyValue> descendingSet() {
+ public NavigableSet<Cell> descendingSet() {
throw new UnsupportedOperationException("Not implemented");
}
- public KeyValue floor(KeyValue e) {
+ public Cell floor(Cell e) {
throw new UnsupportedOperationException("Not implemented");
}
- public SortedSet<KeyValue> headSet(final KeyValue toElement) {
+ public SortedSet<Cell> headSet(final Cell toElement) {
return headSet(toElement, false);
}
- public NavigableSet<KeyValue> headSet(final KeyValue toElement,
+ public NavigableSet<Cell> headSet(final Cell toElement,
boolean inclusive) {
return new KeyValueSkipListSet(this.delegatee.headMap(toElement, inclusive));
}
- public KeyValue higher(KeyValue e) {
+ public KeyValue higher(Cell e) {
throw new UnsupportedOperationException("Not implemented");
}
- public Iterator<KeyValue> iterator() {
+ public Iterator<Cell> iterator() {
return this.delegatee.values().iterator();
}
- public KeyValue lower(KeyValue e) {
+ public Cell lower(Cell e) {
throw new UnsupportedOperationException("Not implemented");
}
- public KeyValue pollFirst() {
+ public Cell pollFirst() {
throw new UnsupportedOperationException("Not implemented");
}
- public KeyValue pollLast() {
+ public Cell pollLast() {
throw new UnsupportedOperationException("Not implemented");
}
- public SortedSet<KeyValue> subSet(KeyValue fromElement, KeyValue toElement) {
+ public SortedSet<Cell> subSet(Cell fromElement, Cell toElement) {
throw new UnsupportedOperationException("Not implemented");
}
- public NavigableSet<KeyValue> subSet(KeyValue fromElement,
- boolean fromInclusive, KeyValue toElement, boolean toInclusive) {
+ public NavigableSet<Cell> subSet(Cell fromElement,
+ boolean fromInclusive, Cell toElement, boolean toInclusive) {
throw new UnsupportedOperationException("Not implemented");
}
- public SortedSet<KeyValue> tailSet(KeyValue fromElement) {
+ public SortedSet<Cell> tailSet(Cell fromElement) {
return tailSet(fromElement, true);
}
- public NavigableSet<KeyValue> tailSet(KeyValue fromElement, boolean inclusive) {
+ public NavigableSet<Cell> tailSet(Cell fromElement, boolean inclusive) {
return new KeyValueSkipListSet(this.delegatee.tailMap(fromElement, inclusive));
}
- public Comparator<? super KeyValue> comparator() {
+ public Comparator<? super Cell> comparator() {
throw new UnsupportedOperationException("Not implemented");
}
- public KeyValue first() {
+ public Cell first() {
return this.delegatee.get(this.delegatee.firstKey());
}
- public KeyValue last() {
+ public Cell last() {
return this.delegatee.get(this.delegatee.lastKey());
}
- public boolean add(KeyValue e) {
+ public boolean add(Cell e) {
return this.delegatee.put(e, e) == null;
}
- public boolean addAll(Collection<? extends KeyValue> c) {
+ public boolean addAll(Collection<? extends Cell> c) {
throw new UnsupportedOperationException("Not implemented");
}
@@ -165,7 +166,7 @@ public class KeyValueSkipListSet implements NavigableSet<KeyValue> {
throw new UnsupportedOperationException("Not implemented");
}
- public KeyValue get(KeyValue kv) {
+ public Cell get(Cell kv) {
return this.delegatee.get(kv);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java
index 3a80698..3b36a7e 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java
@@ -21,16 +21,14 @@ import java.io.IOException;
import java.util.Map.Entry;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
-import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.index.IndexMaintainer;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
import static org.apache.hadoop.hbase.KeyValue.ROW_LENGTH_SIZE;
@@ -71,7 +69,7 @@ public class LocalIndexStoreFileScanner extends StoreFileScanner{
return peek;
}
- private KeyValue getChangedKey(Cell next, boolean changeBottomKeys) {
+ private Cell getChangedKey(Cell next, boolean changeBottomKeys) {
// If it is a top store file change the StartKey with SplitKey in Key
//and produce the new value corresponding to the change in key
byte[] changedKey = getNewRowkeyByRegionStartKeyReplacedWithSplitKey(next, changeBottomKeys);
@@ -114,7 +112,7 @@ public class LocalIndexStoreFileScanner extends StoreFileScanner{
@Override
public boolean seekToPreviousRow(Cell key) throws IOException {
- KeyValue kv = KeyValueUtil.ensureKeyValue(key);
+ KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(key);
if (reader.isTop()) {
byte[] fk = reader.getFirstKey();
// This will be null when the file is empty in which we can not seekBefore to
@@ -122,10 +120,10 @@ public class LocalIndexStoreFileScanner extends StoreFileScanner{
if (fk == null) {
return false;
}
- if (getComparator().compare(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength(), fk, 0, fk.length) <= 0) {
+ if (getComparator().compare(kv.getRowArray(), kv.getKeyOffset(), kv.getKeyLength(), fk, 0, fk.length) <= 0) {
return super.seekToPreviousRow(key);
}
- KeyValue replacedKey = getKeyPresentInHFiles(kv.getBuffer());
+ KeyValue replacedKey = getKeyPresentInHFiles(kv.getRowArray());
boolean seekToPreviousRow = super.seekToPreviousRow(replacedKey);
while(super.peek()!=null && !isSatisfiedMidKeyCondition(super.peek())) {
seekToPreviousRow = super.seekToPreviousRow(super.peek());
@@ -134,7 +132,7 @@ public class LocalIndexStoreFileScanner extends StoreFileScanner{
} else {
// The equals sign isn't strictly necessary just here to be consistent with
// seekTo
- if (getComparator().compare(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength(), reader.getSplitkey(), 0, reader.getSplitkey().length) >= 0) {
+ if (getComparator().compare(kv.getRowArray(), kv.getKeyOffset(), kv.getKeyLength(), reader.getSplitkey(), 0, reader.getSplitkey().length) >= 0) {
boolean seekToPreviousRow = super.seekToPreviousRow(kv);
while(super.peek()!=null && !isSatisfiedMidKeyCondition(super.peek())) {
seekToPreviousRow = super.seekToPreviousRow(super.peek());
@@ -221,24 +219,24 @@ public class LocalIndexStoreFileScanner extends StoreFileScanner{
* @throws IOException
*/
public boolean seekOrReseek(Cell cell, boolean isSeek) throws IOException{
- KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
+ KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(cell);
KeyValue keyToSeek = kv;
if (reader.isTop()) {
- if(getComparator().compare(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength(), reader.getSplitkey(), 0, reader.getSplitkey().length) < 0){
+ if(getComparator().compare(kv.getRowArray(), kv.getKeyOffset(), kv.getKeyLength(), reader.getSplitkey(), 0, reader.getSplitkey().length) < 0){
if(!isSeek && realSeekDone()) {
return true;
}
return seekOrReseekToProperKey(isSeek, keyToSeek);
}
- keyToSeek = getKeyPresentInHFiles(kv.getBuffer());
+ keyToSeek = getKeyPresentInHFiles(kv.getRowArray());
return seekOrReseekToProperKey(isSeek, keyToSeek);
} else {
- if (getComparator().compare(kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength(), reader.getSplitkey(), 0, reader.getSplitkey().length) >= 0) {
+ if (getComparator().compare(kv.getRowArray(), kv.getKeyOffset(), kv.getKeyLength(), reader.getSplitkey(), 0, reader.getSplitkey().length) >= 0) {
close();
return false;
}
if(!isSeek && reader.getRegionInfo().getStartKey().length == 0 && reader.getSplitRow().length > reader.getRegionStartKeyInHFile().length) {
- keyToSeek = getKeyPresentInHFiles(kv.getBuffer());
+ keyToSeek = getKeyPresentInHFiles(kv.getRowArray());
}
}
return seekOrReseekToProperKey(isSeek, keyToSeek);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodec.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodec.java
index 80745a8..ebd212e 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodec.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/wal/IndexedWALEditCodec.java
@@ -30,13 +30,13 @@ import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.codec.BaseDecoder;
import org.apache.hadoop.hbase.codec.BaseEncoder;
import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.phoenix.hbase.index.util.VersionUtil;
import org.apache.phoenix.hbase.index.wal.IndexedKeyValue;
import org.apache.phoenix.hbase.index.wal.KeyValueCodec;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
/**
@@ -215,7 +215,7 @@ public class IndexedWALEditCodec extends WALCellCodec {
checkFlushed();
// use the standard encoding mechanism
- KeyValueCodec.write(this.dataOutput, KeyValueUtil.ensureKeyValue(cell));
+ KeyValueCodec.write(this.dataOutput, PhoenixKeyValueUtil.maybeCopyCell(cell));
}
}
@@ -255,7 +255,7 @@ public class IndexedWALEditCodec extends WALCellCodec {
this.compressedKvEncoder.write(cell);
}
else{
- KeyValueCodec.write(this.dataOutput, KeyValueUtil.ensureKeyValue(cell));
+ KeyValueCodec.write(this.dataOutput, PhoenixKeyValueUtil.maybeCopyCell(cell));
}
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillManager.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillManager.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillManager.java
index 6d89c99..6e3956f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillManager.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillManager.java
@@ -29,6 +29,7 @@ import java.util.Iterator;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.io.WritableUtils;
@@ -43,7 +44,7 @@ import org.apache.phoenix.schema.ValueBitSet;
import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
import org.apache.phoenix.schema.tuple.Tuple;
import org.apache.phoenix.util.Closeables;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
import org.apache.phoenix.util.TupleUtil;
import com.google.common.base.Preconditions;
@@ -201,8 +202,8 @@ public class SpillManager implements Closeable {
input.skip(keyLength);
int valueLength = WritableUtils.readVInt(input);
int vIntValLength = WritableUtils.getVIntSize(keyLength);
- KeyValue keyValue =
- KeyValueUtil.newKeyValue(ptr.get(), ptr.getOffset(), ptr.getLength(),
+ Cell keyValue =
+ PhoenixKeyValueUtil.newKeyValue(ptr.get(), ptr.getOffset(), ptr.getLength(),
QueryConstants.SINGLE_COLUMN_FAMILY, QueryConstants.SINGLE_COLUMN,
QueryConstants.AGG_TIMESTAMP, data, vIntKeyLength + keyLength + vIntValLength, valueLength);
Tuple result = new SingleKeyValueTuple(keyValue);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
index dc0ae21..69d5144 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
@@ -51,7 +51,7 @@ import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.memory.InsufficientMemoryException;
import org.apache.phoenix.memory.MemoryManager.MemoryChunk;
import org.apache.phoenix.util.Closeables;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -364,7 +364,7 @@ public class SpillableGroupByCache implements GroupByCache {
+ Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength()) + " with aggregators "
+ aggs.toString() + " value = " + Bytes.toStringBinary(value));
}
- results.add(KeyValueUtil.newKeyValue(key.get(), key.getOffset(), key.getLength(), SINGLE_COLUMN_FAMILY,
+ results.add(PhoenixKeyValueUtil.newKeyValue(key.get(), key.getOffset(), key.getLength(), SINGLE_COLUMN_FAMILY,
SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
return cacheIter.hasNext();
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java
index 839e7c9..5c20f4d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java
@@ -31,9 +31,7 @@ import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -67,6 +65,7 @@ import org.apache.phoenix.schema.tuple.Tuple;
import org.apache.phoenix.schema.types.PVarchar;
import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
import org.apache.phoenix.util.SizedUtil;
public class ListJarsQueryPlan implements QueryPlan {
@@ -163,9 +162,9 @@ public class ListJarsQueryPlan implements QueryPlan {
expression.evaluate(null, ptr);
byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr);
Cell cell =
- CellUtil.createCell(rowKey, HConstants.EMPTY_BYTE_ARRAY,
+ PhoenixKeyValueUtil.newKeyValue(rowKey, HConstants.EMPTY_BYTE_ARRAY,
HConstants.EMPTY_BYTE_ARRAY, EnvironmentEdgeManager.currentTimeMillis(),
- Type.Put.getCode(), HConstants.EMPTY_BYTE_ARRAY);
+ HConstants.EMPTY_BYTE_ARRAY);
List<Cell> cells = new ArrayList<Cell>(1);
cells.add(cell);
return new ResultTuple(Result.create(cells));
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java
index 8e63fa9..343c8f0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/MutatingParallelIteratorFactory.java
@@ -25,7 +25,7 @@ import static org.apache.phoenix.query.QueryConstants.UNGROUPED_AGG_ROW_KEY;
import java.sql.SQLException;
import java.util.List;
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.phoenix.execute.MutationState;
import org.apache.phoenix.iterate.ParallelIteratorFactory;
@@ -35,7 +35,7 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
import org.apache.phoenix.schema.tuple.Tuple;
import org.apache.phoenix.schema.types.PLong;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
/**
* Factory class used to instantiate an iterator to handle mutations made during a parallel scan.
@@ -66,7 +66,7 @@ public abstract class MutatingParallelIteratorFactory implements ParallelIterato
final MutationState finalState = state;
byte[] value = PLong.INSTANCE.toBytes(totalRowCount);
- KeyValue keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
+ Cell keyValue = PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
final Tuple tuple = new SingleKeyValueTuple(keyValue);
return new PeekingResultIterator() {
private boolean done = false;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
index 62e6991..4e5ef84 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java
@@ -25,9 +25,7 @@ import java.util.List;
import java.util.Set;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -64,6 +62,7 @@ import org.apache.phoenix.schema.types.PLong;
import org.apache.phoenix.trace.util.Tracing;
import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
import org.apache.phoenix.util.SizedUtil;
public class TraceQueryPlan implements QueryPlan {
@@ -167,9 +166,11 @@ public class TraceQueryPlan implements QueryPlan {
expression.evaluate(null, ptr);
byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr);
Cell cell =
- CellUtil.createCell(rowKey, HConstants.EMPTY_BYTE_ARRAY,
- HConstants.EMPTY_BYTE_ARRAY, EnvironmentEdgeManager.currentTimeMillis(),
- Type.Put.getCode(), HConstants.EMPTY_BYTE_ARRAY);
+ PhoenixKeyValueUtil
+ .newKeyValue(rowKey, HConstants.EMPTY_BYTE_ARRAY,
+ HConstants.EMPTY_BYTE_ARRAY,
+ EnvironmentEdgeManager.currentTimeMillis(),
+ HConstants.EMPTY_BYTE_ARRAY);
List<Cell> cells = new ArrayList<Cell>(1);
cells.add(cell);
return new ResultTuple(Result.create(cells));
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index 67cc114..7c4d06d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -39,7 +39,6 @@ import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -71,7 +70,7 @@ import org.apache.phoenix.schema.types.PInteger;
import org.apache.phoenix.util.Closeables;
import org.apache.phoenix.util.EncodedColumnsUtil;
import org.apache.phoenix.util.IndexUtil;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
import org.apache.phoenix.util.LogUtil;
import org.apache.phoenix.util.ScanUtil;
import org.apache.phoenix.util.SizedUtil;
@@ -297,7 +296,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
long estSize = sizeOfUnorderedGroupByMap(aggregateMap.size(), aggregators.getEstimatedByteSize());
chunk.resize(estSize);
- final List<KeyValue> aggResults = new ArrayList<KeyValue>(aggregateMap.size());
+ final List<Cell> aggResults = new ArrayList<Cell>(aggregateMap.size());
final Iterator<Map.Entry<ImmutableBytesPtr, Aggregator[]>> cacheIter =
aggregateMap.entrySet().iterator();
@@ -314,8 +313,8 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
+ " with aggregators " + Arrays.asList(rowAggregators).toString()
+ " value = " + Bytes.toStringBinary(value), customAnnotations));
}
- KeyValue keyValue =
- KeyValueUtil.newKeyValue(key.get(), key.getOffset(), key.getLength(),
+ Cell keyValue =
+ PhoenixKeyValueUtil.newKeyValue(key.get(), key.getOffset(), key.getLength(),
SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0,
value.length);
aggResults.add(keyValue);
@@ -522,8 +521,8 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
if (currentKey != null) {
byte[] value = aggregators.toBytes(rowAggregators);
- KeyValue keyValue =
- KeyValueUtil.newKeyValue(currentKey.get(), currentKey.getOffset(),
+ Cell keyValue =
+ PhoenixKeyValueUtil.newKeyValue(currentKey.get(), currentKey.getOffset(),
currentKey.getLength(), SINGLE_COLUMN_FAMILY, SINGLE_COLUMN,
AGG_TIMESTAMP, value, 0, value.length);
results.add(keyValue);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c82cc18d/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 5dbf765..a87e961 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -98,6 +98,11 @@ import java.util.NavigableMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellBuilder;
+import org.apache.hadoop.hbase.CellBuilder.DataType;
+import org.apache.hadoop.hbase.CellBuilderFactory;
+import org.apache.hadoop.hbase.CellBuilderType;
+import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
@@ -105,6 +110,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
+import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Mutation;
@@ -225,7 +231,7 @@ import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.EncodedColumnsUtil;
import org.apache.phoenix.util.EnvironmentEdgeManager;
import org.apache.phoenix.util.IndexUtil;
-import org.apache.phoenix.util.KeyValueUtil;
+import org.apache.phoenix.util.PhoenixKeyValueUtil;
import org.apache.phoenix.util.MetaDataUtil;
import org.apache.phoenix.util.QueryUtil;
import org.apache.phoenix.util.ReadOnlyProps;
@@ -266,39 +272,38 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
// Column to track tables that have been upgraded based on PHOENIX-2067
public static final String ROW_KEY_ORDER_OPTIMIZABLE = "ROW_KEY_ORDER_OPTIMIZABLE";
public static final byte[] ROW_KEY_ORDER_OPTIMIZABLE_BYTES = Bytes.toBytes(ROW_KEY_ORDER_OPTIMIZABLE);
-
// KeyValues for Table
- private static final KeyValue TABLE_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES);
- private static final KeyValue TABLE_SEQ_NUM_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
- private static final KeyValue COLUMN_COUNT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_COUNT_BYTES);
- private static final KeyValue SALT_BUCKETS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, SALT_BUCKETS_BYTES);
- private static final KeyValue PK_NAME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, PK_NAME_BYTES);
- private static final KeyValue DATA_TABLE_NAME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES);
- private static final KeyValue INDEX_STATE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_STATE_BYTES);
- private static final KeyValue IMMUTABLE_ROWS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IMMUTABLE_ROWS_BYTES);
- private static final KeyValue VIEW_EXPRESSION_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_STATEMENT_BYTES);
- private static final KeyValue DEFAULT_COLUMN_FAMILY_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DEFAULT_COLUMN_FAMILY_NAME_BYTES);
- private static final KeyValue DISABLE_WAL_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DISABLE_WAL_BYTES);
- private static final KeyValue MULTI_TENANT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MULTI_TENANT_BYTES);
- private static final KeyValue VIEW_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_TYPE_BYTES);
- private static final KeyValue VIEW_INDEX_ID_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_INDEX_ID_BYTES);
- private static final KeyValue INDEX_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_TYPE_BYTES);
- private static final KeyValue INDEX_DISABLE_TIMESTAMP_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES);
- private static final KeyValue STORE_NULLS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, STORE_NULLS_BYTES);
- private static final KeyValue EMPTY_KEYVALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES);
- private static final KeyValue BASE_COLUMN_COUNT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES);
- private static final KeyValue ROW_KEY_ORDER_OPTIMIZABLE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ROW_KEY_ORDER_OPTIMIZABLE_BYTES);
- private static final KeyValue TRANSACTIONAL_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TRANSACTIONAL_BYTES);
- private static final KeyValue UPDATE_CACHE_FREQUENCY_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, UPDATE_CACHE_FREQUENCY_BYTES);
- private static final KeyValue IS_NAMESPACE_MAPPED_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY,
+ private static final Cell TABLE_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES);
+ private static final Cell TABLE_SEQ_NUM_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
+ private static final Cell COLUMN_COUNT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_COUNT_BYTES);
+ private static final Cell SALT_BUCKETS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, SALT_BUCKETS_BYTES);
+ private static final Cell PK_NAME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, PK_NAME_BYTES);
+ private static final Cell DATA_TABLE_NAME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES);
+ private static final Cell INDEX_STATE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_STATE_BYTES);
+ private static final Cell IMMUTABLE_ROWS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IMMUTABLE_ROWS_BYTES);
+ private static final Cell VIEW_EXPRESSION_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_STATEMENT_BYTES);
+ private static final Cell DEFAULT_COLUMN_FAMILY_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DEFAULT_COLUMN_FAMILY_NAME_BYTES);
+ private static final Cell DISABLE_WAL_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DISABLE_WAL_BYTES);
+ private static final Cell MULTI_TENANT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MULTI_TENANT_BYTES);
+ private static final Cell VIEW_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_TYPE_BYTES);
+ private static final Cell VIEW_INDEX_ID_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_INDEX_ID_BYTES);
+ private static final Cell INDEX_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_TYPE_BYTES);
+ private static final Cell INDEX_DISABLE_TIMESTAMP_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES);
+ private static final Cell STORE_NULLS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, STORE_NULLS_BYTES);
+ private static final Cell EMPTY_KEYVALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES);
+ private static final Cell BASE_COLUMN_COUNT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES);
+ private static final Cell ROW_KEY_ORDER_OPTIMIZABLE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ROW_KEY_ORDER_OPTIMIZABLE_BYTES);
+ private static final Cell TRANSACTIONAL_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TRANSACTIONAL_BYTES);
+ private static final Cell UPDATE_CACHE_FREQUENCY_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, UPDATE_CACHE_FREQUENCY_BYTES);
+ private static final Cell IS_NAMESPACE_MAPPED_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY,
TABLE_FAMILY_BYTES, IS_NAMESPACE_MAPPED_BYTES);
- private static final KeyValue AUTO_PARTITION_SEQ_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, AUTO_PARTITION_SEQ_BYTES);
- private static final KeyValue APPEND_ONLY_SCHEMA_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, APPEND_ONLY_SCHEMA_BYTES);
- private static final KeyValue STORAGE_SCHEME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, STORAGE_SCHEME_BYTES);
- private static final KeyValue ENCODING_SCHEME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ENCODING_SCHEME_BYTES);
- private static final KeyValue USE_STATS_FOR_PARALLELIZATION_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, USE_STATS_FOR_PARALLELIZATION_BYTES);
+ private static final Cell AUTO_PARTITION_SEQ_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, AUTO_PARTITION_SEQ_BYTES);
+ private static final Cell APPEND_ONLY_SCHEMA_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, APPEND_ONLY_SCHEMA_BYTES);
+ private static final Cell STORAGE_SCHEME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, STORAGE_SCHEME_BYTES);
+ private static final Cell ENCODING_SCHEME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ENCODING_SCHEME_BYTES);
+ private static final Cell USE_STATS_FOR_PARALLELIZATION_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, USE_STATS_FOR_PARALLELIZATION_BYTES);
- private static final List<KeyValue> TABLE_KV_COLUMNS = Arrays.<KeyValue>asList(
+ private static final List<Cell> TABLE_KV_COLUMNS = Arrays.<Cell>asList(
EMPTY_KEYVALUE_KV,
TABLE_TYPE_KV,
TABLE_SEQ_NUM_KV,
@@ -329,7 +334,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
USE_STATS_FOR_PARALLELIZATION_KV
);
static {
- Collections.sort(TABLE_KV_COLUMNS, KeyValue.COMPARATOR);
+ Collections.sort(TABLE_KV_COLUMNS, CellComparatorImpl.COMPARATOR);
}
private static final int TABLE_TYPE_INDEX = TABLE_KV_COLUMNS.indexOf(TABLE_TYPE_KV);
@@ -361,20 +366,20 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
private static final int USE_STATS_FOR_PARALLELIZATION_INDEX = TABLE_KV_COLUMNS.indexOf(USE_STATS_FOR_PARALLELIZATION_KV);
// KeyValues for Column
- private static final KeyValue DECIMAL_DIGITS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DECIMAL_DIGITS_BYTES);
- private static final KeyValue COLUMN_SIZE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_SIZE_BYTES);
- private static final KeyValue NULLABLE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, NULLABLE_BYTES);
- private static final KeyValue DATA_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DATA_TYPE_BYTES);
- private static final KeyValue ORDINAL_POSITION_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ORDINAL_POSITION_BYTES);
- private static final KeyValue SORT_ORDER_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, SORT_ORDER_BYTES);
- private static final KeyValue ARRAY_SIZE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ARRAY_SIZE_BYTES);
- private static final KeyValue VIEW_CONSTANT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_CONSTANT_BYTES);
- private static final KeyValue IS_VIEW_REFERENCED_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_VIEW_REFERENCED_BYTES);
- private static final KeyValue COLUMN_DEF_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_DEF_BYTES);
- private static final KeyValue IS_ROW_TIMESTAMP_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_ROW_TIMESTAMP_BYTES);
- private static final KeyValue COLUMN_QUALIFIER_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_QUALIFIER_BYTES);
-
- private static final List<KeyValue> COLUMN_KV_COLUMNS = Arrays.<KeyValue>asList(
+ private static final Cell DECIMAL_DIGITS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DECIMAL_DIGITS_BYTES);
+ private static final Cell COLUMN_SIZE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_SIZE_BYTES);
+ private static final Cell NULLABLE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, NULLABLE_BYTES);
+ private static final Cell DATA_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DATA_TYPE_BYTES);
+ private static final Cell ORDINAL_POSITION_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ORDINAL_POSITION_BYTES);
+ private static final Cell SORT_ORDER_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, SORT_ORDER_BYTES);
+ private static final Cell ARRAY_SIZE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, ARRAY_SIZE_BYTES);
+ private static final Cell VIEW_CONSTANT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, VIEW_CONSTANT_BYTES);
+ private static final Cell IS_VIEW_REFERENCED_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_VIEW_REFERENCED_BYTES);
+ private static final Cell COLUMN_DEF_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_DEF_BYTES);
+ private static final Cell IS_ROW_TIMESTAMP_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_ROW_TIMESTAMP_BYTES);
+ private static final Cell COLUMN_QUALIFIER_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_QUALIFIER_BYTES);
+
+ private static final List<Cell> COLUMN_KV_COLUMNS = Arrays.<Cell>asList(
DECIMAL_DIGITS_KV,
COLUMN_SIZE_KV,
NULLABLE_KV,
@@ -390,9 +395,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
COLUMN_QUALIFIER_KV
);
static {
- Collections.sort(COLUMN_KV_COLUMNS, KeyValue.COMPARATOR);
+ Collections.sort(COLUMN_KV_COLUMNS, CellComparatorImpl.COMPARATOR);
}
- private static final KeyValue QUALIFIER_COUNTER_KV = KeyValue.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_QUALIFIER_COUNTER_BYTES);
+ private static final Cell QUALIFIER_COUNTER_KV = KeyValueUtil.createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, COLUMN_QUALIFIER_COUNTER_BYTES);
private static final int DECIMAL_DIGITS_INDEX = COLUMN_KV_COLUMNS.indexOf(DECIMAL_DIGITS_KV);
private static final int COLUMN_SIZE_INDEX = COLUMN_KV_COLUMNS.indexOf(COLUMN_SIZE_KV);
private static final int NULLABLE_INDEX = COLUMN_KV_COLUMNS.indexOf(NULLABLE_KV);
@@ -408,18 +413,18 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
private static final int LINK_TYPE_INDEX = 0;
- private static final KeyValue CLASS_NAME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, CLASS_NAME_BYTES);
- private static final KeyValue JAR_PATH_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, JAR_PATH_BYTES);
- private static final KeyValue RETURN_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, RETURN_TYPE_BYTES);
- private static final KeyValue NUM_ARGS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, NUM_ARGS_BYTES);
- private static final KeyValue TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TYPE_BYTES);
- private static final KeyValue IS_CONSTANT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_CONSTANT_BYTES);
- private static final KeyValue DEFAULT_VALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DEFAULT_VALUE_BYTES);
- private static final KeyValue MIN_VALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MIN_VALUE_BYTES);
- private static final KeyValue MAX_VALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MAX_VALUE_BYTES);
- private static final KeyValue IS_ARRAY_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_ARRAY_BYTES);
-
- private static final List<KeyValue> FUNCTION_KV_COLUMNS = Arrays.<KeyValue>asList(
+ private static final Cell CLASS_NAME_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, CLASS_NAME_BYTES);
+ private static final Cell JAR_PATH_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, JAR_PATH_BYTES);
+ private static final Cell RETURN_TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, RETURN_TYPE_BYTES);
+ private static final Cell NUM_ARGS_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, NUM_ARGS_BYTES);
+ private static final Cell TYPE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, TYPE_BYTES);
+ private static final Cell IS_CONSTANT_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_CONSTANT_BYTES);
+ private static final Cell DEFAULT_VALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, DEFAULT_VALUE_BYTES);
+ private static final Cell MIN_VALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MIN_VALUE_BYTES);
+ private static final Cell MAX_VALUE_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, MAX_VALUE_BYTES);
+ private static final Cell IS_ARRAY_KV = createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, IS_ARRAY_BYTES);
+
+ private static final List<Cell> FUNCTION_KV_COLUMNS = Arrays.<Cell>asList(
EMPTY_KEYVALUE_KV,
CLASS_NAME_KV,
JAR_PATH_KV,
@@ -427,7 +432,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
NUM_ARGS_KV
);
static {
- Collections.sort(FUNCTION_KV_COLUMNS, KeyValue.COMPARATOR);
+ Collections.sort(FUNCTION_KV_COLUMNS, CellComparatorImpl.COMPARATOR);
}
private static final int CLASS_NAME_INDEX = FUNCTION_KV_COLUMNS.indexOf(CLASS_NAME_KV);
@@ -435,7 +440,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
private static final int RETURN_TYPE_INDEX = FUNCTION_KV_COLUMNS.indexOf(RETURN_TYPE_KV);
private static final int NUM_ARGS_INDEX = FUNCTION_KV_COLUMNS.indexOf(NUM_ARGS_KV);
- private static final List<KeyValue> FUNCTION_ARG_KV_COLUMNS = Arrays.<KeyValue>asList(
+ private static final List<Cell> FUNCTION_ARG_KV_COLUMNS = Arrays.<Cell>asList(
TYPE_KV,
IS_ARRAY_KV,
IS_CONSTANT_KV,
@@ -444,7 +449,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
MAX_VALUE_KV
);
static {
- Collections.sort(FUNCTION_ARG_KV_COLUMNS, KeyValue.COMPARATOR);
+ Collections.sort(FUNCTION_ARG_KV_COLUMNS, CellComparatorImpl.COMPARATOR);
}
private static final int IS_ARRAY_INDEX = FUNCTION_ARG_KV_COLUMNS.indexOf(IS_ARRAY_KV);
@@ -718,7 +723,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
PInteger.INSTANCE.getCodec().decodeInt(arraySizeKv.getValueArray(), arraySizeKv.getValueOffset(), SortOrder.getDefault());
Cell viewConstantKv = colKeyValues[VIEW_CONSTANT_INDEX];
- byte[] viewConstant = viewConstantKv == null ? null : viewConstantKv.getValue();
+ byte[] viewConstant =
+ viewConstantKv == null ? null : new ImmutableBytesPtr(
+ viewConstantKv.getValueArray(), viewConstantKv.getValueOffset(),
+ viewConstantKv.getValueLength()).copyBytesIfNecessary();
Cell isViewReferencedKv = colKeyValues[IS_VIEW_REFERENCED_INDEX];
boolean isViewReferenced = isViewReferencedKv != null && Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(isViewReferencedKv.getValueArray(), isViewReferencedKv.getValueOffset(), isViewReferencedKv.getValueLength()));
Cell columnDefKv = colKeyValues[COLUMN_DEF_INDEX];
@@ -1360,6 +1368,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
public void createTable(RpcController controller, CreateTableRequest request,
RpcCallback<MetaDataResponse> done) {
MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
+ CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY);
byte[][] rowKeyMetaData = new byte[3][];
byte[] schemaName = null;
byte[] tableName = null;
@@ -1544,10 +1553,14 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
else {
viewStatement = Bytes.toBytes(QueryUtil.getViewStatement(parentTable.getSchemaName().getString(), parentTable.getTableName().getString(), autoPartitionWhere));
}
- Cell viewStatementCell = new KeyValue(cell.getRow(), cell.getFamily(), VIEW_STATEMENT_BYTES,
- cell.getTimestamp(), Type.codeToType(cell.getTypeByte()), viewStatement);
- cells.add(viewStatementCell);
-
+ cellBuilder
+ .clear()
+ .setRow(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())
+ .setFamily(cell.getFamilyArray(), cell.getFamilyOffset(),
+ cell.getFamilyLength()).setQualifier(VIEW_STATEMENT_BYTES)
+ .setTimestamp(cell.getTimestamp()).setType(DataType.Put)
+ .setValue(viewStatement);
+ cells.add(cellBuilder.build());
// set the IS_VIEW_REFERENCED column of the auto partition column row
Put autoPartitionPut = MetaDataUtil.getPutOnlyAutoPartitionColumn(parentTable, tableMetadata);
familyCellMap = autoPartitionPut.getFamilyCellMap();
@@ -1557,9 +1570,14 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
Object val = dataType.toObject(autoPartitionNum, PLong.INSTANCE);
byte[] bytes = new byte [dataType.getByteSize() + 1];
dataType.toBytes(val, bytes, 0);
- Cell viewConstantCell = new KeyValue(cell.getRow(), cell.getFamily(), VIEW_CONSTANT_BYTES,
- cell.getTimestamp(), Type.codeToType(cell.getTypeByte()), bytes);
- cells.add(viewConstantCell);
+ cellBuilder
+ .clear()
+ .setRow(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())
+ .setFamily(cell.getFamilyArray(), cell.getFamilyOffset(),
+ cell.getFamilyLength()).setQualifier(VIEW_CONSTANT_BYTES)
+ .setTimestamp(cell.getTimestamp()).setType(DataType.Put)
+ .setValue(bytes);
+ cells.add(cellBuilder.build());
}
Short indexId = null;
if (request.hasAllocateIndexId() && request.getAllocateIndexId()) {
@@ -1600,9 +1618,15 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
Object val = dataType.toObject(seqValue, PLong.INSTANCE);
byte[] bytes = new byte [dataType.getByteSize() + 1];
dataType.toBytes(val, bytes, 0);
- Cell indexIdCell = new KeyValue(cell.getRow(), cell.getFamily(), VIEW_INDEX_ID_BYTES,
- cell.getTimestamp(), Type.codeToType(cell.getTypeByte()), bytes);
- cells.add(indexIdCell);
+ cellBuilder
+ .clear()
+ .setRow(cell.getRowArray(), cell.getRowOffset(),
+ cell.getRowLength())
+ .setFamily(cell.getFamilyArray(), cell.getFamilyOffset(),
+ cell.getFamilyLength()).setQualifier(VIEW_INDEX_ID_BYTES)
+ .setTimestamp(cell.getTimestamp()).setType(DataType.Put)
+ .setValue(bytes);
+ cells.add(cellBuilder.build());
indexId = (short) seqValue;
}
}
@@ -1845,9 +1869,9 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
List<RowLock> locks = Lists.newArrayList();
try {
- acquireLock(region, lockKey, locks);
+ ServerUtil.acquireLock(region, lockKey, locks);
if (key != lockKey) {
- acquireLock(region, key, locks);
+ ServerUtil.acquireLock(region, key, locks);
}
List<ImmutableBytesPtr> invalidateList = new ArrayList<ImmutableBytesPtr>();
result =
@@ -2303,6 +2327,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
Region region, List<RowLock> locks, int clientVersion) throws IOException, SQLException {
List<PutWithOrdinalPosition> columnPutsForBaseTable = Lists.newArrayListWithExpectedSize(tableMetadata.size());
Map<TableProperty, Cell> tablePropertyCellMap = Maps.newHashMapWithExpectedSize(tableMetadata.size());
+ CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY);
// Isolate the puts relevant to adding columns. Also figure out what kind of columns are being added.
for (Mutation m : tableMetadata) {
if (m instanceof Put) {
@@ -2324,9 +2349,21 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
byte[] propNameBytes = Bytes.toBytes(tableProp.getPropertyName());
if (Bytes.compareTo(propNameBytes, 0, propNameBytes.length, cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())==0
&& tableProp.isValidOnView() && tableProp.isMutable()) {
- Cell tablePropCell = CellUtil.createCell(cell.getRow(), CellUtil.cloneFamily(cell),
- CellUtil.cloneQualifier(cell), cell.getTimestamp(), cell.getTypeByte(),
- CellUtil.cloneValue(cell));
+ Cell tablePropCell =
+ cellBuilder
+ .clear()
+ .setRow(cell.getRowArray(), cell.getRowOffset(),
+ cell.getRowLength())
+ .setFamily(cell.getFamilyArray(),
+ cell.getFamilyOffset(), cell.getFamilyLength())
+ .setQualifier(cell.getQualifierArray(),
+ cell.getQualifierOffset(),
+ cell.getQualifierLength())
+ .setTimestamp(cell.getTimestamp())
+ .setType(DataType.Put)
+ .setValue(cell.getValueArray(),
+ cell.getValueOffset(), cell.getValueLength())
+ .build();
tablePropertyCellMap.put(tableProp, tablePropCell);
}
}
@@ -2432,9 +2469,19 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
// The column doesn't exist in the view.
Put viewColumnPut = new Put(columnKey, clientTimeStamp);
for (Cell cell : baseTableColumnPut.getFamilyCellMap().values().iterator().next()) {
- viewColumnPut.add(CellUtil.createCell(columnKey, CellUtil.cloneFamily(cell),
- CellUtil.cloneQualifier(cell), cell.getTimestamp(), cell.getTypeByte(),
- CellUtil.cloneValue(cell)));
+ Cell newCell =
+ cellBuilder
+ .clear()
+ .setRow(columnKey)
+ .setFamily(cell.getFamilyArray(), cell.getFamilyOffset(),
+ cell.getFamilyLength())
+ .setQualifier(cell.getQualifierArray(),
+ cell.getQualifierOffset(), cell.getQualifierLength())
+ .setTimestamp(cell.getTimestamp())
+ .setType(DataType.Put)
+ .setValue(cell.getValueArray(), cell.getValueOffset(),
+ cell.getValueLength()).build();
+ viewColumnPut.add(newCell);
}
if (isDivergedView(view)) {
if (isPkCol) {
@@ -3556,7 +3603,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
if ((currentState == PIndexState.ACTIVE || currentState == PIndexState.PENDING_ACTIVE) && newState == PIndexState.UNUSABLE) {
newState = PIndexState.INACTIVE;
- newKVs.set(indexStateKVIndex, KeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES,
+ newKVs.set(indexStateKVIndex, PhoenixKeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES,
INDEX_STATE_BYTES, timeStamp, Bytes.toBytes(newState.getSerializedValue())));
} else if ((currentState == PIndexState.INACTIVE || currentState == PIndexState.PENDING_ACTIVE) && newState == PIndexState.USABLE) {
// Don't allow manual state change to USABLE (i.e. ACTIVE) if non zero INDEX_DISABLE_TIMESTAMP
@@ -3565,7 +3612,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
} else {
newState = PIndexState.ACTIVE;
}
- newKVs.set(indexStateKVIndex, KeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES,
+ newKVs.set(indexStateKVIndex, PhoenixKeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES,
INDEX_STATE_BYTES, timeStamp, Bytes.toBytes(newState.getSerializedValue())));
}