Posted to commits@phoenix.apache.org by ra...@apache.org on 2017/11/17 14:17:49 UTC
phoenix git commit: PHOENIX-4318 Fix IndexHalfStoreFileReader and related classes (Rajeshbabu)
Repository: phoenix
Updated Branches:
refs/heads/5.x-HBase-2.0 5bdd3b2d4 -> 8ab7cc142
PHOENIX-4318 Fix IndexHalfStoreFileReader and related classes (Rajeshbabu)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8ab7cc14
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8ab7cc14
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8ab7cc14
Branch: refs/heads/5.x-HBase-2.0
Commit: 8ab7cc142473e964ee124b5f9163ff341452f6b0
Parents: 5bdd3b2
Author: Rajeshbabu Chintaguntla <ra...@apache.org>
Authored: Fri Nov 17 19:47:09 2017 +0530
Committer: Rajeshbabu Chintaguntla <ra...@apache.org>
Committed: Fri Nov 17 19:47:09 2017 +0530
----------------------------------------------------------------------
.../regionserver/IndexHalfStoreFileReader.java | 23 ++-
.../IndexHalfStoreFileReaderGenerator.java | 193 +++++--------------
.../hbase/regionserver/LocalIndexSplitter.java | 4 +-
.../LocalIndexStoreFileScanner.java | 29 +--
.../hbase/regionserver/ScannerContextUtil.java | 7 +-
.../UngroupedAggregateRegionObserver.java | 1 -
.../java/org/apache/phoenix/util/IndexUtil.java | 8 +-
7 files changed, 94 insertions(+), 171 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8ab7cc14/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index 4b6b7e2..8e590f6 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
@@ -26,6 +27,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.Reference;
@@ -47,7 +49,7 @@ import org.apache.phoenix.index.IndexMaintainer;
* This file is not splittable. Calls to {@link #midkey()} return null.
*/
-public class IndexHalfStoreFileReader extends StoreFile.Reader {
+public class IndexHalfStoreFileReader extends StoreFileReader {
private final boolean top;
// This is the key we split around. It's the first possible entry on a row:
// i.e. empty column and a timestamp of LATEST_TIMESTAMP.
@@ -56,7 +58,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
private final Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers;
private final byte[][] viewConstants;
private final int offset;
- private final HRegionInfo regionInfo;
+ private final RegionInfo regionInfo;
private final byte[] regionStartKeyInHFile;
/**
@@ -78,9 +80,10 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
final FSDataInputStreamWrapper in, long size, final Reference r,
final Configuration conf,
final Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers,
- final byte[][] viewConstants, final HRegionInfo regionInfo,
- byte[] regionStartKeyInHFile, byte[] splitKey) throws IOException {
- super(fs, p, in, size, cacheConf, conf);
+ final byte[][] viewConstants, final RegionInfo regionInfo,
+ byte[] regionStartKeyInHFile, byte[] splitKey, boolean primaryReplicaStoreFile) throws IOException {
+ super(fs, p, in, size, cacheConf, primaryReplicaStoreFile, new AtomicInteger(0), false,
+ conf);
this.splitkey = splitKey == null ? r.getSplitKey() : splitKey;
// Is it top or bottom half?
this.top = Reference.isTopFileRegion(r.getFileRegion());
@@ -104,7 +107,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
return indexMaintainers;
}
- public HRegionInfo getRegionInfo() {
+ public RegionInfo getRegionInfo() {
return regionInfo;
}
@@ -123,4 +126,12 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
public boolean isTop() {
return top;
}
+
+ @Override
+ public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread,
+ boolean isCompaction, long readPt, long scannerOrder,
+ boolean canOptimizeForNonNullColumn) {
+ return new LocalIndexStoreFileScanner(this, cacheBlocks, pread, isCompaction, readPt,
+ scannerOrder, canOptimizeForNonNullColumn);
+ }
}
\ No newline at end of file
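
For context on the override just added: in HBase 2.0 the scanner-wrapping that
1.x did in preCompactScannerOpen/preStoreScannerOpen moves into the reader
itself, so every path that opens a scanner on this store file now gets a
LocalIndexStoreFileScanner. A minimal caller sketch, assuming readPt comes from
the region's MVCC read point (parameter meanings follow the signature above):

    // Hedged sketch: obtaining a scanner through the HBase 2.0 reader API.
    StoreFileScanner scanner = reader.getStoreFileScanner(
        true,    // cacheBlocks: allow block-cache usage
        false,   // pread: positional reads rather than streaming
        false,   // isCompaction: a user scan, not a compaction
        readPt,  // MVCC read point governing cell visibility
        0,       // scannerOrder: tie-breaker among parallel scanners
        true);   // canOptimizeForNonNullColumn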
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8ab7cc14/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index 6e0bbcb..3cb36ee 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -19,39 +19,38 @@ package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.NavigableSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
+import org.apache.hadoop.hbase.io.HalfStoreFileReader;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
@@ -70,53 +69,50 @@ import org.apache.phoenix.util.RepairUtil;
import com.google.common.collect.Lists;
-public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
+public class IndexHalfStoreFileReaderGenerator implements RegionObserver {
private static final String LOCAL_INDEX_AUTOMATIC_REPAIR = "local.index.automatic.repair";
public static final Log LOG = LogFactory.getLog(IndexHalfStoreFileReaderGenerator.class);
+
@Override
- public Reader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
+ public StoreFileReader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
- Reference r, Reader reader) throws IOException {
- TableName tableName = ctx.getEnvironment().getRegion().getTableDesc().getTableName();
+ Reference r, StoreFileReader reader) throws IOException {
+ TableName tableName = ctx.getEnvironment().getRegion().getTableDescriptor().getTableName();
Region region = ctx.getEnvironment().getRegion();
- HRegionInfo childRegion = region.getRegionInfo();
+ RegionInfo childRegion = region.getRegionInfo();
byte[] splitKey = null;
+
if (reader == null && r != null) {
if(!p.toString().contains(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
- return super.preStoreFileReaderOpen(ctx, fs, p, in, size, cacheConf, r, reader);
- }
- Scan scan = MetaTableAccessor.getScanForTableName(tableName);
- SingleColumnValueFilter scvf = null;
- if (Reference.isTopFileRegion(r.getFileRegion())) {
- scvf = new SingleColumnValueFilter(HConstants.CATALOG_FAMILY,
- HConstants.SPLITB_QUALIFIER, CompareOp.EQUAL, region.getRegionInfo().toByteArray());
- scvf.setFilterIfMissing(true);
- } else {
- scvf = new SingleColumnValueFilter(HConstants.CATALOG_FAMILY,
- HConstants.SPLITA_QUALIFIER, CompareOp.EQUAL, region.getRegionInfo().toByteArray());
- scvf.setFilterIfMissing(true);
+ return reader;
}
- if(scvf != null) scan.setFilter(scvf);
- byte[] regionStartKeyInHFile = null;
- Connection connection = ctx.getEnvironment().getConnection();
- Table metaTable = null;
PhoenixConnection conn = null;
- try {
- metaTable = connection.getTable(TableName.META_TABLE_NAME);
- ResultScanner scanner = null;
+ Table metaTable = null;
+ byte[] regionStartKeyInHFile = null;
+ try (Connection hbaseConn =
+ ConnectionFactory.createConnection(ctx.getEnvironment().getConfiguration())) {
+ Scan scan = MetaTableAccessor.getScanForTableName(hbaseConn, tableName);
+ SingleColumnValueFilter scvf = null;
+ if (Reference.isTopFileRegion(r.getFileRegion())) {
+ scvf = new SingleColumnValueFilter(HConstants.CATALOG_FAMILY,
+ HConstants.SPLITB_QUALIFIER, CompareOperator.EQUAL, ((HRegionInfo)region.getRegionInfo()).toByteArray());
+ scvf.setFilterIfMissing(true);
+ } else {
+ scvf = new SingleColumnValueFilter(HConstants.CATALOG_FAMILY,
+ HConstants.SPLITA_QUALIFIER, CompareOperator.EQUAL, ((HRegionInfo)region.getRegionInfo()).toByteArray());
+ scvf.setFilterIfMissing(true);
+ }
+ if(scvf != null) scan.setFilter(scvf);
+ metaTable = hbaseConn.getTable(TableName.META_TABLE_NAME);
Result result = null;
- try {
- scanner = metaTable.getScanner(scan);
+ try (ResultScanner scanner = metaTable.getScanner(scan)) {
result = scanner.next();
- } finally {
- if(scanner != null) scanner.close();
}
if (result == null || result.isEmpty()) {
- Pair<HRegionInfo, HRegionInfo> mergeRegions =
- MetaTableAccessor.getRegionsFromMergeQualifier(ctx.getEnvironment()
- .getRegionServerServices().getConnection(),
+ Pair<RegionInfo, RegionInfo> mergeRegions =
+ MetaTableAccessor.getRegionsFromMergeQualifier(ctx.getEnvironment().getConnection(),
region.getRegionInfo().getRegionName());
if (mergeRegions == null || mergeRegions.getFirst() == null) return reader;
byte[] splitRow =
@@ -143,7 +139,7 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
new byte[region.getRegionInfo().getEndKey().length] :
region.getRegionInfo().getStartKey()).getKey();
} else {
- HRegionInfo parentRegion = HRegionInfo.getHRegionInfo(result);
+ RegionInfo parentRegion = MetaTableAccessor.getRegionInfo(result);
regionStartKeyInHFile =
parentRegion.getStartKey().length == 0 ? new byte[parentRegion
.getEndKey().length] : parentRegion.getStartKey();
@@ -154,7 +150,9 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
try {
conn = QueryUtil.getConnectionOnServer(ctx.getEnvironment().getConfiguration()).unwrap(
PhoenixConnection.class);
- PTable dataTable = IndexUtil.getPDataTable(conn, ctx.getEnvironment().getRegion().getTableDesc());
+ PTable dataTable =
+ IndexUtil.getPDataTable(conn, ctx.getEnvironment().getRegion()
+ .getTableDescriptor());
List<PTable> indexes = dataTable.getIndexes();
Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers =
new HashMap<ImmutableBytesWritable, IndexMaintainer>();
@@ -170,7 +168,8 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
byte[][] viewConstants = getViewConstants(dataTable);
return new IndexHalfStoreFileReader(fs, p, cacheConf, in, size, r, ctx
.getEnvironment().getConfiguration(), indexMaintainers, viewConstants,
- childRegion, regionStartKeyInHFile, splitKey);
+ childRegion, regionStartKeyInHFile, splitKey,
+ childRegion.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID);
} catch (ClassNotFoundException e) {
throw new IOException(e);
} catch (SQLException e) {
@@ -188,19 +187,12 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
return reader;
}
- @SuppressWarnings("deprecation")
@Override
- public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
- Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
- long earliestPutTs, InternalScanner s, CompactionRequest request) throws IOException {
+ public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
+ InternalScanner s, ScanType scanType, CompactionLifeCycleTracker tracker,
+ CompactionRequest request) throws IOException {
+
if (!IndexUtil.isLocalIndexStore(store)) { return s; }
- Scan scan = null;
- if (s!=null) {
- scan = ((StoreScanner)s).scan;
- } else {
- scan = new Scan();
- scan.setMaxVersions(store.getFamily().getMaxVersions());
- }
if (!store.hasReferences()) {
InternalScanner repairScanner = null;
if (request.isMajor() && (!RepairUtil.isLocalIndexStoreFilesConsistent(c.getEnvironment(), store))) {
@@ -218,20 +210,7 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
return s;
}
}
- List<StoreFileScanner> newScanners = new ArrayList<StoreFileScanner>(scanners.size());
- boolean scanUsePread = c.getEnvironment().getConfiguration().getBoolean("hbase.storescanner.use.pread", scan.isSmall());
- for(KeyValueScanner scanner: scanners) {
- Reader reader = ((StoreFileScanner) scanner).getReader();
- if (reader instanceof IndexHalfStoreFileReader) {
- newScanners.add(new LocalIndexStoreFileScanner(reader, reader.getScanner(
- scan.getCacheBlocks(), scanUsePread, false), true, reader.getHFileReader()
- .hasMVCCInfo(), store.getSmallestReadPoint()));
- } else {
- newScanners.add(((StoreFileScanner) scanner));
- }
- }
- return new StoreScanner(store, store.getScanInfo(), scan, newScanners,
- scanType, store.getSmallestReadPoint(), earliestPutTs);
+ return s;
}
private byte[][] getViewConstants(PTable dataTable) {
@@ -278,16 +257,16 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
private InternalScanner getRepairScanner(RegionCoprocessorEnvironment env, Store store) throws IOException {
//List<KeyValueScanner> scannersForStoreFiles = Lists.newArrayListWithExpectedSize(store.getStorefilesCount());
Scan scan = new Scan();
- scan.setMaxVersions(store.getFamily().getMaxVersions());
+ scan.readVersions(store.getColumnFamilyDescriptor().getMaxVersions());
for (Store s : env.getRegion().getStores()) {
if (!IndexUtil.isLocalIndexStore(s)) {
- scan.addFamily(s.getFamily().getName());
+ scan.addFamily(s.getColumnFamilyDescriptor().getName());
}
}
try {
PhoenixConnection conn = QueryUtil.getConnectionOnServer(env.getConfiguration())
.unwrap(PhoenixConnection.class);
- PTable dataPTable = IndexUtil.getPDataTable(conn, env.getRegion().getTableDesc());
+ PTable dataPTable = IndexUtil.getPDataTable(conn, env.getRegion().getTableDescriptor());
final List<IndexMaintainer> maintainers = Lists
.newArrayListWithExpectedSize(dataPTable.getIndexes().size());
for (PTable index : dataPTable.getIndexes()) {
@@ -296,7 +275,7 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
}
}
return new DataTableLocalIndexRegionScanner(env.getRegion().getScanner(scan), env.getRegion(),
- maintainers, store.getFamily().getName(),env.getConfiguration());
+ maintainers, store.getColumnFamilyDescriptor().getName(),env.getConfiguration());
} catch (ClassNotFoundException | SQLException e) {
@@ -304,78 +283,4 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
}
}
-
- @Override
- public KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
- final Store store, final Scan scan, final NavigableSet<byte[]> targetCols,
- final KeyValueScanner s) throws IOException {
- if (store.getFamily().getNameAsString()
- .startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)
- && store.hasReferences()) {
- final long readPt = c.getEnvironment().getRegion().getReadpoint(scan.getIsolationLevel
- ());
- if (!scan.isReversed()) {
- return new StoreScanner(store, store.getScanInfo(), scan,
- targetCols, readPt) {
-
- @Override
- protected List<KeyValueScanner> getScannersNoCompaction() throws IOException {
- if (store.hasReferences()) {
- return getLocalIndexScanners(c, store, scan, readPt);
- } else {
- return super.getScannersNoCompaction();
- }
- }
- };
- } else {
- return new ReversedStoreScanner(store, store.getScanInfo(), scan,
- targetCols, readPt) {
- @Override
- protected List<KeyValueScanner> getScannersNoCompaction() throws IOException {
- if (store.hasReferences()) {
- return getLocalIndexScanners(c, store, scan, readPt);
- } else {
- return super.getScannersNoCompaction();
- }
- }
- };
- }
- }
- return s;
- }
-
- private List<KeyValueScanner> getLocalIndexScanners(final
- ObserverContext<RegionCoprocessorEnvironment> c,
- final Store store, final Scan scan, final long readPt) throws IOException {
-
- boolean scanUsePread = c.getEnvironment().getConfiguration().getBoolean("hbase.storescanner.use.pread", scan.isSmall());
- Collection<StoreFile> storeFiles = store.getStorefiles();
- List<StoreFile> nonReferenceStoreFiles = new ArrayList<>(store.getStorefiles().size());
- List<StoreFile> referenceStoreFiles = new ArrayList<>(store.getStorefiles().size
- ());
- final List<KeyValueScanner> keyValueScanners = new ArrayList<>(store
- .getStorefiles().size() + 1);
- for (StoreFile storeFile : storeFiles) {
- if (storeFile.isReference()) {
- referenceStoreFiles.add(storeFile);
- } else {
- nonReferenceStoreFiles.add(storeFile);
- }
- }
- final List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(nonReferenceStoreFiles, scan.getCacheBlocks(), scanUsePread, readPt);
- keyValueScanners.addAll(scanners);
- for (StoreFile sf : referenceStoreFiles) {
- if (sf.getReader() instanceof IndexHalfStoreFileReader) {
- keyValueScanners.add(new LocalIndexStoreFileScanner(sf.getReader(), sf.getReader()
- .getScanner(scan.getCacheBlocks(), scanUsePread, false), true, sf
- .getReader().getHFileReader().hasMVCCInfo(), readPt));
- } else {
- keyValueScanners.add(new StoreFileScanner(sf.getReader(), sf.getReader()
- .getScanner(scan.getCacheBlocks(), scanUsePread, false), true, sf
- .getReader().getHFileReader().hasMVCCInfo(), readPt));
- }
- }
- keyValueScanners.addAll(((HStore) store).memstore.getScanners(readPt));
- return keyValueScanners;
- }
}
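
The reworked preStoreFileReaderOpen above also switches to the HBase 2.0 idiom
of opening a short-lived Connection through ConnectionFactory inside
try-with-resources, rather than borrowing the region server's shared connection
for the meta scan. A self-contained sketch of that pattern (the bare Scan is
illustrative, not the filtered meta scan used above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;

    try (Connection hbaseConn = ConnectionFactory.createConnection(conf);
         Table meta = hbaseConn.getTable(TableName.META_TABLE_NAME);
         ResultScanner results = meta.getScanner(new Scan())) {
        Result first = results.next();
        // connection, table and scanner all close automatically, even on error
    }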
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8ab7cc14/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
index c60058c..38bc8c5 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hbase.regionserver;
-import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
+import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-public class LocalIndexSplitter extends BaseRegionObserver {
+public class LocalIndexSplitter implements RegionObserver {
}
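
This one-line swap is the heart of the HBase 2.0 coprocessor migration:
BaseRegionObserver is gone, and RegionObserver is now an interface whose hooks
all have default no-op bodies, which is why the empty class above still
compiles. A minimal sketch of a 2.0-style observer; the RegionCoprocessor
wrapper shown is how HBase 2.0 discovers observer instances, though whether
these classes need it depends on the coprocessor loading path in use:

    import java.util.Optional;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.RegionObserver;

    public class MinimalObserver implements RegionCoprocessor, RegionObserver {
        @Override
        public Optional<RegionObserver> getRegionObserver() {
            // Hand HBase this instance; every hook keeps its default body.
            return Optional.of(this);
        }
    }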
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8ab7cc14/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java
index 3b36a7e..c8ec499 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexStoreFileScanner.java
@@ -19,13 +19,14 @@ package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.util.Map.Entry;
+import java.util.Optional;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
+import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
-import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.index.IndexMaintainer;
import org.apache.phoenix.util.PhoenixKeyValueUtil;
@@ -36,10 +37,13 @@ public class LocalIndexStoreFileScanner extends StoreFileScanner{
private IndexHalfStoreFileReader reader;
private boolean changeBottomKeys;
- public LocalIndexStoreFileScanner(Reader reader, HFileScanner hfs, boolean useMVCC,
- boolean hasMVCC, long readPt) {
- super(reader, hfs, useMVCC, hasMVCC, readPt);
- this.reader = ((IndexHalfStoreFileReader)super.getReader());
+ @SuppressWarnings("deprecation")
+ public LocalIndexStoreFileScanner(IndexHalfStoreFileReader reader, boolean cacheBlocks, boolean pread,
+ boolean isCompaction, long readPt, long scannerOrder,
+ boolean canOptimizeForNonNullColumn) {
+ super(reader, reader.getScanner(cacheBlocks, pread, isCompaction), true, reader
+ .getHFileReader().hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn);
+ this.reader = reader;
this.changeBottomKeys =
this.reader.getRegionInfo().getStartKey().length == 0
&& this.reader.getSplitRow().length != this.reader.getOffset();
@@ -114,13 +118,14 @@ public class LocalIndexStoreFileScanner extends StoreFileScanner{
public boolean seekToPreviousRow(Cell key) throws IOException {
KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(key);
if (reader.isTop()) {
- byte[] fk = reader.getFirstKey();
+ Optional<Cell> firstKey = reader.getFirstKey();
// This will be null when the file is empty, in which case we cannot seekBefore
// to any key
- if (fk == null) {
+ if (!firstKey.isPresent()) {
return false;
}
- if (getComparator().compare(kv.getRowArray(), kv.getKeyOffset(), kv.getKeyLength(), fk, 0, fk.length) <= 0) {
+ if (getComparator().compare(kv, firstKey.get()) <= 0) {
return super.seekToPreviousRow(key);
}
KeyValue replacedKey = getKeyPresentInHFiles(kv.getRowArray());
@@ -132,7 +137,8 @@ public class LocalIndexStoreFileScanner extends StoreFileScanner{
} else {
// The equals sign isn't strictly necessary; it's just here to be consistent
// with seekTo
- if (getComparator().compare(kv.getRowArray(), kv.getKeyOffset(), kv.getKeyLength(), reader.getSplitkey(), 0, reader.getSplitkey().length) >= 0) {
+ KeyValue splitKeyValue = KeyValueUtil.createKeyValueFromKey(reader.getSplitkey());
+ if (getComparator().compare(kv, splitKeyValue) >= 0) {
boolean seekToPreviousRow = super.seekToPreviousRow(kv);
while(super.peek()!=null && !isSatisfiedMidKeyCondition(super.peek())) {
seekToPreviousRow = super.seekToPreviousRow(super.peek());
@@ -221,8 +227,9 @@ public class LocalIndexStoreFileScanner extends StoreFileScanner{
public boolean seekOrReseek(Cell cell, boolean isSeek) throws IOException{
KeyValue kv = PhoenixKeyValueUtil.maybeCopyCell(cell);
KeyValue keyToSeek = kv;
+ KeyValue splitKeyValue = KeyValueUtil.createKeyValueFromKey(reader.getSplitkey());
if (reader.isTop()) {
- if(getComparator().compare(kv.getRowArray(), kv.getKeyOffset(), kv.getKeyLength(), reader.getSplitkey(), 0, reader.getSplitkey().length) < 0){
+ if(getComparator().compare(kv, splitKeyValue) < 0){
if(!isSeek && realSeekDone()) {
return true;
}
@@ -231,7 +238,7 @@ public class LocalIndexStoreFileScanner extends StoreFileScanner{
keyToSeek = getKeyPresentInHFiles(kv.getRowArray());
return seekOrReseekToProperKey(isSeek, keyToSeek);
} else {
- if (getComparator().compare(kv.getRowArray(), kv.getKeyOffset(), kv.getKeyLength(), reader.getSplitkey(), 0, reader.getSplitkey().length) >= 0) {
+ if (getComparator().compare(kv, splitKeyValue) >= 0) {
close();
return false;
}
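
The comparator changes in this file reflect HBase 2.0 dropping the byte[]-range
compare overloads: CellComparator works Cell-to-Cell, so a raw serialized key
such as the reader's split key must be wrapped first. A minimal sketch of that
wrapping, assuming splitkey is a full serialized KeyValue key as the reader
stores it:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.KeyValueUtil;

    // Wrap the raw key bytes so a Cell-based comparator can consume them.
    KeyValue splitKeyValue = KeyValueUtil.createKeyValueFromKey(splitkey);
    boolean atOrPastSplit = getComparator().compare(cell, splitKeyValue) >= 0;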
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8ab7cc14/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java
index 126e0b1..040b98b 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java
@@ -19,11 +19,11 @@
package org.apache.hadoop.hbase.regionserver;
+import java.util.List;
+
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
-import java.util.List;
-
/**
* {@link ScannerContext} has all of its methods package-visible. To properly update
* the context progress for our scanners we need this helper
@@ -31,7 +31,8 @@ import java.util.List;
public class ScannerContextUtil {
public static void incrementSizeProgress(ScannerContext sc, List<Cell> cells) {
for (Cell cell : cells) {
- sc.incrementSizeProgress(CellUtil.estimatedHeapSizeOfWithoutTags(cell));
+ sc.incrementSizeProgress(CellUtil.estimatedSerializedSizeOf(cell),
+ CellUtil.estimatedHeapSizeOf(cell));
}
}
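
The two-argument call mirrors HBase 2.0's split size accounting: scanner limits
now track the serialized (data) size and the on-heap size separately, where 1.x
tracked a single heap estimate. Per cell, the helper above effectively charges
both dimensions (a restatement for clarity, not new logic):

    // Charge both size dimensions against the scanner's progress limits.
    long dataSize = CellUtil.estimatedSerializedSizeOf(cell); // bytes as serialized
    long heapSize = CellUtil.estimatedHeapSizeOf(cell);       // bytes on the JVM heap
    sc.incrementSizeProgress(dataSize, heapSize);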
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8ab7cc14/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 332ecf2..4e4e0f5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -1325,7 +1325,6 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
}
}
-
/*
* TODO: use waitForFlushes PHOENIX-4352
*/
http://git-wip-us.apache.org/repos/asf/phoenix/blob/8ab7cc14/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index c26d2cb..90760bc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -39,7 +39,6 @@ import java.util.Map;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
@@ -50,6 +49,7 @@ import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -541,7 +541,7 @@ public class IndexUtil {
} else {
TableName dataTable =
TableName.valueOf(MetaDataUtil.getLocalIndexUserTableName(
- environment.getRegion().getTableDesc().getTableName().getNameAsString()));
+ environment.getRegion().getTableDescriptor().getTableName().getNameAsString()));
Table table = null;
try {
table = environment.getConnection().getTable(dataTable);
@@ -749,10 +749,10 @@ public class IndexUtil {
}
public static boolean isLocalIndexStore(Store store) {
- return store.getFamily().getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX);
+ return store.getColumnFamilyDescriptor().getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX);
}
- public static PTable getPDataTable(Connection conn, HTableDescriptor tableDesc) throws SQLException {
+ public static PTable getPDataTable(Connection conn, TableDescriptor tableDesc) throws SQLException {
String dataTableName = Bytes.toString(tableDesc.getValue(MetaDataUtil.DATA_TABLE_NAME_PROP_BYTES));
String physicalTableName = tableDesc.getTableName().getNameAsString();
PTable pDataTable = null;
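
getPDataTable now reads the Phoenix data-table name off the HBase 2.0
TableDescriptor interface instead of HTableDescriptor, which 2.0 deprecates;
the lookup itself is unchanged. A minimal sketch of that property read,
mirroring the code above:

    // Resolve the data table name recorded on the physical table's descriptor.
    byte[] raw = tableDesc.getValue(MetaDataUtil.DATA_TABLE_NAME_PROP_BYTES);
    String dataTableName = raw == null ? null : Bytes.toString(raw);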