Posted to commits@hbase.apache.org by st...@apache.org on 2020/01/27 04:52:28 UTC
[hbase] branch branch-2 updated: Revert "HBASE-23705 Add CellComparator to HFileContext (#1062)"
This is an automated email from the ASF dual-hosted git repository.
stack pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2 by this push:
new f4b0ad9 Revert "HBASE-23705 Add CellComparator to HFileContext (#1062)"
f4b0ad9 is described below
commit f4b0ad9f6af455ee889560acb79a0e7c68ab1e2b
Author: stack <st...@apache.org>
AuthorDate: Sun Jan 26 20:49:03 2020 -0800
Revert "HBASE-23705 Add CellComparator to HFileContext (#1062)"
This reverts commit f817293fb4328922462a1ef25d56cdab425853e1.
---
.../org/apache/hadoop/hbase/CellComparator.java | 24 +--
.../apache/hadoop/hbase/CellComparatorImpl.java | 43 +----
.../java/org/apache/hadoop/hbase/TableName.java | 3 -
.../io/encoding/AbstractDataBlockEncoder.java | 15 +-
.../io/encoding/BufferedDataBlockEncoder.java | 15 +-
.../hbase/io/encoding/CopyKeyDataBlockEncoder.java | 10 +-
.../hadoop/hbase/io/encoding/DataBlockEncoder.java | 19 ++-
.../hbase/io/encoding/DiffKeyDeltaEncoder.java | 7 +-
.../hbase/io/encoding/FastDiffDeltaEncoder.java | 7 +-
.../io/encoding/HFileBlockDecodingContext.java | 5 +-
.../encoding/HFileBlockDefaultDecodingContext.java | 6 +-
.../encoding/HFileBlockDefaultEncodingContext.java | 13 +-
.../io/encoding/HFileBlockEncodingContext.java | 5 +
.../hbase/io/encoding/PrefixKeyDeltaEncoder.java | 7 +-
.../hadoop/hbase/io/encoding/RowIndexCodecV1.java | 15 +-
.../hbase/io/encoding/RowIndexEncoderV1.java | 6 +-
.../hadoop/hbase/io/encoding/RowIndexSeekerV1.java | 30 +++-
.../apache/hadoop/hbase/io/hfile/HFileContext.java | 31 ++--
.../hadoop/hbase/io/hfile/HFileContextBuilder.java | 10 +-
.../hadoop/hbase/mapreduce/HFileOutputFormat2.java | 178 ++++++++++++---------
.../hadoop/hbase/io/hfile/FixedFileTrailer.java | 167 +++++++++----------
.../org/apache/hadoop/hbase/io/hfile/HFile.java | 62 +++----
.../apache/hadoop/hbase/io/hfile/HFileBlock.java | 60 ++++---
.../apache/hadoop/hbase/io/hfile/HFileInfo.java | 16 +-
.../hadoop/hbase/io/hfile/HFileReaderImpl.java | 56 ++++---
.../hadoop/hbase/io/hfile/HFileWriterImpl.java | 70 ++++----
.../java/org/apache/hadoop/hbase/mob/MobUtils.java | 26 ++-
.../apache/hadoop/hbase/regionserver/HStore.java | 88 +++++-----
.../hadoop/hbase/regionserver/StoreFileWriter.java | 42 +++--
.../wal/BoundedRecoveredHFilesOutputSink.java | 10 +-
.../hadoop/hbase/HFilePerformanceEvaluation.java | 1 +
.../hbase/io/encoding/TestDataBlockEncoders.java | 14 +-
.../io/encoding/TestSeekToBlockWithEncoders.java | 5 +-
.../hadoop/hbase/io/hfile/TestCacheOnWrite.java | 3 +-
.../hbase/io/hfile/TestFixedFileTrailer.java | 4 -
.../apache/hadoop/hbase/io/hfile/TestHFile.java | 6 +-
.../hadoop/hbase/io/hfile/TestHFileReaderImpl.java | 6 +-
.../hfile/TestHFileScannerImplReferenceCount.java | 3 +-
.../hadoop/hbase/io/hfile/TestHFileSeek.java | 2 +
.../hadoop/hbase/io/hfile/TestHFileWriterV3.java | 2 +
.../apache/hadoop/hbase/io/hfile/TestPrefetch.java | 2 +
.../apache/hadoop/hbase/io/hfile/TestReseekTo.java | 3 +
.../apache/hadoop/hbase/io/hfile/TestSeekTo.java | 4 +-
.../master/procedure/TestIgnoreUnknownFamily.java | 4 +-
.../hadoop/hbase/regionserver/TestBulkLoad.java | 4 +-
.../regionserver/TestBulkLoadReplication.java | 2 +-
.../regionserver/TestGetClosestAtOrBefore.java | 24 ++-
.../regionserver/TestHRegionReplayEvents.java | 4 +-
.../regionserver/TestScannerWithBulkload.java | 3 +-
.../regionserver/TestStoreScannerClosure.java | 3 +-
50 files changed, 623 insertions(+), 522 deletions(-)
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
index 474f772..83a868d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
@@ -17,9 +17,8 @@
*/
package org.apache.hadoop.hbase;
-import java.nio.ByteBuffer;
import java.util.Comparator;
-import org.apache.hadoop.hbase.util.ByteBufferUtils;
+
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
@@ -32,12 +31,11 @@ import org.apache.yetus.audience.InterfaceStability;
public interface CellComparator extends Comparator<Cell> {
/**
* A comparator for ordering cells in user-space tables. Useful when writing cells in sorted
- * order as necessary for bulk import (i.e. via MapReduce).
+ * order as necessary for bulk import (i.e. via MapReduce)
* <p>
* CAUTION: This comparator may provide inaccurate ordering for cells from system tables,
* and should not be relied upon in that case.
*/
- // For internal use, see CellComparatorImpl utility methods.
static CellComparator getInstance() {
return CellComparatorImpl.COMPARATOR;
}
@@ -83,24 +81,6 @@ public interface CellComparator extends Comparator<Cell> {
int compareRows(Cell cell, byte[] bytes, int offset, int length);
/**
- * @param row ByteBuffer that wraps a row; will read from current position and will reading all
- * remaining; will not disturb the ByteBuffer internal state.
- * @return greater than 0 if leftCell is bigger, less than 0 if rightCell is bigger, 0 if both
- * cells are equal
- */
- default int compareRows(ByteBuffer row, Cell cell) {
- if (cell instanceof ByteBufferExtendedCell) {
- return ByteBufferUtils.compareTo(row, row.position(), row.remaining(),
- ((ByteBufferExtendedCell) cell).getRowByteBuffer(),
- ((ByteBufferExtendedCell) cell).getRowPosition(),
- cell.getRowLength());
- }
- return ByteBufferUtils.compareTo(row, row.position(), row.remaining(),
- cell.getRowArray(), cell.getRowOffset(),
- cell.getRowLength());
- }
-
- /**
* Lexographically compares the two cells excluding the row part. It compares family, qualifier,
* timestamp and the type
* @param leftCell the left hand side cell
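Note: the default compareRows(ByteBuffer, Cell) removed above let a caller compare a ByteBuffer-wrapped row against a Cell without first copying the row into a byte[]. A minimal sketch of that calling pattern (the class and method names are illustrative, and it compiles only against the pre-revert interface):

import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;

final class RowBufferCompareSketch {
  // Returns true when the row wrapped by rowBuffer equals the cell's row.
  // Per the deleted javadoc, the buffer is read from position() to limit()
  // and its internal state is left undisturbed.
  static boolean sameRow(CellComparator comparator, ByteBuffer rowBuffer, Cell cell) {
    return comparator.compareRows(rowBuffer, cell) == 0;
  }
}

After this revert, the equivalent comparison lives in a private helper instead (see RowIndexSeekerV1.compareRows further down in this patch).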
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
index e6c8e3d..c647318 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hbase;
-import java.nio.ByteBuffer;
import java.util.Comparator;
+
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.hbase.util.Bytes;
@@ -26,6 +26,7 @@ import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
import org.apache.hbase.thirdparty.com.google.common.primitives.Longs;
/**
@@ -377,26 +378,6 @@ public class CellComparatorImpl implements CellComparator {
}
@Override
- public int compareRows(ByteBuffer row, Cell cell) {
- byte [] array;
- int offset;
- int len = row.remaining();
- if (row.hasArray()) {
- array = row.array();
- offset = row.position() + row.arrayOffset();
- } else {
- // We copy the row array if offheap just so we can do a compare. We do this elsewhere too
- // in BBUtils when Cell is backed by an offheap ByteBuffer. Needs fixing so no copy. TODO.
- array = new byte[len];
- offset = 0;
- ByteBufferUtils.copyFromBufferToArray(array, row, row.position(),
- 0, len);
- }
- // Reverse result since we swap the order of the params we pass below.
- return -compareRows(cell, array, offset, len);
- }
-
- @Override
public Comparator getSimpleComparator() {
return this;
}
@@ -406,24 +387,4 @@ public class CellComparatorImpl implements CellComparator {
public Comparator getSimpleComparator() {
return new BBKVComparator(this);
}
-
- /**
- * Utility method that makes a guess at comparator to use based off passed tableName.
- * Use in extreme when no comparator specified.
- * @return CellComparator to use going off the {@code tableName} passed.
- */
- public static CellComparator getCellComparator(TableName tableName) {
- return getCellComparator(tableName.toBytes());
- }
-
- /**
- * Utility method that makes a guess at comparator to use based off passed tableName.
- * Use in extreme when no comparator specified.
- * @return CellComparator to use going off the {@code tableName} passed.
- */
- public static CellComparator getCellComparator(byte [] tableName) {
- // FYI, TableName.toBytes does not create an array; just returns existing array pointer.
- return Bytes.equals(tableName, TableName.META_TABLE_NAME.toBytes())?
- CellComparatorImpl.META_COMPARATOR: CellComparatorImpl.COMPARATOR;
- }
}
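Note: the deleted getCellComparator utilities above guessed the comparator from the table name: hbase:meta row keys embed other tables' row keys, so they need META_COMPARATOR, while every other table uses the plain lexicographic COMPARATOR. A standalone restatement of that rule, for reference (ComparatorGuessSketch is an illustrative name):

import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

final class ComparatorGuessSketch {
  static CellComparator forTable(byte[] tableName) {
    // Same test as the reverted code: META_TABLE_NAME gets the meta
    // comparator, everything else the user-space default.
    return Bytes.equals(tableName, TableName.META_TABLE_NAME.toBytes())
        ? CellComparatorImpl.META_COMPARATOR
        : CellComparatorImpl.COMPARATOR;
  }
}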
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
index b1205e0..e6cabbc 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
@@ -286,9 +286,6 @@ public final class TableName implements Comparable<TableName> {
return qualifierAsString;
}
- /**
- * @return A pointer to TableName as String bytes.
- */
public byte[] toBytes() {
return name;
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java
index e96b800..ab95717 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java
@@ -18,8 +18,10 @@ package org.apache.hadoop.hbase.io.encoding;
import java.io.IOException;
import java.nio.ByteBuffer;
+
import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
@@ -57,13 +59,14 @@ public abstract class AbstractDataBlockEncoder implements DataBlockEncoder {
}
}
- /**
- * Decorates EncodedSeeker with a {@link HFileBlockDecodingContext}
- */
- protected abstract static class AbstractEncodedSeeker implements EncodedSeeker {
+ protected abstract static class AbstractEncodedSeeker implements
+ EncodedSeeker {
protected HFileBlockDecodingContext decodingCtx;
+ protected final CellComparator comparator;
- public AbstractEncodedSeeker(HFileBlockDecodingContext decodingCtx) {
+ public AbstractEncodedSeeker(CellComparator comparator,
+ HFileBlockDecodingContext decodingCtx) {
+ this.comparator = comparator;
this.decodingCtx = decodingCtx;
}
@@ -74,5 +77,7 @@ public abstract class AbstractDataBlockEncoder implements DataBlockEncoder {
protected boolean includesTags() {
return this.decodingCtx.getHFileContext().isIncludesTags();
}
+
}
+
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
index 46034bf..7fe2b71 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
@@ -732,8 +732,9 @@ abstract class BufferedDataBlockEncoder extends AbstractDataBlockEncoder {
protected final ObjectIntPair<ByteBuffer> tmpPair = new ObjectIntPair<>();
protected STATE current, previous;
- public BufferedEncodedSeeker(HFileBlockDecodingContext decodingCtx) {
- super(decodingCtx);
+ public BufferedEncodedSeeker(CellComparator comparator,
+ HFileBlockDecodingContext decodingCtx) {
+ super(comparator, decodingCtx);
if (decodingCtx.getHFileContext().isCompressTags()) {
try {
tagCompressionContext = new TagCompressionContext(LRUDictionary.class, Byte.MAX_VALUE);
@@ -1007,7 +1008,11 @@ abstract class BufferedDataBlockEncoder extends AbstractDataBlockEncoder {
}
/**
+ * @param cell
+ * @param out
+ * @param encodingCtx
* @return unencoded size added
+ * @throws IOException
*/
protected final int afterEncodingKeyValue(Cell cell, DataOutputStream out,
HFileBlockDefaultEncodingContext encodingCtx) throws IOException {
@@ -1097,7 +1102,7 @@ abstract class BufferedDataBlockEncoder extends AbstractDataBlockEncoder {
public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx, DataOutputStream out)
throws IOException {
if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) {
- throw new IOException(this.getClass().getName() + " only accepts "
+ throw new IOException (this.getClass().getName() + " only accepts "
+ HFileBlockDefaultEncodingContext.class.getName() + " as the " +
"encoding context.");
}
@@ -1149,8 +1154,8 @@ abstract class BufferedDataBlockEncoder extends AbstractDataBlockEncoder {
.getEncodingState();
// Write the unencodedDataSizeWritten (with header size)
Bytes.putInt(uncompressedBytesWithHeader,
- HConstants.HFILEBLOCK_HEADER_SIZE + DataBlockEncoding.ID_SIZE,
- state.unencodedDataSizeWritten);
+ HConstants.HFILEBLOCK_HEADER_SIZE + DataBlockEncoding.ID_SIZE, state.unencodedDataSizeWritten
+ );
postEncoding(encodingCtx);
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java
index 02bb843..d7ab009 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java
@@ -20,7 +20,9 @@ import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
+
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.hbase.util.Bytes;
@@ -46,8 +48,7 @@ public class CopyKeyDataBlockEncoder extends BufferedDataBlockEncoder {
+ "encoding context.");
}
- HFileBlockDefaultEncodingContext encodingCtx =
- (HFileBlockDefaultEncodingContext) blkEncodingCtx;
+ HFileBlockDefaultEncodingContext encodingCtx = (HFileBlockDefaultEncodingContext) blkEncodingCtx;
encodingCtx.prepareEncoding(out);
NoneEncoder encoder = new NoneEncoder(out, encodingCtx);
@@ -80,8 +81,9 @@ public class CopyKeyDataBlockEncoder extends BufferedDataBlockEncoder {
}
@Override
- public EncodedSeeker createSeeker(final HFileBlockDecodingContext decodingCtx) {
- return new BufferedEncodedSeeker<SeekerState>(decodingCtx) {
+ public EncodedSeeker createSeeker(CellComparator comparator,
+ final HFileBlockDecodingContext decodingCtx) {
+ return new BufferedEncodedSeeker<SeekerState>(comparator, decodingCtx) {
@Override
protected void decodeNext() {
current.keyLength = currentBuffer.getInt();
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
index d3c41fb..e6f339d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
@@ -20,6 +20,7 @@ import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
+
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
@@ -44,13 +45,20 @@ public interface DataBlockEncoder {
* Starts encoding for a block of KeyValues. Call
* {@link #endBlockEncoding(HFileBlockEncodingContext, DataOutputStream, byte[])} to finish
* encoding of a block.
+ * @param encodingCtx
+ * @param out
+ * @throws IOException
*/
void startBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out)
throws IOException;
/**
* Encodes a KeyValue.
+ * @param cell
+ * @param encodingCtx
+ * @param out
* @return unencoded kv size written
+ * @throws IOException
*/
int encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out)
throws IOException;
@@ -58,6 +66,10 @@ public interface DataBlockEncoder {
/**
* Ends encoding for a block of KeyValues. Gives a chance for the encoder to do the finishing
* stuff for the encoded block. It must be called at the end of block encoding.
+ * @param encodingCtx
+ * @param out
+ * @param uncompressedBytesWithHeader
+ * @throws IOException
*/
void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out,
byte[] uncompressedBytesWithHeader) throws IOException;
@@ -65,6 +77,7 @@ public interface DataBlockEncoder {
/**
* Decode.
* @param source Compressed stream of KeyValues.
+ * @param decodingCtx
* @return Uncompressed block of KeyValues.
* @throws IOException If there is an error in source.
*/
@@ -83,9 +96,11 @@ public interface DataBlockEncoder {
/**
* Create a HFileBlock seeker which find KeyValues within a block.
+ * @param comparator what kind of comparison should be used
+ * @param decodingCtx
* @return A newly created seeker.
*/
- EncodedSeeker createSeeker(HFileBlockDecodingContext decodingCtx);
+ EncodedSeeker createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx);
/**
* Creates a encoder specific encoding context
@@ -173,6 +188,8 @@ public interface DataBlockEncoder {
/**
* Compare the given key against the current key
+ * @param comparator
+ * @param key
* @return -1 is the passed key is smaller than the current key, 0 if equal and 1 if greater
*/
public int compareKey(CellComparator comparator, Cell key);
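Note: with createSeeker(CellComparator, HFileBlockDecodingContext) restored, the comparator travels as an explicit argument again instead of riding inside HFileContext. A sketch of a post-revert call site (names are illustrative; encoder and decodingCtx are assumed to come from the surrounding reader code):

import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder.EncodedSeeker;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;

final class SeekerFactorySketch {
  static EncodedSeeker openSeeker(DataBlockEncoder encoder,
      HFileBlockDecodingContext decodingCtx) {
    // The caller, not the HFileContext, now decides which comparator the
    // seeker uses; here, the user-space default from the interface above.
    return encoder.createSeeker(CellComparator.getInstance(), decodingCtx);
  }
}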
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java
index afb3e6d..ab93d19 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java
@@ -20,7 +20,9 @@ import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
+
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.PrivateCellUtil;
@@ -380,8 +382,9 @@ public class DiffKeyDeltaEncoder extends BufferedDataBlockEncoder {
}
@Override
- public EncodedSeeker createSeeker(HFileBlockDecodingContext decodingCtx) {
- return new BufferedEncodedSeeker<DiffSeekerState>(decodingCtx) {
+ public EncodedSeeker createSeeker(CellComparator comparator,
+ HFileBlockDecodingContext decodingCtx) {
+ return new BufferedEncodedSeeker<DiffSeekerState>(comparator, decodingCtx) {
private byte[] familyNameWithSize;
private static final int TIMESTAMP_WITH_TYPE_LENGTH =
Bytes.SIZEOF_LONG + Bytes.SIZEOF_BYTE;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java
index a5774e5..aa9a436 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java
@@ -21,7 +21,9 @@ import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
+
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.PrivateCellUtil;
@@ -395,8 +397,9 @@ public class FastDiffDeltaEncoder extends BufferedDataBlockEncoder {
}
@Override
- public EncodedSeeker createSeeker(final HFileBlockDecodingContext decodingCtx) {
- return new BufferedEncodedSeeker<FastDiffSeekerState>(decodingCtx) {
+ public EncodedSeeker createSeeker(CellComparator comparator,
+ final HFileBlockDecodingContext decodingCtx) {
+ return new BufferedEncodedSeeker<FastDiffSeekerState>(comparator, decodingCtx) {
private void decode(boolean isFirst) {
byte flag = currentBuffer.get();
if ((flag & FLAG_SAME_KEY_LENGTH) == 0) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java
index 2cbffa9..7f29302 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java
@@ -17,18 +17,20 @@
package org.apache.hadoop.hbase.io.encoding;
import java.io.IOException;
+
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.yetus.audience.InterfaceAudience;
/**
* A decoding context that is created by a reader's encoder, and is shared
- * across all of the reader's read operations.
+ * across the reader's all read operations.
*
* @see HFileBlockEncodingContext for encoding
*/
@InterfaceAudience.Private
public interface HFileBlockDecodingContext {
+
/**
* Perform all actions that need to be done before the encoder's real decoding
* process. Decompression needs to be done if
@@ -44,6 +46,7 @@ public interface HFileBlockDecodingContext {
* ByteBuffer pointed after the header but before the data
* @param onDiskBlock
* on disk data to be decoded
+ * @throws IOException
*/
void prepareDecoding(
int onDiskSizeWithoutHeader,
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultDecodingContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultDecodingContext.java
index e321a25..97d0e6b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultDecodingContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultDecodingContext.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.io.encoding;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
+
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.hbase.io.ByteBuffInputStream;
import org.apache.hadoop.hbase.io.TagCompressionContext;
@@ -40,10 +41,11 @@ import org.apache.yetus.audience.InterfaceAudience;
*
*/
@InterfaceAudience.Private
-public class HFileBlockDefaultDecodingContext implements HFileBlockDecodingContext {
+public class HFileBlockDefaultDecodingContext implements
+ HFileBlockDecodingContext {
private final HFileContext fileContext;
private TagCompressionContext tagCompressionContext;
-
+
public HFileBlockDefaultDecodingContext(HFileContext fileContext) {
this.fileContext = fileContext;
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultEncodingContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultEncodingContext.java
index 169f979..d029e1c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultEncodingContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultEncodingContext.java
@@ -17,11 +17,13 @@
package org.apache.hadoop.hbase.io.encoding;
import static org.apache.hadoop.hbase.io.compress.Compression.Algorithm.NONE;
+
import java.io.ByteArrayInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.security.SecureRandom;
+
import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
import org.apache.hadoop.hbase.io.TagCompressionContext;
import org.apache.hadoop.hbase.io.compress.Compression;
@@ -34,6 +36,7 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.yetus.audience.InterfaceAudience;
+
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
/**
@@ -44,7 +47,8 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
*
*/
@InterfaceAudience.Private
-public class HFileBlockDefaultEncodingContext implements HFileBlockEncodingContext {
+public class HFileBlockDefaultEncodingContext implements
+ HFileBlockEncodingContext {
private BlockType blockType;
private final DataBlockEncoding encodingAlgo;
@@ -109,6 +113,7 @@ public class HFileBlockDefaultEncodingContext implements HFileBlockEncodingConte
/**
* prepare to start a new encoding.
+ * @throws IOException
*/
public void prepareEncoding(DataOutputStream out) throws IOException {
if (encodingAlgo != null && encodingAlgo != DataBlockEncoding.NONE) {
@@ -128,8 +133,7 @@ public class HFileBlockDefaultEncodingContext implements HFileBlockEncodingConte
}
private Bytes compressAfterEncoding(byte[] uncompressedBytesWithHeaderBuffer,
- int uncompressedBytesWithHeaderOffset, int uncompressedBytesWithHeaderLength,
- byte[] headerBytes)
+ int uncompressedBytesWithHeaderOffset, int uncompressedBytesWithHeaderLength, byte[] headerBytes)
throws IOException {
Encryption.Context cryptoContext = fileContext.getEncryptionContext();
if (cryptoContext != Encryption.Context.NONE) {
@@ -154,8 +158,7 @@ public class HFileBlockDefaultEncodingContext implements HFileBlockEncodingConte
compressedByteStream.reset();
compressionStream.resetState();
compressionStream.write(uncompressedBytesWithHeaderBuffer,
- headerBytes.length + uncompressedBytesWithHeaderOffset,
- uncompressedBytesWithHeaderLength - headerBytes.length);
+ headerBytes.length + uncompressedBytesWithHeaderOffset, uncompressedBytesWithHeaderLength - headerBytes.length);
compressionStream.flush();
compressionStream.finish();
byte[] plaintext = compressedByteStream.toByteArray();
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockEncodingContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockEncodingContext.java
index 487dd45..9602229 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockEncodingContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockEncodingContext.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.hbase.io.encoding;
import java.io.IOException;
+
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.util.Bytes;
@@ -45,6 +46,9 @@ public interface HFileBlockEncodingContext {
/**
* Do any action that needs to be performed after the encoding.
* Compression is also included if a non-null compression algorithm is used
+ *
+ * @param blockType
+ * @throws IOException
*/
void postEncoding(BlockType blockType) throws IOException;
@@ -60,6 +64,7 @@ public interface HFileBlockEncodingContext {
/**
* Sets the encoding state.
+ * @param state
*/
void setEncodingState(EncodingState state);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java
index 7af0c8c..176bea3 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java
@@ -21,7 +21,9 @@ import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
+
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.PrivateCellUtil;
@@ -193,8 +195,9 @@ public class PrefixKeyDeltaEncoder extends BufferedDataBlockEncoder {
}
@Override
- public EncodedSeeker createSeeker(final HFileBlockDecodingContext decodingCtx) {
- return new BufferedEncodedSeeker<SeekerState>(decodingCtx) {
+ public EncodedSeeker createSeeker(CellComparator comparator,
+ final HFileBlockDecodingContext decodingCtx) {
+ return new BufferedEncodedSeeker<SeekerState>(comparator, decodingCtx) {
@Override
protected void decodeNext() {
current.keyLength = ByteBuff.readCompressedInt(currentBuffer);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexCodecV1.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexCodecV1.java
index c80a0b0..7f491ed 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexCodecV1.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexCodecV1.java
@@ -22,7 +22,10 @@ import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
+
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
@@ -68,8 +71,7 @@ public class RowIndexCodecV1 extends AbstractDataBlockEncoder {
+ "encoding context.");
}
- HFileBlockDefaultEncodingContext encodingCtx =
- (HFileBlockDefaultEncodingContext) blkEncodingCtx;
+ HFileBlockDefaultEncodingContext encodingCtx = (HFileBlockDefaultEncodingContext) blkEncodingCtx;
encodingCtx.prepareEncoding(out);
RowIndexEncoderV1 encoder = new RowIndexEncoderV1(out, encodingCtx);
@@ -113,7 +115,8 @@ public class RowIndexCodecV1 extends AbstractDataBlockEncoder {
dup.limit(sourceAsBuffer.position() + onDiskSize);
return dup.slice();
} else {
- RowIndexSeekerV1 seeker = new RowIndexSeekerV1(decodingCtx);
+ RowIndexSeekerV1 seeker = new RowIndexSeekerV1(CellComparatorImpl.COMPARATOR,
+ decodingCtx);
seeker.setCurrentBuffer(new SingleByteBuff(sourceAsBuffer));
List<Cell> kvs = new ArrayList<>();
kvs.add(seeker.getCell());
@@ -147,7 +150,9 @@ public class RowIndexCodecV1 extends AbstractDataBlockEncoder {
}
@Override
- public EncodedSeeker createSeeker(HFileBlockDecodingContext decodingCtx) {
- return new RowIndexSeekerV1(decodingCtx);
+ public EncodedSeeker createSeeker(CellComparator comparator,
+ HFileBlockDecodingContext decodingCtx) {
+ return new RowIndexSeekerV1(comparator, decodingCtx);
}
+
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexEncoderV1.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexEncoderV1.java
index 711b9db..2388714 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexEncoderV1.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexEncoderV1.java
@@ -12,7 +12,9 @@ package org.apache.hadoop.hbase.io.encoding;
import java.io.DataOutputStream;
import java.io.IOException;
+
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
import org.apache.yetus.audience.InterfaceAudience;
@@ -30,12 +32,10 @@ public class RowIndexEncoderV1 {
private NoneEncoder encoder;
private int startOffset = -1;
private ByteArrayOutputStream rowsOffsetBAOS = new ByteArrayOutputStream(64 * 4);
- private final HFileBlockEncodingContext context;
public RowIndexEncoderV1(DataOutputStream out, HFileBlockDefaultEncodingContext encodingCtx) {
this.out = out;
this.encoder = new NoneEncoder(out, encodingCtx);
- this.context = encodingCtx;
}
public int write(Cell cell) throws IOException {
@@ -56,7 +56,7 @@ public class RowIndexEncoderV1 {
throw new IOException("Key cannot be null or empty");
}
if (lastCell != null) {
- int keyComp = this.context.getHFileContext().getCellComparator().compareRows(lastCell, cell);
+ int keyComp = CellComparatorImpl.COMPARATOR.compareRows(lastCell, cell);
if (keyComp > 0) {
throw new IOException("Added a key not lexically larger than"
+ " previous. Current cell = " + cell + ", lastCell = " + lastCell);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java
index 7ff7555..9c0532e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java
@@ -17,6 +17,8 @@
package org.apache.hadoop.hbase.io.encoding;
import java.nio.ByteBuffer;
+
+import org.apache.hadoop.hbase.ByteBufferExtendedCell;
import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue;
import org.apache.hadoop.hbase.ByteBufferKeyValue;
import org.apache.hadoop.hbase.Cell;
@@ -48,11 +50,10 @@ public class RowIndexSeekerV1 extends AbstractEncodedSeeker {
private int rowNumber;
private ByteBuff rowOffsets = null;
- private final CellComparator cellComparator;
- public RowIndexSeekerV1(HFileBlockDecodingContext decodingCtx) {
- super(decodingCtx);
- this.cellComparator = decodingCtx.getHFileContext().getCellComparator();
+ public RowIndexSeekerV1(CellComparator comparator,
+ HFileBlockDecodingContext decodingCtx) {
+ super(comparator, decodingCtx);
}
@Override
@@ -130,7 +131,8 @@ public class RowIndexSeekerV1 extends AbstractEncodedSeeker {
int comp = 0;
while (low <= high) {
mid = low + ((high - low) >> 1);
- comp = this.cellComparator.compareRows(getRow(mid), seekCell);
+ ByteBuffer row = getRow(mid);
+ comp = compareRows(row, seekCell);
if (comp < 0) {
low = mid + 1;
} else if (comp > 0) {
@@ -152,6 +154,19 @@ public class RowIndexSeekerV1 extends AbstractEncodedSeeker {
}
}
+ private int compareRows(ByteBuffer row, Cell seekCell) {
+ if (seekCell instanceof ByteBufferExtendedCell) {
+ return ByteBufferUtils.compareTo(row, row.position(), row.remaining(),
+ ((ByteBufferExtendedCell) seekCell).getRowByteBuffer(),
+ ((ByteBufferExtendedCell) seekCell).getRowPosition(),
+ seekCell.getRowLength());
+ } else {
+ return ByteBufferUtils.compareTo(row, row.position(), row.remaining(),
+ seekCell.getRowArray(), seekCell.getRowOffset(),
+ seekCell.getRowLength());
+ }
+ }
+
private ByteBuffer getRow(int index) {
int offset = rowOffsets.getIntAfterPosition(index * Bytes.SIZEOF_INT);
ByteBuff block = currentBuffer.duplicate();
@@ -176,8 +191,8 @@ public class RowIndexSeekerV1 extends AbstractEncodedSeeker {
}
}
do {
- int comp =
- PrivateCellUtil.compareKeyIgnoresMvcc(this.cellComparator, seekCell, current.currentKey);
+ int comp;
+ comp = PrivateCellUtil.compareKeyIgnoresMvcc(comparator, seekCell, current.currentKey);
if (comp == 0) { // exact match
if (seekBefore) {
if (!previous.isValid()) {
@@ -385,4 +400,5 @@ public class RowIndexSeekerV1 extends AbstractEncodedSeeker {
return ret;
}
}
+
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
index ea4782d..d606497 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
@@ -17,8 +17,6 @@
*/
package org.apache.hadoop.hbase.io.hfile;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.compress.Compression;
@@ -30,10 +28,9 @@ import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.yetus.audience.InterfaceAudience;
/**
- * Read-only HFile Context Information. Meta data that is used by HFileWriter/Readers and by
- * HFileBlocks. Create one using the {@link HFileContextBuilder} (See HFileInfo and the HFile
- * Trailer class).
- * @see HFileContextBuilder
+ * This carries the information on some of the meta data about the HFile. This
+ * meta data is used across the HFileWriter/Readers and the HFileBlocks.
+ * This helps to add new information to the HFile.
*/
@InterfaceAudience.Private
public class HFileContext implements HeapSize, Cloneable {
@@ -45,7 +42,7 @@ public class HFileContext implements HeapSize, Cloneable {
//byte[] headers for column family and table name
2 * ClassSize.ARRAY + 2 * ClassSize.REFERENCE);
- private static final int DEFAULT_BYTES_PER_CHECKSUM = 16 * 1024;
+ public static final int DEFAULT_BYTES_PER_CHECKSUM = 16 * 1024;
/** Whether checksum is enabled or not**/
private boolean usesHBaseChecksum = true;
@@ -70,7 +67,6 @@ public class HFileContext implements HeapSize, Cloneable {
private String hfileName;
private byte[] columnFamily;
private byte[] tableName;
- private CellComparator cellComparator;
//Empty constructor. Go with setters
public HFileContext() {
@@ -78,6 +74,7 @@ public class HFileContext implements HeapSize, Cloneable {
/**
* Copy constructor
+ * @param context
*/
public HFileContext(HFileContext context) {
this.usesHBaseChecksum = context.usesHBaseChecksum;
@@ -94,14 +91,13 @@ public class HFileContext implements HeapSize, Cloneable {
this.hfileName = context.hfileName;
this.columnFamily = context.columnFamily;
this.tableName = context.tableName;
- this.cellComparator = context.cellComparator;
}
HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
Compression.Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
int bytesPerChecksum, int blockSize, DataBlockEncoding encoding,
Encryption.Context cryptoContext, long fileCreateTime, String hfileName,
- byte[] columnFamily, byte[] tableName, CellComparator cellComparator) {
+ byte[] columnFamily, byte[] tableName) {
this.usesHBaseChecksum = useHBaseChecksum;
this.includesMvcc = includesMvcc;
this.includesTags = includesTags;
@@ -118,14 +114,11 @@ public class HFileContext implements HeapSize, Cloneable {
this.hfileName = hfileName;
this.columnFamily = columnFamily;
this.tableName = tableName;
- // If no cellComparator specified, make a guess based off tablename. If hbase:meta, then should
- // be the meta table comparator. Comparators are per table.
- this.cellComparator = cellComparator != null ? cellComparator : this.tableName != null ?
- CellComparatorImpl.getCellComparator(this.tableName) : CellComparator.getInstance();
}
/**
- * @return true when on-disk blocks are compressed, and/or encrypted; false otherwise.
+ * @return true when on-disk blocks from this file are compressed, and/or encrypted;
+ * false otherwise.
*/
public boolean isCompressedOrEncrypted() {
Compression.Algorithm compressAlgo = getCompression();
@@ -215,11 +208,6 @@ public class HFileContext implements HeapSize, Cloneable {
public byte[] getTableName() {
return this.tableName;
}
-
- public CellComparator getCellComparator() {
- return this.cellComparator;
- }
-
/**
* HeapSize implementation. NOTE : The heap size should be altered when new state variable are
* added.
@@ -275,9 +263,8 @@ public class HFileContext implements HeapSize, Cloneable {
sb.append(", columnFamily=");
sb.append(Bytes.toStringBinary(columnFamily));
}
- sb.append(", cellComparator=");
- sb.append(this.cellComparator);
sb.append("]");
return sb.toString();
}
+
}
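Note: the constructor logic deleted above defaulted the context's comparator in three steps. Restated standalone for reference (ComparatorDefaulting is an illustrative name; it compiles only against the pre-revert tree, where CellComparatorImpl.getCellComparator still exists):

import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;

final class ComparatorDefaulting {
  static CellComparator resolve(CellComparator explicit, byte[] tableName) {
    if (explicit != null) {
      return explicit; // a comparator passed to the builder wins
    }
    // Otherwise guess from the table name (meta vs. user table), falling
    // back to the user-space default when no table name is known either.
    return tableName != null
        ? CellComparatorImpl.getCellComparator(tableName)
        : CellComparator.getInstance();
  }
}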
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
index a44f273..5fa5626 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hbase.io.hfile;
-import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.crypto.Encryption;
@@ -57,7 +56,6 @@ public class HFileContextBuilder {
private String hfileName = null;
private byte[] columnFamily = null;
private byte[] tableName = null;
- private CellComparator cellComparator;
public HFileContextBuilder() {}
@@ -79,7 +77,6 @@ public class HFileContextBuilder {
this.hfileName = hfc.getHFileName();
this.columnFamily = hfc.getColumnFamily();
this.tableName = hfc.getTableName();
- this.cellComparator = hfc.getCellComparator();
}
public HFileContextBuilder withHBaseCheckSum(boolean useHBaseCheckSum) {
@@ -152,14 +149,9 @@ public class HFileContextBuilder {
return this;
}
- public HFileContextBuilder withCellComparator(CellComparator cellComparator) {
- this.cellComparator = cellComparator;
- return this;
- }
-
public HFileContext build() {
return new HFileContext(usesHBaseChecksum, includesMvcc, includesTags, compression,
compressTags, checksumType, bytesPerChecksum, blocksize, encoding, cryptoContext,
- fileCreateTime, hfileName, columnFamily, tableName, cellComparator);
+ fileCreateTime, hfileName, columnFamily, tableName);
}
}
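Note: after the revert the builder produces a context that carries table and family metadata but no comparator; writers receive the comparator separately (see the withComparator calls restored in HFileOutputFormat2 below). A minimal post-revert usage sketch, with illustrative values:

import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class ContextBuildSketch {
  static HFileContext build() {
    return new HFileContextBuilder()
        .withBlockSize(64 * 1024)               // illustrative block size
        .withColumnFamily(Bytes.toBytes("cf"))  // illustrative family
        .withTableName(Bytes.toBytes("t1"))     // illustrative table
        .build();                               // no withCellComparator anymore
  }
}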
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index e48861f..7640c6e 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -1,4 +1,4 @@
-/*
+/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -21,6 +21,7 @@ import static org.apache.hadoop.hbase.regionserver.HStoreFile.BULKLOAD_TASK_KEY;
import static org.apache.hadoop.hbase.regionserver.HStoreFile.BULKLOAD_TIME_KEY;
import static org.apache.hadoop.hbase.regionserver.HStoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY;
import static org.apache.hadoop.hbase.regionserver.HStoreFile.MAJOR_COMPACTION_KEY;
+
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.InetSocketAddress;
@@ -36,11 +37,13 @@ import java.util.TreeSet;
import java.util.UUID;
import java.util.function.Function;
import java.util.stream.Collectors;
+
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
@@ -88,6 +91,7 @@ import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
/**
@@ -188,17 +192,18 @@ public class HFileOutputFormat2
return combineTableNameSuffix(tableName, family);
}
- static <V extends Cell> RecordWriter<ImmutableBytesWritable, V> createRecordWriter(
- final TaskAttemptContext context, final OutputCommitter committer) throws IOException {
+ static <V extends Cell> RecordWriter<ImmutableBytesWritable, V>
+ createRecordWriter(final TaskAttemptContext context, final OutputCommitter committer)
+ throws IOException {
// Get the path of the temporary output file
final Path outputDir = ((FileOutputCommitter)committer).getWorkPath();
final Configuration conf = context.getConfiguration();
- final boolean writeMultipleTables =
- conf.getBoolean(MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false);
+ final boolean writeMultipleTables = conf.getBoolean(MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false) ;
final String writeTableNames = conf.get(OUTPUT_TABLE_NAME_CONF_KEY);
- if (writeTableNames == null || writeTableNames.isEmpty()) {
- throw new IllegalArgumentException("" + OUTPUT_TABLE_NAME_CONF_KEY + " cannot be empty");
+ if (writeTableNames==null || writeTableNames.isEmpty()) {
+ throw new IllegalArgumentException("Configuration parameter " + OUTPUT_TABLE_NAME_CONF_KEY
+ + " cannot be empty");
}
final FileSystem fs = outputDir.getFileSystem(conf);
// These configs. are from hbase-*.xml
@@ -207,12 +212,18 @@ public class HFileOutputFormat2
// Invented config. Add to hbase-*.xml if other than default compression.
final String defaultCompressionStr = conf.get("hfile.compression",
Compression.Algorithm.NONE.getName());
- final Algorithm defaultCompression = HFileWriterImpl.compressionByName(defaultCompressionStr);
+ final Algorithm defaultCompression = HFileWriterImpl
+ .compressionByName(defaultCompressionStr);
String compressionStr = conf.get(COMPRESSION_OVERRIDE_CONF_KEY);
- final Algorithm overriddenCompression = compressionStr != null ?
- Compression.getCompressionAlgorithmByName(compressionStr): null;
+ final Algorithm overriddenCompression;
+ if (compressionStr != null) {
+ overriddenCompression = Compression.getCompressionAlgorithmByName(compressionStr);
+ } else {
+ overriddenCompression = null;
+ }
final boolean compactionExclude = conf.getBoolean(
"hbase.mapreduce.hfileoutputformat.compaction.exclude", false);
+
final Set<String> allTableNames = Arrays.stream(writeTableNames.split(
Bytes.toString(tableSeparator))).collect(Collectors.toSet());
@@ -225,17 +236,24 @@ public class HFileOutputFormat2
String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY);
final Map<byte[], DataBlockEncoding> datablockEncodingMap
= createFamilyDataBlockEncodingMap(conf);
- final DataBlockEncoding overriddenEncoding = dataBlockEncodingStr != null ?
- DataBlockEncoding.valueOf(dataBlockEncodingStr) : null;
+ final DataBlockEncoding overriddenEncoding;
+ if (dataBlockEncodingStr != null) {
+ overriddenEncoding = DataBlockEncoding.valueOf(dataBlockEncodingStr);
+ } else {
+ overriddenEncoding = null;
+ }
return new RecordWriter<ImmutableBytesWritable, V>() {
// Map of families to writers and how much has been output on the writer.
- private final Map<byte[], WriterLength> writers = new TreeMap<>(Bytes.BYTES_COMPARATOR);
- private final Map<byte[], byte[]> previousRows = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+ private final Map<byte[], WriterLength> writers =
+ new TreeMap<>(Bytes.BYTES_COMPARATOR);
+ private final Map<byte[], byte[]> previousRows =
+ new TreeMap<>(Bytes.BYTES_COMPARATOR);
private final long now = EnvironmentEdgeManager.currentTime();
@Override
- public void write(ImmutableBytesWritable row, V cell) throws IOException {
+ public void write(ImmutableBytesWritable row, V cell)
+ throws IOException {
Cell kv = cell;
// null input == user explicitly wants to flush
if (row == null && kv == null) {
@@ -249,15 +267,13 @@ public class HFileOutputFormat2
byte[] tableNameBytes = null;
if (writeMultipleTables) {
tableNameBytes = MultiTableHFileOutputFormat.getTableName(row.get());
- tableNameBytes = TableName.valueOf(tableNameBytes).toBytes();
if (!allTableNames.contains(Bytes.toString(tableNameBytes))) {
- throw new IllegalArgumentException("TableName " + Bytes.toString(tableNameBytes) +
- " not expected");
+ throw new IllegalArgumentException("TableName '" + Bytes.toString(tableNameBytes) +
+ "' not" + " expected");
}
} else {
tableNameBytes = Bytes.toBytes(writeTableNames);
}
- Path tableRelPath = getTableRelativePath(tableNameBytes);
byte[] tableAndFamily = getTableNameSuffixedWithFamily(tableNameBytes, family);
WriterLength wl = this.writers.get(tableAndFamily);
@@ -265,7 +281,8 @@ public class HFileOutputFormat2
if (wl == null) {
Path writerPath = null;
if (writeMultipleTables) {
- writerPath = new Path(outputDir,new Path(tableRelPath, Bytes.toString(family)));
+ writerPath = new Path(outputDir, new Path(Bytes.toString(tableNameBytes), Bytes
+ .toString(family)));
}
else {
writerPath = new Path(outputDir, Bytes.toString(family));
@@ -292,23 +309,33 @@ public class HFileOutputFormat2
connection.getRegionLocator(TableName.valueOf(tableName))) {
loc = locator.getRegionLocation(rowKey);
} catch (Throwable e) {
- LOG.warn("Something wrong locating rowkey {} in {}",
- Bytes.toString(rowKey), tableName, e);
+ LOG.warn("There's something wrong when locating rowkey: " +
+ Bytes.toString(rowKey) + " for tablename: " + tableName, e);
loc = null;
} }
if (null == loc) {
- LOG.trace("Failed get of location, use default writer {}", Bytes.toString(rowKey));
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("failed to get region location, so use default writer for rowkey: " +
+ Bytes.toString(rowKey));
+ }
wl = getNewWriter(tableNameBytes, family, conf, null);
} else {
- LOG.debug("First rowkey: [{}]", Bytes.toString(rowKey));
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("first rowkey: [" + Bytes.toString(rowKey) + "]");
+ }
InetSocketAddress initialIsa =
new InetSocketAddress(loc.getHostname(), loc.getPort());
if (initialIsa.isUnresolved()) {
- LOG.trace("Failed resolve address {}, use default writer", loc.getHostnamePort());
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("failed to resolve bind address: " + loc.getHostname() + ":"
+ + loc.getPort() + ", so use default writer");
+ }
wl = getNewWriter(tableNameBytes, family, conf, null);
} else {
- LOG.debug("Use favored nodes writer: {}", initialIsa.getHostString());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("use favored nodes writer: " + initialIsa.getHostString());
+ }
wl = getNewWriter(tableNameBytes, family, conf, new InetSocketAddress[] { initialIsa
});
}
@@ -327,16 +354,6 @@ public class HFileOutputFormat2
this.previousRows.put(family, rowKey);
}
- private Path getTableRelativePath(byte[] tableNameBytes) {
- String tableName = Bytes.toString(tableNameBytes);
- String[] tableNameParts = tableName.split(":");
- Path tableRelPath = new Path(tableName.split(":")[0]);
- if (tableNameParts.length > 1) {
- tableRelPath = new Path(tableRelPath, tableName.split(":")[1]);
- }
- return tableRelPath;
- }
-
private void rollWriters(WriterLength writerLength) throws IOException {
if (writerLength != null) {
closeWriter(writerLength);
@@ -349,27 +366,29 @@ public class HFileOutputFormat2
private void closeWriter(WriterLength wl) throws IOException {
if (wl.writer != null) {
- LOG.info("Writer=" + wl.writer.getPath() +
- ((wl.written == 0)? "": ", wrote=" + wl.written));
+ LOG.info(
+ "Writer=" + wl.writer.getPath() + ((wl.written == 0)? "": ", wrote=" + wl.written));
close(wl.writer);
- wl.writer = null;
}
+ wl.writer = null;
wl.written = 0;
}
/*
* Create a new StoreFile.Writer.
+ * @param family
* @return A WriterLength, containing a new StoreFile.Writer.
+ * @throws IOException
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="BX_UNBOXING_IMMEDIATELY_REBOXED",
justification="Not important")
- private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration conf,
- InetSocketAddress[] favoredNodes) throws IOException {
+ private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration
+ conf, InetSocketAddress[] favoredNodes) throws IOException {
byte[] tableAndFamily = getTableNameSuffixedWithFamily(tableName, family);
Path familydir = new Path(outputDir, Bytes.toString(family));
if (writeMultipleTables) {
familydir = new Path(outputDir,
- new Path(getTableRelativePath(tableName), Bytes.toString(family)));
+ new Path(Bytes.toString(tableName), Bytes.toString(family)));
}
WriterLength wl = new WriterLength();
Algorithm compression = overriddenCompression;
@@ -387,9 +406,12 @@ public class HFileOutputFormat2
encoding = encoding == null ? datablockEncodingMap.get(tableAndFamily) : encoding;
encoding = encoding == null ? DataBlockEncoding.NONE : encoding;
HFileContextBuilder contextBuilder = new HFileContextBuilder()
- .withCompression(compression).withChecksumType(HStore.getChecksumType(conf))
- .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf)).withBlockSize(blockSize)
- .withColumnFamily(family).withTableName(tableName);
+ .withCompression(compression)
+ .withChecksumType(HStore.getChecksumType(conf))
+ .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
+ .withBlockSize(blockSize)
+ .withColumnFamily(family)
+ .withTableName(tableName);
if (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
contextBuilder.withIncludesTags(true);
@@ -398,13 +420,16 @@ public class HFileOutputFormat2
contextBuilder.withDataBlockEncoding(encoding);
HFileContext hFileContext = contextBuilder.build();
if (null == favoredNodes) {
- wl.writer = new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, fs)
- .withOutputDir(familydir).withBloomType(bloomType)
- .withFileContext(hFileContext).build();
+ wl.writer =
+ new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, fs)
+ .withOutputDir(familydir).withBloomType(bloomType)
+ .withComparator(CellComparator.getInstance()).withFileContext(hFileContext).build();
} else {
- wl.writer = new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, new HFileSystem(fs))
- .withOutputDir(familydir).withBloomType(bloomType)
- .withFileContext(hFileContext).withFavoredNodes(favoredNodes).build();
+ wl.writer =
+ new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, new HFileSystem(fs))
+ .withOutputDir(familydir).withBloomType(bloomType)
+ .withComparator(CellComparator.getInstance()).withFileContext(hFileContext)
+ .withFavoredNodes(favoredNodes).build();
}
this.writers.put(tableAndFamily, wl);
@@ -413,17 +438,22 @@ public class HFileOutputFormat2
private void close(final StoreFileWriter w) throws IOException {
if (w != null) {
- w.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
- w.appendFileInfo(BULKLOAD_TASK_KEY, Bytes.toBytes(context.getTaskAttemptID().toString()));
- w.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(true));
- w.appendFileInfo(EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude));
+ w.appendFileInfo(BULKLOAD_TIME_KEY,
+ Bytes.toBytes(System.currentTimeMillis()));
+ w.appendFileInfo(BULKLOAD_TASK_KEY,
+ Bytes.toBytes(context.getTaskAttemptID().toString()));
+ w.appendFileInfo(MAJOR_COMPACTION_KEY,
+ Bytes.toBytes(true));
+ w.appendFileInfo(EXCLUDE_FROM_MINOR_COMPACTION_KEY,
+ Bytes.toBytes(compactionExclude));
w.appendTrackedTimestampsToMetadata();
w.close();
}
}
@Override
- public void close(TaskAttemptContext c) throws IOException, InterruptedException {
+ public void close(TaskAttemptContext c)
+ throws IOException, InterruptedException {
for (WriterLength wl: this.writers.values()) {
close(wl.writer);
}
@@ -463,18 +493,21 @@ public class HFileOutputFormat2
throws IOException {
ArrayList<ImmutableBytesWritable> ret = new ArrayList<>();
- for(RegionLocator regionLocator : regionLocators) {
+ for(RegionLocator regionLocator : regionLocators)
+ {
TableName tableName = regionLocator.getName();
LOG.info("Looking up current regions for table " + tableName);
byte[][] byteKeys = regionLocator.getStartKeys();
for (byte[] byteKey : byteKeys) {
byte[] fullKey = byteKey; //HFileOutputFormat2 use case
- if (writeMultipleTables) {
+ if (writeMultipleTables)
+ {
//MultiTableHFileOutputFormat use case
fullKey = combineTableNameSuffix(tableName.getName(), byteKey);
}
if (LOG.isDebugEnabled()) {
- LOG.debug("SplitPoint startkey for " + tableName + ": " + Bytes.toStringBinary(fullKey));
+ LOG.debug("SplitPoint startkey for table [" + tableName + "]: [" + Bytes.toStringBinary
+ (fullKey) + "]");
}
ret.add(new ImmutableBytesWritable(fullKey));
}
@@ -501,8 +534,8 @@ public class HFileOutputFormat2
TreeSet<ImmutableBytesWritable> sorted = new TreeSet<>(startKeys);
ImmutableBytesWritable first = sorted.first();
if (writeMultipleTables) {
- first =
- new ImmutableBytesWritable(MultiTableHFileOutputFormat.getSuffix(sorted.first().get()));
+ first = new ImmutableBytesWritable(MultiTableHFileOutputFormat.getSuffix(sorted.first
+ ().get()));
}
if (!first.equals(HConstants.EMPTY_BYTE_ARRAY)) {
throw new IllegalArgumentException(
@@ -605,20 +638,19 @@ public class HFileOutputFormat2
/* Now get the region start keys for every table required */
List<String> allTableNames = new ArrayList<>(multiTableInfo.size());
- List<RegionLocator> regionLocators = new ArrayList<>(multiTableInfo.size());
- List<TableDescriptor> tableDescriptors = new ArrayList<>(multiTableInfo.size());
+ List<RegionLocator> regionLocators = new ArrayList<>( multiTableInfo.size());
+ List<TableDescriptor> tableDescriptors = new ArrayList<>( multiTableInfo.size());
- for(TableInfo tableInfo : multiTableInfo) {
+ for( TableInfo tableInfo : multiTableInfo )
+ {
regionLocators.add(tableInfo.getRegionLocator());
allTableNames.add(tableInfo.getRegionLocator().getName().getNameAsString());
tableDescriptors.add(tableInfo.getTableDescriptor());
}
- // Record tablenames for creating writer by favored nodes, and decoding compression,
- // block size and other attributes of columnfamily per table
+ // Record tablenames for creating writer by favored nodes, and decoding compression, block size and other attributes of columnfamily per table
conf.set(OUTPUT_TABLE_NAME_CONF_KEY, StringUtils.join(allTableNames, Bytes
.toString(tableSeparator)));
- List<ImmutableBytesWritable> startKeys =
- getRegionStartKeys(regionLocators, writeMultipleTables);
+ List<ImmutableBytesWritable> startKeys = getRegionStartKeys(regionLocators, writeMultipleTables);
// Use table's region boundaries for TOP split points.
LOG.info("Configuring " + startKeys.size() + " reduce partitions " +
"to match current region count for all tables");
@@ -814,11 +846,9 @@ public class HFileOutputFormat2
TotalOrderPartitioner.setPartitionFile(conf, partitionsPath);
}
- @edu.umd.cs.findbugs.annotations.SuppressWarnings(value =
- "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE")
+ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE")
@VisibleForTesting
- static String serializeColumnFamilyAttribute(Function<ColumnFamilyDescriptor, String> fn,
- List<TableDescriptor> allTables)
+ static String serializeColumnFamilyAttribute(Function<ColumnFamilyDescriptor, String> fn, List<TableDescriptor> allTables)
throws UnsupportedEncodingException {
StringBuilder attributeValue = new StringBuilder();
int i = 0;
@@ -833,8 +863,8 @@ public class HFileOutputFormat2
attributeValue.append('&');
}
attributeValue.append(URLEncoder.encode(
- Bytes.toString(combineTableNameSuffix(tableDescriptor.getTableName().getName(),
- familyDescriptor.getName())), "UTF-8"));
+ Bytes.toString(combineTableNameSuffix(tableDescriptor.getTableName().getName(), familyDescriptor.getName())),
+ "UTF-8"));
attributeValue.append('=');
attributeValue.append(URLEncoder.encode(fn.apply(familyDescriptor), "UTF-8"));
}
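For reference, the attribute string assembled above is a plain URL-encoded
query string; a minimal standalone sketch (familyToValue is a hypothetical
stand-in for the descriptor walk, java.net.URLEncoder and java.util.Map
assumed imported):

    StringBuilder sb = new StringBuilder();
    for (Map.Entry<String, String> e : familyToValue.entrySet()) {
      if (sb.length() > 0) {
        sb.append('&');
      }
      sb.append(URLEncoder.encode(e.getKey(), "UTF-8")).append('=')
        .append(URLEncoder.encode(e.getValue(), "UTF-8"));
    }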
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
index 35fb281..3c74d11 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
@@ -26,25 +26,27 @@ import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
+
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellComparatorImpl.MetaCellComparator;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.io.compress.Compression;
+import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.yetus.audience.InterfaceAudience;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;
/**
* The {@link HFile} has a fixed trailer which contains offsets to other
* variable parts of the file. Also includes basic metadata on this file. The
* trailer size is fixed within a given {@link HFile} format version only, but
* we always store the version number as the last four-byte integer of the file.
- * The version number itself is split into two portions, a major
+ * The version number itself is split into two portions, a major
* version and a minor version. The last three bytes of a file are the major
* version and a single preceding byte is the minor number. The major version
* determines which readers/writers to use to read/write a hfile while a minor
@@ -75,24 +77,16 @@ public class FixedFileTrailer {
*/
private long loadOnOpenDataOffset;
- /**
- * The number of entries in the root data index.
- */
+ /** The number of entries in the root data index. */
private int dataIndexCount;
- /**
- * Total uncompressed size of all blocks of the data index
- */
+ /** Total uncompressed size of all blocks of the data index */
private long uncompressedDataIndexSize;
- /**
- * The number of entries in the meta index
- */
+ /** The number of entries in the meta index */
private int metaIndexCount;
- /**
- * The total uncompressed size of keys/values stored in the file.
- */
+ /** The total uncompressed size of keys/values stored in the file. */
private long totalUncompressedBytes;
/**
@@ -101,9 +95,7 @@ public class FixedFileTrailer {
*/
private long entryCount;
- /**
- * The compression codec used for all blocks.
- */
+ /** The compression codec used for all blocks. */
private Compression.Algorithm compressionCodec = Compression.Algorithm.NONE;
/**
@@ -112,9 +104,7 @@ public class FixedFileTrailer {
*/
private int numDataIndexLevels;
- /**
- * The offset of the first data block.
- */
+ /** The offset of the first data block. */
private long firstDataBlockOffset;
/**
@@ -123,25 +113,17 @@ public class FixedFileTrailer {
*/
private long lastDataBlockOffset;
- /**
- * Raw key comparator class name in version 3
- */
+ /** Raw key comparator class name in version 3 */
// We could write the actual class name from 2.0 onwards and handle BC
private String comparatorClassName = CellComparator.getInstance().getClass().getName();
- /**
- * The encryption key
- */
+ /** The encryption key */
private byte[] encryptionKey;
- /**
- * The {@link HFile} format major version.
- */
+ /** The {@link HFile} format major version. */
private final int majorVersion;
- /**
- * The {@link HFile} format minor version.
- */
+ /** The {@link HFile} format minor version. */
private final int minorVersion;
FixedFileTrailer(int majorVersion, int minorVersion) {
@@ -151,7 +133,7 @@ public class FixedFileTrailer {
}
private static int[] computeTrailerSizeByVersion() {
- int[] versionToSize = new int[HFile.MAX_FORMAT_VERSION + 1];
+ int versionToSize[] = new int[HFile.MAX_FORMAT_VERSION + 1];
// We support only 2 major versions now. ie. V2, V3
versionToSize[2] = 212;
for (int version = 3; version <= HFile.MAX_FORMAT_VERSION; version++) {
@@ -165,13 +147,14 @@ public class FixedFileTrailer {
private static int getMaxTrailerSize() {
int maxSize = 0;
- for (int version = HFile.MIN_FORMAT_VERSION; version <= HFile.MAX_FORMAT_VERSION; ++version) {
+ for (int version = HFile.MIN_FORMAT_VERSION;
+ version <= HFile.MAX_FORMAT_VERSION;
+ ++version)
maxSize = Math.max(getTrailerSize(version), maxSize);
- }
return maxSize;
}
- private static final int[] TRAILER_SIZE = computeTrailerSizeByVersion();
+ private static final int TRAILER_SIZE[] = computeTrailerSizeByVersion();
private static final int MAX_TRAILER_SIZE = getMaxTrailerSize();
private static final int NOT_PB_SIZE = BlockType.MAGIC_LENGTH + Bytes.SIZEOF_INT;
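The class javadoc earlier spells out the version layout: one trailing int, the
low three bytes carrying the major version and the high byte the minor. A
sketch consistent with that layout:

    static int materializeVersion(int major, int minor) {
      return (major & 0x00ffffff) | (minor << 24);
    }
    static int extractMajorVersion(int serialized) {
      return serialized & 0x00ffffff;
    }
    static int extractMinorVersion(int serialized) {
      return serialized >>> 24;
    }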
@@ -188,6 +171,9 @@ public class FixedFileTrailer {
* Write the trailer to a data stream. We support writing version 1 for
* testing and for determining version 1 trailer size. It is also easy to see
* what fields changed in version 2.
+ *
+ * @param outputStream
+ * @throws IOException
*/
void serialize(DataOutputStream outputStream) throws IOException {
HFile.checkFormatVersion(majorVersion);
@@ -207,18 +193,18 @@ public class FixedFileTrailer {
@org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting
HFileProtos.FileTrailerProto toProtobuf() {
HFileProtos.FileTrailerProto.Builder builder = HFileProtos.FileTrailerProto.newBuilder()
- .setFileInfoOffset(fileInfoOffset)
- .setLoadOnOpenDataOffset(loadOnOpenDataOffset)
- .setUncompressedDataIndexSize(uncompressedDataIndexSize)
- .setTotalUncompressedBytes(totalUncompressedBytes)
- .setDataIndexCount(dataIndexCount)
- .setMetaIndexCount(metaIndexCount)
- .setEntryCount(entryCount)
- .setNumDataIndexLevels(numDataIndexLevels)
- .setFirstDataBlockOffset(firstDataBlockOffset)
- .setLastDataBlockOffset(lastDataBlockOffset)
- .setComparatorClassName(getHBase1CompatibleName(comparatorClassName))
- .setCompressionCodec(compressionCodec.ordinal());
+ .setFileInfoOffset(fileInfoOffset)
+ .setLoadOnOpenDataOffset(loadOnOpenDataOffset)
+ .setUncompressedDataIndexSize(uncompressedDataIndexSize)
+ .setTotalUncompressedBytes(totalUncompressedBytes)
+ .setDataIndexCount(dataIndexCount)
+ .setMetaIndexCount(metaIndexCount)
+ .setEntryCount(entryCount)
+ .setNumDataIndexLevels(numDataIndexLevels)
+ .setFirstDataBlockOffset(firstDataBlockOffset)
+ .setLastDataBlockOffset(lastDataBlockOffset)
+ .setComparatorClassName(getHBase1CompatibleName(comparatorClassName))
+ .setCompressionCodec(compressionCodec.ordinal());
if (encryptionKey != null) {
builder.setEncryptionKey(UnsafeByteOperations.unsafeWrap(encryptionKey));
}
@@ -253,6 +239,9 @@ public class FixedFileTrailer {
* Deserialize the fixed file trailer from the given stream. The version needs
* to already be specified. Make sure this is consistent with
* {@link #serialize(DataOutputStream)}.
+ *
+ * @param inputStream
+ * @throws IOException
*/
void deserialize(DataInputStream inputStream) throws IOException {
HFile.checkFormatVersion(majorVersion);
@@ -260,7 +249,7 @@ public class FixedFileTrailer {
BlockType.TRAILER.readAndCheck(inputStream);
if (majorVersion > 2
- || (majorVersion == 2 && minorVersion >= HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION)) {
+ || (majorVersion == 2 && minorVersion >= HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION)) {
deserializeFromPB(inputStream);
} else {
deserializeFromWritable(inputStream);
@@ -274,12 +263,14 @@ public class FixedFileTrailer {
/**
* Deserialize the file trailer as protobuf
+ * @param inputStream
+ * @throws IOException
*/
void deserializeFromPB(DataInputStream inputStream) throws IOException {
// read PB and skip padding
int start = inputStream.available();
HFileProtos.FileTrailerProto trailerProto =
- HFileProtos.FileTrailerProto.PARSER.parseDelimitedFrom(inputStream);
+ HFileProtos.FileTrailerProto.PARSER.parseDelimitedFrom(inputStream);
int size = start - inputStream.available();
inputStream.skip(getTrailerSize() - NOT_PB_SIZE - size);
@@ -329,6 +320,8 @@ public class FixedFileTrailer {
/**
* Deserialize the file trailer as writable data
+ * @param input
+ * @throws IOException
*/
void deserializeFromWritable(DataInput input) throws IOException {
fileInfoOffset = input.readLong();
@@ -346,13 +339,12 @@ public class FixedFileTrailer {
// TODO this is a classname encoded into an HFile's trailer. We are going to need to have
// some compat code here.
setComparatorClass(getComparatorClass(Bytes.readStringFixedSize(input,
- MAX_COMPARATOR_NAME_LENGTH)));
+ MAX_COMPARATOR_NAME_LENGTH)));
}
-
+
private void append(StringBuilder sb, String s) {
- if (sb.length() > 0) {
+ if (sb.length() > 0)
sb.append(", ");
- }
sb.append(s);
}
@@ -383,18 +375,18 @@ public class FixedFileTrailer {
/**
* Reads a file trailer from the given file.
*
- * @param istream the input stream with the ability to seek. Does not have to
- * be buffered, as only one read operation is made.
+ * @param istream the input stream with the ability to seek. Does not have to
+ * be buffered, as only one read operation is made.
* @param fileSize the file size. Can be obtained using
- * {@link org.apache.hadoop.fs.FileSystem#getFileStatus(
- *org.apache.hadoop.fs.Path)}.
+ * {@link org.apache.hadoop.fs.FileSystem#getFileStatus(
+ * org.apache.hadoop.fs.Path)}.
* @return the fixed file trailer read
* @throws IOException if failed to read from the underlying stream, or the
- * trailer is corrupted, or the version of the trailer is
- * unsupported
+ * trailer is corrupted, or the version of the trailer is
+ * unsupported
*/
public static FixedFileTrailer readFromStream(FSDataInputStream istream,
- long fileSize) throws IOException {
+ long fileSize) throws IOException {
int bufferSize = MAX_TRAILER_SIZE;
long seekPoint = fileSize - bufferSize;
if (seekPoint < 0) {
@@ -407,7 +399,7 @@ public class FixedFileTrailer {
ByteBuffer buf = ByteBuffer.allocate(bufferSize);
istream.readFully(buf.array(), buf.arrayOffset(),
- buf.arrayOffset() + buf.limit());
+ buf.arrayOffset() + buf.limit());
// Read the version from the last int of the file.
buf.position(buf.limit() - Bytes.SIZEOF_INT);
@@ -423,30 +415,30 @@ public class FixedFileTrailer {
FixedFileTrailer fft = new FixedFileTrailer(majorVersion, minorVersion);
fft.deserialize(new DataInputStream(new ByteArrayInputStream(buf.array(),
- buf.arrayOffset() + bufferSize - trailerSize, trailerSize)));
+ buf.arrayOffset() + bufferSize - trailerSize, trailerSize)));
return fft;
}
public void expectMajorVersion(int expected) {
if (majorVersion != expected) {
throw new IllegalArgumentException("Invalid HFile major version: "
- + majorVersion
- + " (expected: " + expected + ")");
+ + majorVersion
+ + " (expected: " + expected + ")");
}
}
public void expectMinorVersion(int expected) {
if (minorVersion != expected) {
throw new IllegalArgumentException("Invalid HFile minor version: "
- + minorVersion + " (expected: " + expected + ")");
+ + minorVersion + " (expected: " + expected + ")");
}
}
public void expectAtLeastMajorVersion(int lowerBound) {
if (majorVersion < lowerBound) {
throw new IllegalArgumentException("Invalid HFile major version: "
- + majorVersion
- + " (expected: " + lowerBound + " or higher).");
+ + majorVersion
+ + " (expected: " + lowerBound + " or higher).");
}
}
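Putting readFromStream together as a sketch: seek near the tail, read the last
MAX_TRAILER_SIZE bytes (clamped for tiny files), and peel the version off the
final int before deserializing the right-sized trailer:

    long seekPoint = Math.max(0, fileSize - MAX_TRAILER_SIZE);
    istream.seek(seekPoint);
    ByteBuffer buf = ByteBuffer.allocate((int) Math.min(MAX_TRAILER_SIZE, fileSize));
    istream.readFully(buf.array(), buf.arrayOffset(), buf.limit());
    buf.position(buf.limit() - Bytes.SIZEOF_INT);
    int serializedVersion = buf.getInt();  // split as in the earlier version sketch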
@@ -574,7 +566,7 @@ public class FixedFileTrailer {
* than the new name; writing the new name will make it so newly-written hfiles are not parseable
* by hbase-1.x, a facility we'd like to preserve across rolling upgrade and hbase-1.x clusters
* reading hbase-2.x produce.
- * <p>
+ *
* The Comparators in hbase-2.x work the same as they did in hbase-1.x; they compare
* KeyValues. In hbase-2.x they were renamed making use of the more generic 'Cell'
* nomenclature to indicate that we intend to move away from KeyValues post hbase-2. A naming
@@ -587,9 +579,9 @@ public class FixedFileTrailer {
*
* @param comparator String class name of the Comparator used in this hfile.
* @return What to store in the trailer as our comparator name.
- * @see #getComparatorClass(String)
* @since hbase-2.0.0.
* @deprecated Since hbase-2.0.0. Will be removed in hbase-3.0.0.
+ * @see #getComparatorClass(String)
*/
@Deprecated
private String getHBase1CompatibleName(final String comparator) {
@@ -604,20 +596,20 @@ public class FixedFileTrailer {
@SuppressWarnings("unchecked")
private static Class<? extends CellComparator> getComparatorClass(String comparatorClassName)
- throws IOException {
+ throws IOException {
Class<? extends CellComparator> comparatorKlass;
// for BC
if (comparatorClassName.equals(KeyValue.COMPARATOR.getLegacyKeyComparatorName())
- || comparatorClassName.equals(KeyValue.COMPARATOR.getClass().getName())
- || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator"))) {
+ || comparatorClassName.equals(KeyValue.COMPARATOR.getClass().getName())
+ || (comparatorClassName.equals("org.apache.hadoop.hbase.CellComparator"))) {
comparatorKlass = CellComparatorImpl.class;
} else if (comparatorClassName.equals(KeyValue.META_COMPARATOR.getLegacyKeyComparatorName())
- || comparatorClassName.equals(KeyValue.META_COMPARATOR.getClass().getName())
- || (comparatorClassName
- .equals("org.apache.hadoop.hbase.CellComparator$MetaCellComparator"))) {
+ || comparatorClassName.equals(KeyValue.META_COMPARATOR.getClass().getName())
+ || (comparatorClassName
+ .equals("org.apache.hadoop.hbase.CellComparator$MetaCellComparator"))) {
comparatorKlass = MetaCellComparator.class;
} else if (comparatorClassName.equals("org.apache.hadoop.hbase.KeyValue$RawBytesComparator")
- || comparatorClassName.equals("org.apache.hadoop.hbase.util.Bytes$ByteArrayComparator")) {
+ || comparatorClassName.equals("org.apache.hadoop.hbase.util.Bytes$ByteArrayComparator")) {
// When the comparator to be used is Bytes.BYTES_RAWCOMPARATOR, we just return null from here
// Bytes.BYTES_RAWCOMPARATOR is not a CellComparator
comparatorKlass = null;
@@ -632,22 +624,19 @@ public class FixedFileTrailer {
return comparatorKlass;
}
- static CellComparator createComparator(String comparatorClassName) throws IOException {
- if (comparatorClassName.equals(CellComparatorImpl.COMPARATOR.getClass().getName())) {
- return CellComparatorImpl.COMPARATOR;
- } else if (comparatorClassName.equals(
- CellComparatorImpl.META_COMPARATOR.getClass().getName())) {
- return CellComparatorImpl.META_COMPARATOR;
- }
+ public static CellComparator createComparator(
+ String comparatorClassName) throws IOException {
try {
+
Class<? extends CellComparator> comparatorClass = getComparatorClass(comparatorClassName);
- if (comparatorClass != null) {
+ if(comparatorClass != null){
return comparatorClass.getDeclaredConstructor().newInstance();
}
LOG.warn("No Comparator class for " + comparatorClassName + ". Returning Null.");
return null;
} catch (Exception e) {
- throw new IOException("Comparator class " + comparatorClassName + " is not instantiable", e);
+ throw new IOException("Comparator class " + comparatorClassName +
+ " is not instantiable", e);
}
}
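With the revert, createComparator is public again and resolves names
reflectively; a usage sketch of the backward-compatibility path shown above:

    // "org.apache.hadoop.hbase.CellComparator" is an hbase-1.x trailer name;
    // getComparatorClass maps it to CellComparatorImpl before instantiation.
    CellComparator c =
        FixedFileTrailer.createComparator("org.apache.hadoop.hbase.CellComparator");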
@@ -661,7 +650,7 @@ public class FixedFileTrailer {
}
public void setUncompressedDataIndexSize(
- long uncompressedDataIndexSize) {
+ long uncompressedDataIndexSize) {
expectAtLeastMajorVersion(2);
this.uncompressedDataIndexSize = uncompressedDataIndexSize;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 46cec4a..cb6a352 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -1,4 +1,4 @@
-/*
+/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@@ -26,6 +26,7 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.atomic.LongAdder;
+
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -51,6 +52,7 @@ import org.apache.hadoop.io.Writable;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
@@ -119,7 +121,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
* an HFile instance?
*/
@InterfaceAudience.Private
-public final class HFile {
+public class HFile {
// LOG is being used in HFileBlock and CheckSumUtil
static final Logger LOG = LoggerFactory.getLogger(HFile.class);
@@ -176,11 +178,6 @@ public final class HFile {
static final MetricsIO metrics = new MetricsIO(new MetricsIOWrapperImpl());
/**
- * Shutdown constructor.
- */
- private HFile() {}
-
- /**
* Number of checksum verification failures. It also
* clears the counter.
*/
@@ -225,11 +222,10 @@ public final class HFile {
*/
void addInlineBlockWriter(InlineBlockWriter bloomWriter);
- // The below three methods take Writables. We'd like to undo Writables but undoing the below
- // would be pretty painful. Could take a byte [] or a Message but we want to be backward
- // compatible around hfiles so would need to map between Message and Writable or byte [] and
- // current Writable serialization. This would be a bit of work to little gain. Thats my
- // thinking at moment. St.Ack 20121129
+ // The below three methods take Writables. We'd like to undo Writables but undoing the below would be pretty
+ // painful. Could take a byte [] or a Message but we want to be backward compatible around hfiles so would need
+ // to map between Message and Writable or byte [] and current Writable serialization. This would be a bit of work
+ // to little gain. Thats my thinking at moment. St.Ack 20121129
void appendMetaBlock(String bloomFilterMetaKey, Writable metaWriter);
@@ -262,6 +258,7 @@ public final class HFile {
protected FileSystem fs;
protected Path path;
protected FSDataOutputStream ostream;
+ protected CellComparator comparator = CellComparator.getInstance();
protected InetSocketAddress[] favoredNodes;
private HFileContext fileContext;
protected boolean shouldDropBehind = false;
@@ -285,6 +282,12 @@ public final class HFile {
return this;
}
+ public WriterFactory withComparator(CellComparator comparator) {
+ Preconditions.checkNotNull(comparator);
+ this.comparator = comparator;
+ return this;
+ }
+
public WriterFactory withFavoredNodes(InetSocketAddress[] favoredNodes) {
// Deliberately not checking for null here.
this.favoredNodes = favoredNodes;
@@ -316,7 +319,7 @@ public final class HFile {
LOG.debug("Unable to set drop behind on {}", path.getName());
}
}
- return new HFileWriterImpl(conf, cacheConf, path, ostream, fileContext);
+ return new HFileWriterImpl(conf, cacheConf, path, ostream, comparator, fileContext);
}
}
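A hedged end-to-end sketch of the restored factory chain (path and context are
illustrative): the comparator rides on the factory and reaches HFileWriterImpl
at create() time, which then enforces append ordering against it.

    HFile.Writer w = HFile.getWriterFactory(conf, cacheConf)
        .withPath(fs, path)
        .withFileContext(new HFileContextBuilder().build())
        .withComparator(CellComparator.getInstance())
        .create();
    try {
      w.append(cell);   // cells must arrive in comparator order
    } finally {
      w.close();
    }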
@@ -346,16 +349,16 @@ public final class HFile {
CacheConfig cacheConf) {
int version = getFormatVersion(conf);
switch (version) {
- case 2:
- throw new IllegalArgumentException("This should never happen. " +
- "Did you change hfile.format.version to read v2? This version of the software writes v3" +
- " hfiles only (but it can read v2 files without having to update hfile.format.version " +
- "in hbase-site.xml)");
- case 3:
- return new HFile.WriterFactory(conf, cacheConf);
- default:
- throw new IllegalArgumentException("Cannot create writer for HFile " +
- "format version " + version);
+ case 2:
+ throw new IllegalArgumentException("This should never happen. " +
+ "Did you change hfile.format.version to read v2? This version of the software writes v3" +
+ " hfiles only (but it can read v2 files without having to update hfile.format.version " +
+ "in hbase-site.xml)");
+ case 3:
+ return new HFile.WriterFactory(conf, cacheConf);
+ default:
+ throw new IllegalArgumentException("Cannot create writer for HFile " +
+ "format version " + version);
}
}
@@ -369,15 +372,18 @@ public final class HFile {
* Read in a file block.
* @param offset offset to read.
* @param onDiskBlockSize size of the block
+ * @param cacheBlock
+ * @param pread
* @param isCompaction is this block being read as part of a compaction
* @param expectedBlockType the block type we are expecting to read with this read operation,
- * or null to read whatever block type is available and avoid checking (that might reduce
- * caching efficiency of encoded data blocks)
+ * or null to read whatever block type is available and avoid checking (that might reduce
+ * caching efficiency of encoded data blocks)
* @param expectedDataBlockEncoding the data block encoding the caller is expecting data blocks
- * to be in, or null to not perform this check and return the block irrespective of the
- * encoding. This check only applies to data blocks and can be set to null when the caller is
- * expecting to read a non-data block and has set expectedBlockType accordingly.
+ * to be in, or null to not perform this check and return the block irrespective of the
+ * encoding. This check only applies to data blocks and can be set to null when the caller is
+ * expecting to read a non-data block and has set expectedBlockType accordingly.
* @return Block wrapped in a ByteBuffer.
+ * @throws IOException
*/
HFileBlock readBlock(long offset, long onDiskBlockSize,
boolean cacheBlock, final boolean pread, final boolean isCompaction,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index c82221b..b9f649c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.io.hfile;
import static org.apache.hadoop.hbase.io.ByteBuffAllocator.HEAP;
+
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
@@ -28,13 +29,18 @@ import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
+
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.ByteBuffAllocator;
+import org.apache.hadoop.hbase.io.util.BlockIOUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
-import org.apache.hadoop.hbase.io.ByteBuffAllocator;
import org.apache.hadoop.hbase.io.ByteBuffInputStream;
import org.apache.hadoop.hbase.io.ByteBufferWriterDataOutputStream;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
@@ -44,7 +50,6 @@ import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
-import org.apache.hadoop.hbase.io.util.BlockIOUtils;
import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.nio.MultiByteBuff;
import org.apache.hadoop.hbase.nio.SingleByteBuff;
@@ -52,9 +57,7 @@ import org.apache.hadoop.hbase.regionserver.ShipperListener;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.ClassSize;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
@@ -358,8 +361,8 @@ public class HFileBlock implements Cacheable {
final long prevBlockOffset = buf.getLong(Header.PREV_BLOCK_OFFSET_INDEX);
// This constructor is called when we deserialize a block from cache and when we read a block in
// from the fs. fileCache is null when deserialized from cache so need to make up one.
- HFileContextBuilder fileContextBuilder = fileContext != null ?
- new HFileContextBuilder(fileContext) : new HFileContextBuilder();
+ HFileContextBuilder fileContextBuilder =
+ fileContext != null ? new HFileContextBuilder(fileContext) : new HFileContextBuilder();
fileContextBuilder.withHBaseCheckSum(usesHBaseChecksum);
int onDiskDataSizeWithHeader;
if (usesHBaseChecksum) {
@@ -405,8 +408,8 @@ public class HFileBlock implements Cacheable {
/**
* @return the on-disk size of the next block (including the header size and any checksums if
- * present) read by peeking into the next block's header; use as a hint when doing
- * a read of the next block when scanning or running over a file.
+ * present) read by peeking into the next block's header; use as a hint when doing
+ * a read of the next block when scanning or running over a file.
*/
int getNextBlockOnDiskSize() {
return nextBlockOnDiskSize;
@@ -463,7 +466,7 @@ public class HFileBlock implements Cacheable {
/**
* @return the uncompressed size of data part (header and checksum excluded).
*/
- int getUncompressedSizeWithoutHeader() {
+ int getUncompressedSizeWithoutHeader() {
return uncompressedSizeWithoutHeader;
}
@@ -703,8 +706,8 @@ public class HFileBlock implements Cacheable {
}
/**
- * Cannot be {@link #UNSET}. Must be a legitimate value. Used re-making the {@link BlockCacheKey}
- * when block is returned to the cache.
+ * Cannot be {@link #UNSET}. Must be a legitimate value. Used re-making the {@link BlockCacheKey} when
+ * block is returned to the cache.
* @return the offset of this block in the file it was read from
*/
long getOffset() {
@@ -890,6 +893,7 @@ public class HFileBlock implements Cacheable {
* Starts writing into the block. The previous block's data is discarded.
*
* @return the stream the user can write their data into
+ * @throws IOException
*/
DataOutputStream startWriting(BlockType newBlockType)
throws IOException {
@@ -919,6 +923,8 @@ public class HFileBlock implements Cacheable {
/**
* Writes the Cell to this block
+ * @param cell
+ * @throws IOException
*/
void write(Cell cell) throws IOException{
expectState(State.WRITING);
@@ -1036,6 +1042,9 @@ public class HFileBlock implements Cacheable {
* Similar to {@link #writeHeaderAndData(FSDataOutputStream)}, but records
* the offset of this block so that it can be referenced in the next block
* of the same type.
+ *
+ * @param out
+ * @throws IOException
*/
void writeHeaderAndData(FSDataOutputStream out) throws IOException {
long offset = out.getPos();
@@ -1053,7 +1062,9 @@ public class HFileBlock implements Cacheable {
* data when not using compression) into the given stream. Can be called in
* the "writing" state or in the "block ready" state. If called in the
* "writing" state, transitions the writer to the "block ready" state.
+ *
* @param out the output stream to write the
+ * @throws IOException
*/
protected void finishBlockAndWriteHeaderAndData(DataOutputStream out)
throws IOException {
@@ -1072,6 +1083,7 @@ public class HFileBlock implements Cacheable {
* the header + data + checksums stored on disk.
*
* @return header and data as they would be stored on disk in a byte array
+ * @throws IOException
*/
byte[] getHeaderAndDataForTest() throws IOException {
ensureBlockReady();
@@ -1156,7 +1168,9 @@ public class HFileBlock implements Cacheable {
* @return the number of bytes written
*/
public int encodedBlockSizeWritten() {
- return state != State.WRITING ? 0 : this.encodedDataSizeWritten;
+ if (state != State.WRITING)
+ return 0;
+ return this.encodedDataSizeWritten;
}
/**
@@ -1167,7 +1181,8 @@ public class HFileBlock implements Cacheable {
* @return the number of bytes written
*/
int blockSizeWritten() {
- return state != State.WRITING ? 0 : this.unencodedDataSizeWritten;
+ if (state != State.WRITING) return 0;
+ return this.unencodedDataSizeWritten;
}
/**
@@ -1220,6 +1235,7 @@ public class HFileBlock implements Cacheable {
*
* @param bw the block-writable object to write as a block
* @param out the file system output stream
+ * @throws IOException
*/
void writeBlock(BlockWritable bw, FSDataOutputStream out)
throws IOException {
@@ -1416,8 +1432,7 @@ public class HFileBlock implements Cacheable {
* next blocks header seems unnecessary given we usually get the block size
* from the hfile index. Review!
*/
- private AtomicReference<PrefetchedHeader> prefetchedHeader =
- new AtomicReference<>(new PrefetchedHeader());
+ private AtomicReference<PrefetchedHeader> prefetchedHeader = new AtomicReference<>(new PrefetchedHeader());
/** The size of the file we are reading from, or -1 if unknown. */
private long fileSize;
@@ -1629,9 +1644,10 @@ public class HFileBlock implements Cacheable {
/**
* @return Check <code>onDiskSizeWithHeaderL</code> size is healthy and then return it as an int
+ * @throws IOException
*/
private static int checkAndGetSizeAsInt(final long onDiskSizeWithHeaderL, final int hdrSize)
- throws IOException {
+ throws IOException {
if ((onDiskSizeWithHeaderL < hdrSize && onDiskSizeWithHeaderL != -1)
|| onDiskSizeWithHeaderL >= Integer.MAX_VALUE) {
throw new IOException("Invalid onDisksize=" + onDiskSizeWithHeaderL
@@ -1644,10 +1660,11 @@ public class HFileBlock implements Cacheable {
/**
* Verify the passed in onDiskSizeWithHeader aligns with what is in the header else something
* is not right.
+ * @throws IOException
*/
private void verifyOnDiskSizeMatchesHeader(final int passedIn, final ByteBuff headerBuf,
- final long offset, boolean verifyChecksum)
- throws IOException {
+ final long offset, boolean verifyChecksum)
+ throws IOException {
// Assert size provided aligns with what is in the header
int fromHeader = getOnDiskSizeWithHeader(headerBuf, verifyChecksum);
if (passedIn != fromHeader) {
@@ -1811,8 +1828,7 @@ public class HFileBlock implements Cacheable {
@Override
public void setIncludesMemStoreTS(boolean includesMemstoreTS) {
- this.fileContext = new HFileContextBuilder(this.fileContext)
- .withIncludesMvcc(includesMemstoreTS).build();
+ this.fileContext.setIncludesMvcc(includesMemstoreTS);
}
@Override
@@ -2058,7 +2074,7 @@ public class HFileBlock implements Cacheable {
/**
* @return This HFileBlocks fileContext which will a derivative of the
- * fileContext for the file from which this block's data was originally read.
+ * fileContext for the file from which this block's data was originally read.
*/
HFileContext getHFileContext() {
return this.fileContext;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
index 6bd1d3c..a75aea3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
@@ -1,4 +1,4 @@
-/*
+/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@@ -32,6 +32,7 @@ import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
+
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
@@ -50,14 +51,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesP
import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;
/**
- * Metadata Map of attributes for HFile written out as HFile Trailer. Created by the Writer and
- * added to the tail of the file just before close. Metadata includes core attributes such as last
- * key seen, comparator used writing the file, etc. Clients can add their own attributes via
- * {@link #append(byte[], byte[], boolean)} and they'll be persisted and available at read time.
- * Reader creates the HFileInfo on open by reading the tail of the HFile. The parse of the HFile
- * trailer also creates a {@link HFileContext}, a read-only data structure that includes bulk of
- * the HFileInfo and extras that is safe to pass around when working on HFiles.
- * @see HFileContext
+ * Metadata for HFile. Conjured by the writer. Read in by the reader.
*/
@InterfaceAudience.Private
public class HFileInfo implements SortedMap<byte[], byte[]> {
@@ -67,6 +61,7 @@ public class HFileInfo implements SortedMap<byte[], byte[]> {
static final byte [] AVG_KEY_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_KEY_LEN");
static final byte [] AVG_VALUE_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_VALUE_LEN");
static final byte [] CREATE_TIME_TS = Bytes.toBytes(RESERVED_PREFIX + "CREATE_TIME_TS");
+ static final byte [] COMPARATOR = Bytes.toBytes(RESERVED_PREFIX + "COMPARATOR");
static final byte [] TAGS_COMPRESSED = Bytes.toBytes(RESERVED_PREFIX + "TAGS_COMPRESSED");
public static final byte [] MAX_TAGS_LEN = Bytes.toBytes(RESERVED_PREFIX + "MAX_TAGS_LEN");
private final SortedMap<byte [], byte []> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
@@ -383,8 +378,7 @@ public class HFileInfo implements SortedMap<byte[], byte[]> {
HFileContextBuilder builder = new HFileContextBuilder()
.withHBaseCheckSum(true)
.withHFileName(path.getName())
- .withCompression(trailer.getCompressionCodec())
- .withCellComparator(trailer.createComparator(trailer.getComparatorClassName()));
+ .withCompression(trailer.getCompressionCodec());
// Check for any key material available
byte[] keyBytes = trailer.getEncryptionKey();
if (keyBytes != null) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index 018d075..1b2a1d8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -22,6 +22,7 @@ import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Optional;
+
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
@@ -48,10 +49,12 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.IdLock;
import org.apache.hadoop.hbase.util.ObjectIntPair;
import org.apache.hadoop.io.WritableUtils;
+
import org.apache.htrace.core.TraceScope;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
/**
@@ -82,6 +85,9 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
*/
protected HFileDataBlockEncoder dataBlockEncoder = NoOpDataBlockEncoder.INSTANCE;
+ /** Key comparator */
+ protected CellComparator comparator = CellComparator.getInstance();
+
/** Block cache configuration. */
protected final CacheConfig cacheConf;
@@ -139,6 +145,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
this.primaryReplicaReader = context.isPrimaryReplicaReader();
this.fileInfo = fileInfo;
this.trailer = fileInfo.getTrailer();
+ // Comparator class name is stored in the trailer in version 2.
+ this.comparator = trailer.createComparator();
this.hfileContext = fileInfo.getHFileContext();
this.fsBlockReader = new HFileBlock.FSReaderImpl(context, hfileContext,
cacheConf.getByteBuffAllocator());
@@ -230,7 +238,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
/** @return comparator */
@Override
public CellComparator getComparator() {
- return this.hfileContext.getCellComparator();
+ return comparator;
}
@VisibleForTesting
@@ -390,9 +398,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
}
protected void assertSeeked() {
- if (!isSeeked()) {
+ if (!isSeeked())
throw new NotSeekedException(reader.getPath());
- }
}
@Override
@@ -469,6 +476,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
/**
* Read mvcc. Does checks to see if we even need to read the mvcc at all.
+ * @param offsetFromPos
*/
protected void readMvccVersion(final int offsetFromPos) {
// See if we even need to decode mvcc.
@@ -485,6 +493,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
/**
* Actually do the mvcc read. Does no checks.
+ * @param offsetFromPos
*/
private void _readMvccVersion(int offsetFromPos) {
// This is Bytes#bytesToVint inlined so can save a few instructions in this hot method; i.e.
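A hedged sketch of the inlined decode that comment refers to, following
WritableUtils vint semantics (a single byte when the lead byte is >= -112,
otherwise a length-coded big-endian body, complemented when negative):

    byte first = buf[pos];
    int len = WritableUtils.decodeVIntSize(first);
    long mvcc = first;
    if (len > 1) {
      mvcc = 0;
      for (int i = 1; i < len; i++) {
        mvcc = (mvcc << 8) | (buf[pos + i] & 0xff);
      }
      if (WritableUtils.isNegativeVInt(first)) {
        mvcc = ~mvcc;
      }
    }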
@@ -672,6 +681,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
* 0 if we are at the given key, 1 if we are past the given key
* -2 if the key is earlier than the first key of the file while
* using a faked index key
+ * @throws IOException
*/
public int seekTo(Cell key, boolean rewind) throws IOException {
HFileBlockIndex.BlockIndexReader indexReader = reader.getDataBlockIndexReader();
@@ -737,6 +747,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
* data block is found.
*
* @return the next block, or null if there are no more data blocks
+ * @throws IOException
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
justification="Yeah, unnecessary null check; could do w/ clean up")
@@ -753,7 +764,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
}
if (block.getOffset() < 0) {
releaseIfNotCurBlock(block);
- throw new IOException("Invalid block offset=" + block + ", path=" + reader.getPath());
+ throw new IOException(
+ "Invalid block file offset: " + block + ", path=" + reader.getPath());
}
// We are reading the next block without block type validation, because
// it might turn out to be a non-data block.
@@ -775,9 +787,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
@Override
public Cell getCell() {
- if (!isSeeked()) {
+ if (!isSeeked())
return null;
- }
Cell ret;
int cellBufSize = getKVBufSize();
@@ -876,6 +887,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
/**
* Set our selves up for the next 'next' invocation, set up next block.
* @return True is more to read else false if at the end.
+ * @throws IOException
*/
private boolean positionForNextBlock() throws IOException {
// Methods are small so they get inlined because they are 'hot'.
@@ -931,6 +943,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
*
* @return false if empty file; i.e. a call to next would return false and
* the current key and value are undefined.
+ * @throws IOException
*/
@Override
public boolean seekTo() throws IOException {
@@ -963,8 +976,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
isCompaction, true, BlockType.DATA, getEffectiveDataBlockEncoding());
if (newBlock.getOffset() < 0) {
releaseIfNotCurBlock(newBlock);
- throw new IOException("Invalid offset=" + newBlock.getOffset() +
- ", path=" + reader.getPath());
+ throw new IOException(
+ "Invalid block offset: " + newBlock.getOffset() + ", path=" + reader.getPath());
}
updateCurrentBlock(newBlock);
}
@@ -982,6 +995,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
}
/**
+ * @param v
* @return True if v <= 0 or v > current block buffer limit.
*/
protected final boolean checkKeyLen(final int v) {
@@ -989,6 +1003,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
}
/**
+ * @param v
* @return True if v < 0 or v > current block buffer limit.
*/
protected final boolean checkLen(final int v) {
@@ -1159,9 +1174,10 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
// so blocks with the old encoding still linger in cache for some
// period of time. This event should be rare as it only happens on
// schema definition change.
- LOG.info("Evicting cached block with key {} because data block encoding mismatch; " +
- "expected {}, actual {}, path={}", cacheKey, actualDataBlockEncoding,
- expectedDataBlockEncoding, path);
+ LOG.info("Evicting cached block with key " + cacheKey
+ + " because of a data block encoding mismatch" + "; expected: "
+ + expectedDataBlockEncoding + ", actual: " + actualDataBlockEncoding + ", path="
+ + path);
// This is an error scenario. so here we need to release the block.
returnAndEvictBlock(cache, cacheKey, cachedBlock);
}
@@ -1179,8 +1195,10 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
}
/**
+ * @param metaBlockName
* @param cacheBlock Add block to cache, if found
* @return block wrapped in a ByteBuffer, with header skipped
+ * @throws IOException
*/
@Override
public HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock)
@@ -1195,9 +1213,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
byte[] mbname = Bytes.toBytes(metaBlockName);
int block = metaBlockIndexReader.rootBlockContainingKey(mbname,
0, mbname.length);
- if (block == -1) {
+ if (block == -1)
return null;
- }
long blockSize = metaBlockIndexReader.getRootBlockDataSize(block);
// Per meta key from any given file, synchronize reads for said block. This
@@ -1404,6 +1421,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
/**
* @return Midkey for this file. We work with block boundaries only so
* returned midkey is an approximation only.
+ * @throws IOException
*/
@Override
public Optional<Cell> midKey() throws IOException {
@@ -1440,7 +1458,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
DataBlockEncoding encoding = reader.getDataBlockEncoding();
dataBlockEncoder = encoding.getEncoder();
decodingCtx = dataBlockEncoder.newDataBlockDecodingContext(meta);
- seeker = dataBlockEncoder.createSeeker(decodingCtx);
+ seeker = dataBlockEncoder.createSeeker(
+ reader.getComparator(), decodingCtx);
}
@Override
@@ -1459,6 +1478,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
* @param newBlock the block to make current, and read by {@link HFileReaderImpl#readBlock},
* it's a totally new block with new allocated {@link ByteBuff}, so if no further
* reference to this block, we should release it carefully.
+ * @throws CorruptHFileException
*/
@Override
protected void updateCurrentBlock(HFileBlock newBlock) throws CorruptHFileException {
@@ -1470,9 +1490,9 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
short dataBlockEncoderId = newBlock.getDataBlockEncodingId();
if (!DataBlockEncoding.isCorrectEncoder(dataBlockEncoder, dataBlockEncoderId)) {
String encoderCls = dataBlockEncoder.getClass().getName();
- throw new CorruptHFileException("Encoder " + encoderCls +
- " doesn't support data block encoding " +
- DataBlockEncoding.getNameFromId(dataBlockEncoderId) + ",path=" + reader.getPath());
+ throw new CorruptHFileException("Encoder " + encoderCls
+ + " doesn't support data block encoding "
+ + DataBlockEncoding.getNameFromId(dataBlockEncoderId) + ",path=" + reader.getPath());
}
updateCurrBlockRef(newBlock);
ByteBuff encodedBuffer = getEncodedBuffer(newBlock);
@@ -1589,7 +1609,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
}
private DataInput getBloomFilterMetadata(BlockType blockType)
- throws IOException {
+ throws IOException {
if (blockType != BlockType.GENERAL_BLOOM_META &&
blockType != BlockType.DELETE_FAMILY_BLOOM_META) {
throw new RuntimeException("Block Type: " + blockType.toString() +
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
index 604ac1f..66a6c00 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
@@ -25,6 +25,7 @@ import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
@@ -64,8 +65,7 @@ public class HFileWriterImpl implements HFile.Writer {
private static final long UNSET = -1;
/** if this feature is enabled, preCalculate encoded data size before real encoding happens*/
- public static final String UNIFIED_ENCODED_BLOCKSIZE_RATIO =
- "hbase.writer.unified.encoded.blocksize.ratio";
+ public static final String UNIFIED_ENCODED_BLOCKSIZE_RATIO = "hbase.writer.unified.encoded.blocksize.ratio";
/** Block size limit after encoding, used to unify encoded block Cache entry size*/
private final int encodedBlockSizeLimit;
@@ -94,6 +94,9 @@ public class HFileWriterImpl implements HFile.Writer {
/** Total uncompressed bytes, maybe calculate a compression ratio later. */
protected long totalUncompressedBytes = 0;
+ /** Key comparator. Used to ensure we write in order. */
+ protected final CellComparator comparator;
+
/** Meta block names. */
protected List<byte[]> metaNames = new ArrayList<>();
@@ -162,7 +165,8 @@ public class HFileWriterImpl implements HFile.Writer {
protected long maxMemstoreTS = 0;
public HFileWriterImpl(final Configuration conf, CacheConfig cacheConf, Path path,
- FSDataOutputStream outputStream, HFileContext fileContext) {
+ FSDataOutputStream outputStream,
+ CellComparator comparator, HFileContext fileContext) {
this.outputStream = outputStream;
this.path = path;
this.name = path != null ? path.getName() : outputStream.toString();
@@ -173,6 +177,8 @@ public class HFileWriterImpl implements HFile.Writer {
} else {
this.blockEncoder = NoOpDataBlockEncoder.INSTANCE;
}
+ this.comparator = comparator != null ? comparator : CellComparator.getInstance();
+
closeOutputStream = path != null;
this.cacheConf = cacheConf;
float encodeBlockSizeRatio = conf.getFloat(UNIFIED_ENCODED_BLOCKSIZE_RATIO, 1f);
@@ -181,6 +187,7 @@ public class HFileWriterImpl implements HFile.Writer {
if (LOG.isTraceEnabled()) {
LOG.trace("Writer" + (path != null ? " for " + path : "") +
" initialized with cacheConf: " + cacheConf +
+ " comparator: " + comparator.getClass().getSimpleName() +
" fileContext: " + fileContext);
}
}
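A sketch of how the encoded-size limit is assumed to fall out of that ratio
(applied against the context block size, default ratio 1.0):

    float ratio = conf.getFloat(UNIFIED_ENCODED_BLOCKSIZE_RATIO, 1f);
    int encodedBlockSizeLimit = (int) (fileContext.getBlocksize() * ratio);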
@@ -207,9 +214,10 @@ public class HFileWriterImpl implements HFile.Writer {
*
* @param trailer fixed file trailer
* @param out the data output to write the file info to
+ * @throws IOException
*/
protected final void writeFileInfo(FixedFileTrailer trailer, DataOutputStream out)
- throws IOException {
+ throws IOException {
trailer.setFileInfoOffset(outputStream.getPos());
finishFileInfo();
long startTime = System.currentTimeMillis();
@@ -231,8 +239,7 @@ public class HFileWriterImpl implements HFile.Writer {
throw new IOException("Key cannot be null or empty");
}
if (lastCell != null) {
- int keyComp = PrivateCellUtil.compareKeyIgnoresMvcc(this.hFileContext.getCellComparator(),
- lastCell, cell);
+ int keyComp = PrivateCellUtil.compareKeyIgnoresMvcc(comparator, lastCell, cell);
if (keyComp > 0) {
String message = getLexicalErrorMessage(cell);
throw new IOException(message);
@@ -278,9 +285,8 @@ public class HFileWriterImpl implements HFile.Writer {
}
public static Compression.Algorithm compressionByName(String algoName) {
- if (algoName == null) {
+ if (algoName == null)
return HFile.DEFAULT_COMPRESSION_ALGORITHM;
- }
return Compression.getCompressionAlgorithmByName(algoName);
}
@@ -312,15 +318,17 @@ public class HFileWriterImpl implements HFile.Writer {
// Meta data block index writer
metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter();
- LOG.trace("Initialized with {}", cacheConf);
+ if (LOG.isTraceEnabled()) LOG.trace("Initialized with " + cacheConf);
}
/**
* At a block boundary, write all the inline blocks and opens new block.
+ *
+ * @throws IOException
*/
protected void checkBlockBoundary() throws IOException {
- // For encoder like prefixTree, encoded size is not available, so we have to compare both
- // encoded size and unencoded size to blocksize limit.
+ //for encoder like prefixTree, encoded size is not available, so we have to compare both encoded size
+ //and unencoded size to blocksize limit.
if (blockWriter.encodedBlockSizeWritten() >= encodedBlockSizeLimit
|| blockWriter.blockSizeWritten() >= hFileContext.getBlocksize()) {
finishBlock();
@@ -331,9 +339,7 @@ public class HFileWriterImpl implements HFile.Writer {
/** Clean up the data block that is currently being written.*/
private void finishBlock() throws IOException {
- if (!blockWriter.isWriting() || blockWriter.blockSizeWritten() == 0) {
- return;
- }
+ if (!blockWriter.isWriting() || blockWriter.blockSizeWritten() == 0) return;
// Update the first data block offset if UNSET; used scanning.
if (firstDataBlockOffset == UNSET) {
@@ -344,7 +350,7 @@ public class HFileWriterImpl implements HFile.Writer {
blockWriter.writeHeaderAndData(outputStream);
int onDiskSize = blockWriter.getOnDiskSizeWithHeader();
Cell indexEntry =
- getMidpoint(this.hFileContext.getCellComparator(), lastCellOfPreviousBlock, firstCellInBlock);
+ getMidpoint(this.comparator, lastCellOfPreviousBlock, firstCellInBlock);
dataBlockIndexWriter.addEntry(PrivateCellUtil.getCellKeySerializedAsKeyValueKey(indexEntry),
lastDataBlockOffset, onDiskSize);
totalUncompressedBytes += blockWriter.getUncompressedSizeWithHeader();
@@ -358,6 +364,11 @@ public class HFileWriterImpl implements HFile.Writer {
* <code>right</code> but that is shorter; i.e. takes up less space. This
* trick is used building HFile block index. Its an optimization. It does not
* always work. In this case we'll just return the <code>right</code> cell.
+ *
+ * @param comparator
+ * Comparator to use.
+ * @param left
+ * @param right
* @return A cell that sorts between <code>left</code> and <code>right</code>.
*/
public static Cell getMidpoint(final CellComparator comparator, final Cell left,
@@ -396,9 +407,7 @@ public class HFileWriterImpl implements HFile.Writer {
left.getRowLength(), right.getRowArray(), right.getRowOffset(), right.getRowLength());
}
// If midRow is null, just return 'right'. Can't do optimization.
- if (midRow == null) {
- return right;
- }
+ if (midRow == null) return right;
return PrivateCellUtil.createFirstOnRow(midRow);
}
// Rows are same. Compare on families.
@@ -419,9 +428,7 @@ public class HFileWriterImpl implements HFile.Writer {
right.getFamilyLength());
}
// If midRow is null, just return 'right'. Can't do optimization.
- if (midRow == null) {
- return right;
- }
+ if (midRow == null) return right;
// Return new Cell where we use right row and then a mid sort family.
return PrivateCellUtil.createFirstOnRowFamily(right, midRow, 0, midRow.length);
}
@@ -443,9 +450,7 @@ public class HFileWriterImpl implements HFile.Writer {
right.getQualifierLength());
}
// If midRow is null, just return 'right'. Can't do optimization.
- if (midRow == null) {
- return right;
- }
+ if (midRow == null) return right;
// Return new Cell where we use right row and family and then a mid sort qualifier.
return PrivateCellUtil.createFirstOnRowCol(right, midRow, 0, midRow.length);
}
@@ -454,6 +459,12 @@ public class HFileWriterImpl implements HFile.Writer {
}
/**
+ * @param leftArray
+ * @param leftOffset
+ * @param leftLength
+ * @param rightArray
+ * @param rightOffset
+ * @param rightLength
* @return Return a new array that is between left and right and minimally
* sized else just return null as indicator that we could not create a
* mid point.
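A hedged standalone sketch of the "minimally sized" array the javadoc above
asks for: keep the common prefix, bump the first differing byte of left when
that still sorts strictly below right, else fall back to right's short prefix:

    static byte[] midpoint(byte[] left, byte[] right) {
      int min = Math.min(left.length, right.length);
      int diff = 0;
      while (diff < min && left[diff] == right[diff]) {
        diff++;
      }
      if (diff < min && (left[diff] & 0xff) + 1 < (right[diff] & 0xff)) {
        byte[] mid = java.util.Arrays.copyOf(left, diff + 1);
        mid[diff] = (byte) (left[diff] + 1);
        return mid;   // e.g. midpoint("aaabbb", "aaaddd") yields "aaac"
      }
      return java.util.Arrays.copyOf(right, diff + 1);  // shortest workable prefix of right
    }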
@@ -555,6 +566,8 @@ public class HFileWriterImpl implements HFile.Writer {
/**
* Ready a new block for writing.
+ *
+ * @throws IOException
*/
protected void newBlock() throws IOException {
// This is where the next block begins.
@@ -665,7 +678,7 @@ public class HFileWriterImpl implements HFile.Writer {
dataBlockIndexWriter.getTotalUncompressedSize());
trailer.setFirstDataBlockOffset(firstDataBlockOffset);
trailer.setLastDataBlockOffset(lastDataBlockOffset);
- trailer.setComparatorClass(this.hFileContext.getCellComparator().getClass());
+ trailer.setComparatorClass(comparator.getClass());
trailer.setDataIndexCount(dataBlockIndexWriter.getNumRootEntries());
@@ -691,9 +704,8 @@ public class HFileWriterImpl implements HFile.Writer {
private void addBloomFilter(final BloomFilterWriter bfw,
final BlockType blockType) {
- if (bfw.getKeyCount() <= 0) {
+ if (bfw.getKeyCount() <= 0)
return;
- }
if (blockType != BlockType.GENERAL_BLOOM_META &&
blockType != BlockType.DELETE_FAMILY_BLOOM_META) {
@@ -710,9 +722,8 @@ public class HFileWriterImpl implements HFile.Writer {
public void writeToBlock(DataOutput out) throws IOException {
bfw.getMetaWriter().write(out);
Writable dataWriter = bfw.getDataWriter();
- if (dataWriter != null) {
+ if (dataWriter != null)
dataWriter.write(out);
- }
}
});
}
@@ -728,6 +739,7 @@ public class HFileWriterImpl implements HFile.Writer {
*
* @param cell
* Cell to add. Cannot be empty nor null.
+ * @throws IOException
*/
@Override
public void append(final Cell cell) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
index 1c00e25..1d36446 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
@@ -1,4 +1,4 @@
-/*
+/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@@ -35,11 +35,13 @@ import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.TableName;
@@ -122,6 +124,7 @@ public final class MobUtils {
* Parses the string to a date.
* @param dateString The string format of a date, it's yyyymmdd.
* @return A date.
+ * @throws ParseException
*/
public static Date parseDate(String dateString) throws ParseException {
return LOCAL_FORMAT.get().parse(dateString);
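A usage sketch, assuming LOCAL_FORMAT wraps a thread-local SimpleDateFormat
over the "yyyyMMdd" pattern described above:

    Date d = MobUtils.parseDate("20200126");   // Sun Jan 26 2020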
@@ -285,6 +288,7 @@ public final class MobUtils {
* @param columnDescriptor The descriptor of the current column family.
* @param cacheConfig The cacheConfig that disables the block cache.
* @param current The current time.
+ * @throws IOException
*/
public static void cleanExpiredMobFiles(FileSystem fs, Configuration conf, TableName tableName,
ColumnFamilyDescriptor columnDescriptor, CacheConfig cacheConfig, long current)
@@ -380,6 +384,7 @@ public final class MobUtils {
* Gets the qualified root dir of the mob files.
* @param conf The current configuration.
* @return The qualified root dir.
+ * @throws IOException
*/
public static Path getQualifiedMobRootDir(Configuration conf) throws IOException {
Path hbaseDir = new Path(conf.get(HConstants.HBASE_DIR));
@@ -500,6 +505,7 @@ public final class MobUtils {
* @param tableDir The table directory.
* @param family The name of the column family.
* @param storeFiles The files to be deleted.
+ * @throws IOException
*/
public static void removeMobFiles(Configuration conf, FileSystem fs, TableName tableName,
Path tableDir, byte[] family, Collection<HStoreFile> storeFiles) throws IOException {
@@ -550,6 +556,7 @@ public final class MobUtils {
* @param cryptoContext The encryption context.
* @param isCompaction If the writer is used in compaction.
* @return The writer for the mob file.
+ * @throws IOException
*/
public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
ColumnFamilyDescriptor family, String date, Path basePath, long maxKeyCount,
@@ -573,6 +580,7 @@ public final class MobUtils {
* @param cryptoContext The encryption context.
* @param isCompaction If the writer is used in compaction.
* @return The writer for the mob file.
+ * @throws IOException
*/
public static StoreFileWriter createRefFileWriter(Configuration conf, FileSystem fs,
ColumnFamilyDescriptor family, Path basePath, long maxKeyCount, CacheConfig cacheConfig,
@@ -599,6 +607,7 @@ public final class MobUtils {
* @param cryptoContext The encryption context.
* @param isCompaction If the writer is used in compaction.
* @return The writer for the mob file.
+ * @throws IOException
*/
public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
ColumnFamilyDescriptor family, String date, Path basePath, long maxKeyCount,
@@ -624,6 +633,7 @@ public final class MobUtils {
* @param cacheConfig The current cache config.
* @param cryptoContext The encryption context.
* @return The writer for the del file.
+ * @throws IOException
*/
public static StoreFileWriter createDelFileWriter(Configuration conf, FileSystem fs,
ColumnFamilyDescriptor family, String date, Path basePath, long maxKeyCount,
@@ -650,9 +660,10 @@ public final class MobUtils {
* @param cryptoContext The encryption context.
* @param isCompaction If the writer is used in compaction.
* @return The writer for the mob file.
+ * @throws IOException
*/
public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
- ColumnFamilyDescriptor family, MobFileName mobFileName, Path basePath, long maxKeyCount,
+ ColumnFamilyDescriptor family, MobFileName mobFileName, Path basePath, long maxKeyCount,
Compression.Algorithm compression, CacheConfig cacheConfig, Encryption.Context cryptoContext,
boolean isCompaction)
throws IOException {
@@ -678,6 +689,7 @@ public final class MobUtils {
* @param bloomType The bloom filter type.
* @param isCompaction If the writer is used in compaction.
* @return The writer for the mob file.
+ * @throws IOException
*/
public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
ColumnFamilyDescriptor family, Path path, long maxKeyCount,
@@ -706,7 +718,8 @@ public final class MobUtils {
.withCreateTime(EnvironmentEdgeManager.currentTime()).build();
StoreFileWriter w = new StoreFileWriter.Builder(conf, writerCacheConf, fs)
- .withFilePath(path).withBloomType(bloomType)
+ .withFilePath(path)
+ .withComparator(CellComparator.getInstance()).withBloomType(bloomType)
.withMaxKeyCount(maxKeyCount).withFileContext(hFileContext).build();
return w;
}
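For contrast with the HBASE-23705 form it replaces: before the revert the builder took no comparator and the writer read one off hFileContext; afterwards the comparator is stated explicitly. A condensed sketch of the restored chain, with the method's parameters (conf, writerCacheConf, fs, path, bloomType, maxKeyCount, hFileContext) standing in as locals:

    StoreFileWriter w = new StoreFileWriter.Builder(conf, writerCacheConf, fs)
        .withFilePath(path)
        .withComparator(CellComparator.getInstance()) // restored by this revert
        .withBloomType(bloomType)
        .withMaxKeyCount(maxKeyCount)
        .withFileContext(hFileContext)
        .build();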
@@ -719,6 +732,7 @@ public final class MobUtils {
* @param targetPath The directory path where the source file is renamed to.
* @param cacheConfig The current cache config.
* @return The target file path the source file is renamed to.
+ * @throws IOException
*/
public static Path commitFile(Configuration conf, FileSystem fs, final Path sourceFile,
Path targetPath, CacheConfig cacheConfig) throws IOException {
@@ -823,8 +837,7 @@ public final class MobUtils {
* @param allFiles Whether add all mob files into the compaction.
*/
public static void doMobCompaction(Configuration conf, FileSystem fs, TableName tableName,
- ColumnFamilyDescriptor hcd, ExecutorService pool, boolean allFiles,
- LockManager.MasterLock lock)
+ ColumnFamilyDescriptor hcd, ExecutorService pool, boolean allFiles, LockManager.MasterLock lock)
throws IOException {
String className = conf.get(MobConstants.MOB_COMPACTOR_CLASS_KEY,
PartitionedMobCompactor.class.getName());
@@ -930,8 +943,7 @@ public final class MobUtils {
* @param fileDate The date string parsed from the mob file name.
* @return True if the mob file is expired.
*/
- public static boolean isMobFileExpired(ColumnFamilyDescriptor column, long current,
- String fileDate) {
+ public static boolean isMobFileExpired(ColumnFamilyDescriptor column, long current, String fileDate) {
if (column.getMinVersions() > 0) {
return false;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index a0ad3e8..6b11a11 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -52,6 +52,7 @@ import java.util.function.Predicate;
import java.util.function.ToLongFunction;
import java.util.stream.Collectors;
import java.util.stream.LongStream;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -131,8 +132,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDes
* not be called directly but by an HRegion manager.
*/
@InterfaceAudience.Private
-public class HStore implements Store, HeapSize, StoreConfigInformation,
- PropagatingConfigurationObserver {
+public class HStore implements Store, HeapSize, StoreConfigInformation, PropagatingConfigurationObserver {
public static final String MEMSTORE_CLASS_NAME = "hbase.regionserver.memstore.class";
public static final String COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY =
"hbase.server.compactchecker.interval.multiplier";
@@ -237,8 +237,11 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
/**
* Constructor
+ * @param region
* @param family HColumnDescriptor for this column
- * @param confParam configuration object failed. Can be null.
+ * @param confParam configuration object
+ * failed. Can be null.
+ * @throws IOException
*/
protected HStore(final HRegion region, final ColumnFamilyDescriptor family,
final Configuration confParam, boolean warmup) throws IOException {
@@ -396,6 +399,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
}
/**
+ * @param family
* @return TTL in seconds of the specified family
*/
public static long determineTTLFromFamily(final ColumnFamilyDescriptor family) {
@@ -517,7 +521,6 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
* @param hri {@link RegionInfo} for the region.
* @param family {@link ColumnFamilyDescriptor} describing the column family
* @return Path to family/Store home directory.
- * @deprecated Since 05/05/2013, HBase-7808, hbase-1.0.0
*/
@Deprecated
public static Path getStoreHomedir(final Path tabledir,
@@ -530,7 +533,6 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
* @param encodedName Encoded region name.
* @param family {@link ColumnFamilyDescriptor} describing the column family
* @return Path to family/Store home directory.
- * @deprecated Since 05/05/2013, HBase-7808, hbase-1.0.0
*/
@Deprecated
public static Path getStoreHomedir(final Path tabledir,
@@ -556,6 +558,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
/**
* Creates an unsorted list of StoreFile loaded in parallel
* from the given directory.
+ * @throws IOException
*/
private List<HStoreFile> loadStoreFiles(boolean warmup) throws IOException {
Collection<StoreFileInfo> files = fs.getStoreFiles(getColumnFamilyName());
@@ -571,8 +574,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
ThreadPoolExecutor storeFileOpenerThreadPool =
this.region.getStoreFileOpenAndCloseThreadPool("StoreFileOpenerThread-" +
this.getColumnFamilyName());
- CompletionService<HStoreFile> completionService =
- new ExecutorCompletionService<>(storeFileOpenerThreadPool);
+ CompletionService<HStoreFile> completionService = new ExecutorCompletionService<>(storeFileOpenerThreadPool);
int totalValidStoreFile = 0;
for (StoreFileInfo storeFileInfo : files) {
@@ -594,13 +596,9 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
compactedStoreFiles.addAll(storeFile.getCompactedStoreFiles());
}
} catch (InterruptedException e) {
- if (ioe == null) {
- ioe = new InterruptedIOException(e.getMessage());
- }
+ if (ioe == null) ioe = new InterruptedIOException(e.getMessage());
} catch (ExecutionException e) {
- if (ioe == null) {
- ioe = new IOException(e.getCause());
- }
+ if (ioe == null) ioe = new IOException(e.getCause());
}
}
} finally {
@@ -652,6 +650,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
/**
* Replaces the store files that the store has with the given files. Mainly used by secondary
* region replicas to keep up to date with the primary region files.
+ * @throws IOException
*/
public void refreshStoreFiles(Collection<String> newFiles) throws IOException {
List<StoreFileInfo> storeFiles = new ArrayList<>(newFiles.size());
@@ -666,20 +665,15 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
* been opened, and removes the store file readers for store files no longer
* available. Mainly used by secondary region replicas to keep up to date with
* the primary region files.
+ * @throws IOException
*/
private void refreshStoreFilesInternal(Collection<StoreFileInfo> newFiles) throws IOException {
StoreFileManager sfm = storeEngine.getStoreFileManager();
Collection<HStoreFile> currentFiles = sfm.getStorefiles();
Collection<HStoreFile> compactedFiles = sfm.getCompactedfiles();
- if (currentFiles == null) {
- currentFiles = Collections.emptySet();
- }
- if (newFiles == null) {
- newFiles = Collections.emptySet();
- }
- if (compactedFiles == null) {
- compactedFiles = Collections.emptySet();
- }
+ if (currentFiles == null) currentFiles = Collections.emptySet();
+ if (newFiles == null) newFiles = Collections.emptySet();
+ if (compactedFiles == null) compactedFiles = Collections.emptySet();
HashMap<StoreFileInfo, HStoreFile> currentFilesSet = new HashMap<>(currentFiles.size());
for (HStoreFile sf : currentFiles) {
@@ -874,13 +868,12 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
}
prevCell = cell;
} while (scanner.next());
- LOG.info("Full verification complete for bulk load hfile: " + srcPath.toString() +
- " took " + (EnvironmentEdgeManager.currentTime() - verificationStartTime) + " ms");
+ LOG.info("Full verification complete for bulk load hfile: " + srcPath.toString()
+ + " took " + (EnvironmentEdgeManager.currentTime() - verificationStartTime)
+ + " ms");
}
} finally {
- if (reader != null) {
- reader.close();
- }
+ if (reader != null) reader.close();
}
}
@@ -888,6 +881,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
* This method should only be called from Region. It is assumed that the ranges of values in the
* HFile fit within the stores assigned region. (assertBulkLoadHFileOk checks this)
*
+ * @param srcPathStr
* @param seqNum sequence Id associated with the HFile
*/
public Pair<Path, Path> preBulkLoadHFile(String srcPathStr, long seqNum) throws IOException {
@@ -999,17 +993,13 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
ioe.initCause(e);
}
} catch (ExecutionException e) {
- if (ioe == null) {
- ioe = new IOException(e.getCause());
- }
+ if (ioe == null) ioe = new IOException(e.getCause());
}
}
} finally {
storeFileCloserThreadPool.shutdownNow();
}
- if (ioe != null) {
- throw ioe;
- }
+ if (ioe != null) throw ioe;
}
LOG.trace("Closed {}", this);
return result;
@@ -1037,6 +1027,9 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
/**
* Write out current snapshot. Presumes {@link #snapshot()} has been called previously.
* @param logCacheFlushId flush sequence number
+ * @param snapshot
+ * @param status
+ * @param throughputController
* @return The path name of the tmp file to which the store was flushed
* @throws IOException if exception occurs during process
*/
@@ -1088,7 +1081,10 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
/**
* @param path The pathname of the tmp file into which the store was flushed
+ * @param logCacheFlushId
+ * @param status
* @return store file created.
+ * @throws IOException
*/
private HStoreFile commitFile(Path path, long logCacheFlushId, MonitoredTask status)
throws IOException {
@@ -1111,6 +1107,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
}
/**
+ * @param maxKeyCount
* @param compression Compression algorithm to use
* @param isCompaction whether we are creating a new file in a compaction
* @param includeMVCCReadpoint - whether to include MVCC or not
@@ -1165,6 +1162,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
StoreFileWriter.Builder builder = new StoreFileWriter.Builder(conf, writerCacheConf,
this.getFileSystem())
.withOutputDir(familyTempDir)
+ .withComparator(comparator)
.withBloomType(family.getBloomFilterType())
.withMaxKeyCount(maxKeyCount)
.withFavoredNodes(favoredNodes)
@@ -1194,7 +1192,6 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
.withColumnFamily(family.getName())
.withTableName(region.getTableDescriptor()
.getTableName().getName())
- .withCellComparator(this.comparator)
.build();
return hFileContext;
}
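The division of labor after this revert is worth spelling out: HStore's file context no longer names a comparator, and the store's comparator field goes to the StoreFileWriter.Builder instead. A side-by-side sketch, using the method's own identifiers; only the second form compiles on branch-2 once this revert lands:

    // Pre-revert (HBASE-23705): comparator carried by the file context.
    //   HFileContext ctx = new HFileContextBuilder()
    //       .withCellComparator(this.comparator)
    //       .build();
    // Post-revert: comparator-free context; the writer builder takes it.
    HFileContext ctx = new HFileContextBuilder().build();
    StoreFileWriter.Builder builder =
        new StoreFileWriter.Builder(conf, writerCacheConf, this.getFileSystem())
            .withComparator(this.comparator);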
@@ -1207,6 +1204,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
/**
* Change storeFiles adding into place the Reader produced by this new flush.
* @param sfs Store files
+ * @param snapshotId
+ * @throws IOException
* @return Whether compaction is required.
*/
private boolean updateStorefiles(List<HStoreFile> sfs, long snapshotId) throws IOException {
@@ -1238,6 +1237,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
/**
* Notify all observers that set of Readers has changed.
+ * @throws IOException
*/
private void notifyChangedReadersObservers(List<HStoreFile> sfs) throws IOException {
for (ChangedReadersObserver o : this.changedReaderObservers) {
@@ -1454,6 +1454,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
* See HBASE-2231 for details.
*
* @param compaction compaction details obtained from requestCompaction()
+ * @throws IOException
* @return Storefile we compacted into or null if we failed or opted out early.
*/
public List<HStoreFile> compact(CompactionContext compaction,
@@ -1520,8 +1521,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
return sfs;
}
- private List<HStoreFile> moveCompactedFilesIntoPlace(CompactionRequestImpl cr,
- List<Path> newFiles, User user) throws IOException {
+ private List<HStoreFile> moveCompactedFilesIntoPlace(CompactionRequestImpl cr, List<Path> newFiles,
+ User user) throws IOException {
List<HStoreFile> sfs = new ArrayList<>(newFiles.size());
for (Path newFile : newFiles) {
assert newFile != null;
@@ -1559,8 +1560,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
newFiles.stream().map(HStoreFile::getPath).collect(Collectors.toList());
RegionInfo info = this.region.getRegionInfo();
CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(info,
- family.getName(), inputPaths, outputPaths,
- fs.getStoreDir(getColumnFamilyDescriptor().getNameAsString()));
+ family.getName(), inputPaths, outputPaths, fs.getStoreDir(getColumnFamilyDescriptor().getNameAsString()));
// Fix reaching into Region to get the maxWaitForSeqId.
// Does this method belong in Region altogether given it is making so many references up there?
// Could be Region#writeCompactionMarker(compactionDescriptor);
@@ -1664,6 +1664,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
* Call to complete a compaction. Its for the case where we find in the WAL a compaction
* that was not finished. We could find one recovering a WAL after a regionserver crash.
* See HBASE-2231.
+ * @param compaction
*/
public void replayCompactionMarker(CompactionDescriptor compaction, boolean pickCompactionFiles,
boolean removeFiles) throws IOException {
@@ -1916,9 +1917,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
}
private void removeUnneededFiles() throws IOException {
- if (!conf.getBoolean("hbase.store.delete.expired.storefile", true)) {
- return;
- }
+ if (!conf.getBoolean("hbase.store.delete.expired.storefile", true)) return;
if (getColumnFamilyDescriptor().getMinVersions() > 0) {
LOG.debug("Skipping expired store file removal due to min version being {}",
getColumnFamilyDescriptor().getMinVersions());
@@ -2121,6 +2120,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
* @param readPt the read point of the current scane
* @param includeMemstoreScanner whether the current scanner should include memstorescanner
* @return list of scanners recreated on the current Scanners
+ * @throws IOException
*/
public List<KeyValueScanner> recreateScanners(List<KeyValueScanner> currentFileScanners,
boolean cacheBlocks, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher,
@@ -2319,6 +2319,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
* This operation is atomic on each KeyValue (row/family/qualifier) but not necessarily atomic
* across all of them.
* @param readpoint readpoint below which we can safely remove duplicate KVs
+ * @throws IOException
*/
public void upsert(Iterable<Cell> cells, long readpoint, MemStoreSizing memstoreSizing)
throws IOException {
@@ -2432,6 +2433,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
* snapshot depending on dropMemstoreSnapshot argument.
* @param fileNames names of the flushed files
* @param dropMemstoreSnapshot whether to drop the prepared memstore snapshot
+ * @throws IOException
*/
@Override
public void replayFlush(List<String> fileNames, boolean dropMemstoreSnapshot)
@@ -2463,6 +2465,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
/**
* Abort the snapshot preparation. Drops the snapshot if any.
+ * @throws IOException
*/
@Override
public void abort() throws IOException {
@@ -2717,8 +2720,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
LOG.debug("Moving the files {} to archive", filesToRemove);
// Only if this is successful it has to be removed
try {
- this.fs.removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(),
- filesToRemove);
+ this.fs.removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), filesToRemove);
} catch (FailedArchiveException fae) {
// Even if archiving some files failed, we still need to clear out any of the
// files which were successfully archived. Otherwise we will receive a
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
index 02665a3..7b4fb4a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
@@ -26,6 +26,7 @@ import static org.apache.hadoop.hbase.regionserver.HStoreFile.MAJOR_COMPACTION_K
import static org.apache.hadoop.hbase.regionserver.HStoreFile.MAX_SEQ_ID_KEY;
import static org.apache.hadoop.hbase.regionserver.HStoreFile.MOB_CELLS_COUNT;
import static org.apache.hadoop.hbase.regionserver.HStoreFile.TIMERANGE_KEY;
+
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Collection;
@@ -36,13 +37,15 @@ import java.util.UUID;
import java.util.function.Supplier;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PrivateCellUtil;
+import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
@@ -59,7 +62,9 @@ import org.apache.hadoop.hbase.util.RowPrefixFixedLengthBloomContext;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
/**
@@ -89,6 +94,7 @@ public class StoreFileWriter implements CellSink, ShipperListener {
* @param fs file system to write to
* @param path file name to create
* @param conf user configuration
+ * @param comparator key comparator
* @param bloomType bloom filter setting
* @param maxKeys the expected maximum number of keys to be added. Was used
* for Bloom filter size in {@link HFile} format version 1.
@@ -99,14 +105,15 @@ public class StoreFileWriter implements CellSink, ShipperListener {
* @throws IOException problem writing to FS
*/
private StoreFileWriter(FileSystem fs, Path path, final Configuration conf, CacheConfig cacheConf,
- BloomType bloomType, long maxKeys, InetSocketAddress[] favoredNodes, HFileContext fileContext,
- boolean shouldDropCacheBehind, Supplier<Collection<HStoreFile>> compactedFilesSupplier)
- throws IOException {
+ final CellComparator comparator, BloomType bloomType, long maxKeys,
+ InetSocketAddress[] favoredNodes, HFileContext fileContext, boolean shouldDropCacheBehind,
+ Supplier<Collection<HStoreFile>> compactedFilesSupplier) throws IOException {
this.compactedFilesSupplier = compactedFilesSupplier;
this.timeRangeTracker = TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC);
// TODO : Change all writers to be specifically created for compaction context
writer = HFile.getWriterFactory(conf, cacheConf)
.withPath(fs, path)
+ .withComparator(comparator)
.withFavoredNodes(favoredNodes)
.withFileContext(fileContext)
.withShouldDropCacheBehind(shouldDropCacheBehind)
@@ -128,16 +135,14 @@ public class StoreFileWriter implements CellSink, ShipperListener {
// init bloom context
switch (bloomType) {
case ROW:
- bloomContext =
- new RowBloomContext(generalBloomFilterWriter, fileContext.getCellComparator());
+ bloomContext = new RowBloomContext(generalBloomFilterWriter, comparator);
break;
case ROWCOL:
- bloomContext =
- new RowColBloomContext(generalBloomFilterWriter, fileContext.getCellComparator());
+ bloomContext = new RowColBloomContext(generalBloomFilterWriter, comparator);
break;
case ROWPREFIX_FIXED_LENGTH:
- bloomContext = new RowPrefixFixedLengthBloomContext(generalBloomFilterWriter,
- fileContext.getCellComparator(), Bytes.toInt(bloomParam));
+ bloomContext = new RowPrefixFixedLengthBloomContext(generalBloomFilterWriter, comparator,
+ Bytes.toInt(bloomParam));
break;
default:
throw new IOException(
@@ -154,8 +159,7 @@ public class StoreFileWriter implements CellSink, ShipperListener {
this.deleteFamilyBloomFilterWriter = BloomFilterFactory
.createDeleteBloomAtWrite(conf, cacheConf,
(int) Math.min(maxKeys, Integer.MAX_VALUE), writer);
- deleteFamilyBloomContext =
- new RowBloomContext(deleteFamilyBloomFilterWriter, fileContext.getCellComparator());
+ deleteFamilyBloomContext = new RowBloomContext(deleteFamilyBloomFilterWriter, comparator);
} else {
deleteFamilyBloomFilterWriter = null;
}
@@ -392,6 +396,7 @@ public class StoreFileWriter implements CellSink, ShipperListener {
}
/**
+ * @param fs
* @param dir Directory to create file in.
* @return random filename inside passed <code>dir</code>
*/
@@ -409,6 +414,7 @@ public class StoreFileWriter implements CellSink, ShipperListener {
private final CacheConfig cacheConf;
private final FileSystem fs;
+ private CellComparator comparator = CellComparator.getInstance();
private BloomType bloomType = BloomType.NONE;
private long maxKeyCount = 0;
private Path dir;
@@ -467,6 +473,12 @@ public class StoreFileWriter implements CellSink, ShipperListener {
return this;
}
+ public Builder withComparator(CellComparator comparator) {
+ Preconditions.checkNotNull(comparator);
+ this.comparator = comparator;
+ return this;
+ }
+
public Builder withBloomType(BloomType bloomType) {
Preconditions.checkNotNull(bloomType);
this.bloomType = bloomType;
@@ -533,7 +545,11 @@ public class StoreFileWriter implements CellSink, ShipperListener {
}
}
- return new StoreFileWriter(fs, filePath, conf, cacheConf, bloomType, maxKeyCount,
+ if (comparator == null) {
+ comparator = CellComparator.getInstance();
+ }
+
+ return new StoreFileWriter(fs, filePath, conf, cacheConf, comparator, bloomType, maxKeyCount,
favoredNodes, fileContext, shouldDropCacheBehind, compactedFilesSupplier);
}
}
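Two details of the restored Builder are easy to miss in the hunks above: withComparator rejects null via Preconditions.checkNotNull, and the comparator field is pre-initialized to CellComparator.getInstance() (with a second null guard in build()), so omitting the call is safe. A sketch, with conf, cacheConf, fs, dir and ctx as placeholders:

    // These two writers end up with the same (default) comparator:
    StoreFileWriter a = new StoreFileWriter.Builder(conf, cacheConf, fs)
        .withOutputDir(dir).withFileContext(ctx).build();
    StoreFileWriter b = new StoreFileWriter.Builder(conf, cacheConf, fs)
        .withOutputDir(dir).withFileContext(ctx)
        .withComparator(CellComparator.getInstance()).build();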
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java
index 8d69b02..be21ec6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.wal;
import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
+
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.HashMap;
@@ -28,6 +29,7 @@ import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;
+
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -206,7 +208,7 @@ public class BoundedRecoveredHFilesOutputSink extends OutputSink {
.withFilePath(outputFile);
HFileContextBuilder hFileContextBuilder = new HFileContextBuilder();
if (isMetaTable) {
- hFileContextBuilder.withCellComparator(CellComparatorImpl.META_COMPARATOR);
+ writerBuilder.withComparator(CellComparatorImpl.META_COMPARATOR);
} else {
configContextForNonMetaWriter(tableName, familyName, hFileContextBuilder, writerBuilder);
}
@@ -222,9 +224,9 @@ public class BoundedRecoveredHFilesOutputSink extends OutputSink {
TableDescriptor tableDesc = tableDescCache.get(tableName);
ColumnFamilyDescriptor cfd = tableDesc.getColumnFamily(Bytes.toBytesBinary(familyName));
hFileContextBuilder.withCompression(cfd.getCompressionType()).withBlockSize(cfd.getBlocksize())
- .withCompressTags(cfd.isCompressTags()).withDataBlockEncoding(cfd.getDataBlockEncoding())
- .withCellComparator(CellComparatorImpl.COMPARATOR);
- writerBuilder.withBloomType(cfd.getBloomFilterType());
+ .withCompressTags(cfd.isCompressTags()).withDataBlockEncoding(cfd.getDataBlockEncoding());
+ writerBuilder.withBloomType(cfd.getBloomFilterType())
+ .withComparator(CellComparatorImpl.COMPARATOR);
}
private void checkPathValid(Path outputFile) throws IOException {
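Condensed from the two hunks above, the restored comparator selection in this sink moves from the HFileContextBuilder onto the writer builder (writerBuilder and cfd are the surrounding methods' locals; the cf-derived compression, block size and encoding settings are elided):

    // Meta regions get the meta comparator directly on the writer builder:
    writerBuilder.withComparator(CellComparatorImpl.META_COMPARATOR);
    // Non-meta tables get the default comparator alongside the bloom type:
    writerBuilder.withBloomType(cfd.getBloomFilterType())
        .withComparator(CellComparatorImpl.COMPARATOR);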
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
index 4e9a39f..4c5be46 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
@@ -366,6 +366,7 @@ public class HFilePerformanceEvaluation {
writer = HFile.getWriterFactoryNoCache(conf)
.withPath(fs, mf)
.withFileContext(hFileContext)
+ .withComparator(CellComparator.getInstance())
.create();
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
index 3f21f0c..569ede2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
@@ -1,4 +1,4 @@
-/*
+/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -206,8 +206,8 @@ public class TestDataBlockEncoders {
.withIncludesTags(includesTags)
.withCompression(Compression.Algorithm.NONE)
.build();
- DataBlockEncoder.EncodedSeeker seeker =
- encoder.createSeeker(encoder.newDataBlockDecodingContext(meta));
+ DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(CellComparatorImpl.COMPARATOR,
+ encoder.newDataBlockDecodingContext(meta));
seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
encodedSeekers.add(seeker);
}
@@ -279,8 +279,8 @@ public class TestDataBlockEncoders {
.withIncludesTags(includesTags)
.withCompression(Compression.Algorithm.NONE)
.build();
- DataBlockEncoder.EncodedSeeker seeker =
- encoder.createSeeker(encoder.newDataBlockDecodingContext(meta));
+ DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(CellComparatorImpl.COMPARATOR,
+ encoder.newDataBlockDecodingContext(meta));
seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
int i = 0;
do {
@@ -341,8 +341,8 @@ public class TestDataBlockEncoders {
HFileContext meta =
new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS)
.withIncludesTags(includesTags).withCompression(Compression.Algorithm.NONE).build();
- DataBlockEncoder.EncodedSeeker seeker =
- encoder.createSeeker(encoder.newDataBlockDecodingContext(meta));
+ DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(CellComparatorImpl.COMPARATOR,
+ encoder.newDataBlockDecodingContext(meta));
seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
Cell cell = seeker.getCell();
Assert.assertEquals(expectedKV.getLength(), ((KeyValue) cell).getLength());
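For readers comparing against master: after this revert the branch-2 seeker factory takes the comparator as an explicit first argument again. A sketch of the restored call shape, where encoder and encodedBuffer stand in for the test's locals:

    HFileContext meta = new HFileContextBuilder()
        .withHBaseCheckSum(false)
        .withIncludesMvcc(false)
        .withIncludesTags(false)
        .withCompression(Compression.Algorithm.NONE)
        .build();
    DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(
        CellComparatorImpl.COMPARATOR, encoder.newDataBlockDecodingContext(meta));
    seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));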
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java
index d9f22bc..e7b1426 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java
@@ -25,6 +25,7 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
import org.apache.hadoop.hbase.HConstants;
@@ -289,8 +290,8 @@ public class TestSeekToBlockWithEncoders {
HFILEBLOCK_DUMMY_HEADER, meta);
ByteBuffer encodedBuffer = TestDataBlockEncoders.encodeKeyValues(encoding, kvs,
encodingContext, this.useOffheapData);
- DataBlockEncoder.EncodedSeeker seeker =
- encoder.createSeeker(encoder.newDataBlockDecodingContext(meta));
+ DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(CellComparatorImpl.COMPARATOR,
+ encoder.newDataBlockDecodingContext(meta));
seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
encodedSeekers.add(seeker);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
index 609ff9d..cb7042a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ArrayBackedTag;
+import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -393,7 +394,7 @@ public class TestCacheOnWrite {
.withDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding())
.withIncludesTags(useTags).build();
StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs)
- .withOutputDir(storeFileParentDir)
+ .withOutputDir(storeFileParentDir).withComparator(CellComparatorImpl.COMPARATOR)
.withFileContext(meta)
.withBloomType(BLOOM_TYPE).withMaxKeyCount(NUM_KV).build();
byte[] cf = Bytes.toBytes("fam");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java
index 14006a8..d25ce47 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java
@@ -131,10 +131,6 @@ public class TestFixedFileTrailer {
t.createComparator(KeyValue.META_COMPARATOR.getClass().getName()).getClass());
assertEquals(CellComparatorImpl.MetaCellComparator.class, t.createComparator(
CellComparatorImpl.MetaCellComparator.META_COMPARATOR.getClass().getName()).getClass());
- assertEquals(CellComparatorImpl.META_COMPARATOR.getClass(), t.createComparator(
- CellComparatorImpl.MetaCellComparator.META_COMPARATOR.getClass().getName()).getClass());
- assertEquals(CellComparatorImpl.COMPARATOR.getClass(), t.createComparator(
- CellComparatorImpl.MetaCellComparator.COMPARATOR.getClass().getName()).getClass());
assertNull(t.createComparator(Bytes.BYTES_RAWCOMPARATOR.getClass().getName()));
assertNull(t.createComparator("org.apache.hadoop.hbase.KeyValue$RawBytesComparator"));
} catch (IOException e) {
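The surviving assertions sketch the contract of FixedFileTrailer#createComparator: legacy KeyValue comparator class names and CellComparatorImpl names resolve to the matching CellComparator, while the old raw-bytes comparator names intentionally resolve to null. In miniature (t is the trailer under test, as above):

    // Resolves to CellComparatorImpl.MetaCellComparator:
    t.createComparator(CellComparatorImpl.MetaCellComparator.META_COMPARATOR
        .getClass().getName());
    // No cell-level equivalent exists, so this yields null:
    t.createComparator("org.apache.hadoop.hbase.KeyValue$RawBytesComparator");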
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
index 1ec5929..e0817ec 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilder;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
+import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -275,7 +276,7 @@ public class TestHFile {
HFileContext meta = new HFileContextBuilder().withBlockSize(64 * 1024).build();
StoreFileWriter sfw =
new StoreFileWriter.Builder(conf, fs).withOutputDir(storeFileParentDir)
- .withFileContext(meta).build();
+ .withComparator(CellComparatorImpl.COMPARATOR).withFileContext(meta).build();
final int rowLen = 32;
Random RNG = new Random();
@@ -358,7 +359,7 @@ public class TestHFile {
.withCompressTags(false)
.build();
HFileWriterImpl writer = new HFileWriterImpl(conf, cacheConf, path, mockedOutputStream,
- fileContext);
+ CellComparator.getInstance(), fileContext);
CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY);
byte[] row = Bytes.toBytes("foo");
byte[] qualifier = Bytes.toBytes("qualifier");
@@ -514,6 +515,7 @@ public class TestHFile {
Writer writer = HFile.getWriterFactory(conf, cacheConf)
.withOutputStream(fout)
.withFileContext(meta)
+ .withComparator(CellComparatorImpl.COMPARATOR)
.create();
LOG.info(Objects.toString(writer));
writeRecords(writer, useTags);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java
index 8c3a632..41568b3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
@@ -65,8 +66,9 @@ public class TestHFileReaderImpl {
HFileContext context =
new HFileContextBuilder().withBlockSize(blocksize).withIncludesTags(true).build();
Configuration conf = TEST_UTIL.getConfiguration();
- HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
- .withOutputStream(fout).withFileContext(context).create();
+ HFile.Writer writer =
+ HFile.getWriterFactoryNoCache(conf).withOutputStream(fout).withFileContext(context)
+ .withComparator(CellComparatorImpl.COMPARATOR).create();
// 4 bytes * 3 * 2 for each key/value +
// 3 for keys, 15 for values = 42 (woot)
writer.append(toKV("c"));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java
index e326ecc..47cbd85 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
@@ -164,7 +165,7 @@ public class TestHFileScannerImplReferenceCount {
.withCompression(compression).withDataBlockEncoding(encoding).build();
try (HFile.Writer writer =
new HFile.WriterFactory(conf, new CacheConfig(conf)).withPath(fs, hfilePath)
- .withFileContext(context).create()) {
+ .withFileContext(context).withComparator(CellComparatorImpl.COMPARATOR).create()) {
Random rand = new Random(9713312); // Just a fixed seed.
for (int i = 0; i < cellCount; ++i) {
byte[] keyBytes = Bytes.add(Bytes.toBytes(i), SUFFIX);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
index 711c41c..095af0b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
@@ -140,6 +141,7 @@ public class TestHFileSeek extends TestCase {
Writer writer = HFile.getWriterFactoryNoCache(conf)
.withOutputStream(fout)
.withFileContext(context)
+ .withComparator(CellComparatorImpl.COMPARATOR)
.create();
try {
BytesWritable key = new BytesWritable();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java
index 3cec836..4f4d36b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -131,6 +132,7 @@ public class TestHFileWriterV3 {
HFile.Writer writer = new HFile.WriterFactory(conf, cacheConfig)
.withPath(fs, hfilePath)
.withFileContext(context)
+ .withComparator(CellComparatorImpl.COMPARATOR)
.create();
Random rand = new Random(9713312); // Just a fixed seed.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java
index da62e88..1365680 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java
@@ -26,6 +26,7 @@ import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -144,6 +145,7 @@ public class TestPrefetch {
.build();
StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs)
.withOutputDir(storeFileParentDir)
+ .withComparator(CellComparatorImpl.COMPARATOR)
.withFileContext(meta)
.build();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
index de68578..1ae861c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
@@ -25,6 +25,7 @@ import java.util.List;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ArrayBackedTag;
+import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
@@ -68,6 +69,8 @@ public class TestReseekTo {
TEST_UTIL.getConfiguration(), cacheConf)
.withOutputStream(fout)
.withFileContext(context)
+ // NOTE: This test is dependent on this deprecated nonstandard comparator
+ .withComparator(CellComparatorImpl.COMPARATOR)
.create();
int numberOfKeys = 1000;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
index e45383a..9ab1f24 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.ByteBufferKeyValue;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
@@ -122,7 +123,8 @@ public class TestSeekTo {
.withIncludesTags(true).build();
Configuration conf = TEST_UTIL.getConfiguration();
HFile.Writer writer = HFile.getWriterFactoryNoCache(conf).withOutputStream(fout)
- .withFileContext(context).create();
+ .withFileContext(context)
+ .withComparator(CellComparatorImpl.COMPARATOR).create();
// 4 bytes * 3 * 2 for each key/value +
// 3 for keys, 15 for values = 42 (woot)
writer.append(toKV("c", tagUsage));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestIgnoreUnknownFamily.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestIgnoreUnknownFamily.java
index 89ec68a..a99ea7a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestIgnoreUnknownFamily.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestIgnoreUnknownFamily.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -84,7 +84,7 @@ public class TestIgnoreUnknownFamily {
Path familyDir = new Path(regionDir, Bytes.toString(UNKNOWN_FAMILY));
StoreFileWriter writer =
new StoreFileWriter.Builder(mfs.getConfiguration(), mfs.getFileSystem())
- .withOutputDir(familyDir).withFileContext(new HFileContextBuilder().build()).build();
+ .withOutputDir(familyDir).withFileContext(new HFileContext()).build();
writer.close();
}
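This revert also swaps several tests back from new HFileContextBuilder().build() to direct new HFileContext() construction. Assuming the no-arg constructor keeps its historical defaults, the two spellings produce equivalent all-default contexts:

    HFileContext viaBuilder = new HFileContextBuilder().build();
    HFileContext direct = new HFileContext(); // form restored by this revert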
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
index 599a7a3..0e3fac9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
@@ -50,7 +50,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
@@ -279,7 +279,7 @@ public class TestBulkLoad {
FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation), null);
try {
hFileFactory.withOutputStream(out);
- hFileFactory.withFileContext(new HFileContextBuilder().build());
+ hFileFactory.withFileContext(new HFileContext());
HFile.Writer writer = hFileFactory.create();
try {
writer.append(new KeyValue(CellUtil.createCell(randomBytes,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java
index 33ab4e3..063c70b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java
@@ -335,7 +335,7 @@ public class TestBulkLoadReplication extends TestReplicationBase {
new FSDataOutputStream(new FileOutputStream(hFileLocation), null);
try {
hFileFactory.withOutputStream(out);
- hFileFactory.withFileContext(new HFileContextBuilder().build());
+ hFileFactory.withFileContext(new HFileContext());
HFile.Writer writer = hFileFactory.create();
try {
writer.append(new KeyValue(cellBuilder.build()));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
index 93a5237..3da0341 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
@@ -24,7 +24,9 @@ import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
+import java.util.Objects;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
@@ -38,8 +40,6 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
@@ -58,8 +58,7 @@ import org.slf4j.LoggerFactory;
/**
* TestGet is a medley of tests of get all done up as a single test.
- * It was originally written to test a method since removed, getClosestAtOrBefore
- * but the test is retained because it runs some interesting exercises.
+ * This class
*/
@Category({RegionServerTests.class, MediumTests.class})
public class TestGetClosestAtOrBefore {
@@ -86,6 +85,7 @@ public class TestGetClosestAtOrBefore {
@Test
public void testUsingMetaAndBinary() throws IOException {
+ FileSystem filesystem = FileSystem.get(conf);
Path rootdir = UTIL.getDataTestDirOnTestFS();
// Up flush size else we bind up when we use default catalog flush of 16k.
TableDescriptorBuilder metaBuilder = UTIL.getMetaTableDescriptorBuilder()
@@ -100,14 +100,13 @@ public class TestGetClosestAtOrBefore {
final int last = 128;
final int interval = 2;
for (int i = 0; i <= last; i += interval) {
- RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName())
- .setStartKey(i == 0 ? HConstants.EMPTY_BYTE_ARRAY : Bytes.toBytes((byte)i))
- .setEndKey(i == last ? HConstants.EMPTY_BYTE_ARRAY :
- Bytes.toBytes((byte)i + interval)).build();
+ HRegionInfo hri = new HRegionInfo(htd.getTableName(),
+ i == 0 ? HConstants.EMPTY_BYTE_ARRAY : Bytes.toBytes((byte) i),
+ i == last ? HConstants.EMPTY_BYTE_ARRAY : Bytes.toBytes((byte) i + interval));
+
Put put =
MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
put.setDurability(Durability.SKIP_WAL);
- LOG.info("Put {}", put);
mr.put(put);
}
}
@@ -115,7 +114,7 @@ public class TestGetClosestAtOrBefore {
try {
List<Cell> keys = new ArrayList<>();
while (s.next(keys)) {
- LOG.info("Scan {}", keys);
+ LOG.info(Objects.toString(keys));
keys.clear();
}
} finally {
@@ -131,14 +130,13 @@ public class TestGetClosestAtOrBefore {
findRow(mr, 'C', 46, 46);
findRow(mr, 'C', 43, 42);
// Now delete 'C' and make sure I don't get entries from 'B'.
- byte[] firstRowInC = RegionInfo.createRegionName(TableName.valueOf("" + 'C'),
+ byte[] firstRowInC = HRegionInfo.createRegionName(TableName.valueOf("" + 'C'),
HConstants.EMPTY_BYTE_ARRAY, HConstants.ZEROES, false);
- Scan scan = new Scan().withStartRow(firstRowInC);
+ Scan scan = new Scan(firstRowInC);
s = mr.getScanner(scan);
try {
List<Cell> keys = new ArrayList<>();
while (s.next(keys)) {
- LOG.info("Delete {}", keys);
mr.delete(new Delete(CellUtil.cloneRow(keys.get(0))));
keys.clear();
}
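The test reverts from the RegionInfo builder API back to the deprecated HRegionInfo and Scan constructors. For reference, the two spellings side by side; tableName, startKey, endKey and firstRowInC are placeholders for the test's values:

    // Builder style, removed here:
    RegionInfo viaBuilder = RegionInfoBuilder.newBuilder(tableName)
        .setStartKey(startKey).setEndKey(endKey).build();
    Scan viaWithStartRow = new Scan().withStartRow(firstRowInC);
    // Deprecated style restored on branch-2:
    HRegionInfo legacy = new HRegionInfo(tableName, startKey, endKey);
    Scan legacyScan = new Scan(firstRowInC);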
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
index c2e1e3b..7567871 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
@@ -61,7 +61,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.regionserver.HRegion.FlushResultImpl;
import org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult;
import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
@@ -1655,7 +1655,7 @@ public class TestHRegionReplayEvents {
FSDataOutputStream out = TEST_UTIL.getTestFileSystem().create(testFile);
try {
hFileFactory.withOutputStream(out);
- hFileFactory.withFileContext(new HFileContextBuilder().build());
+ hFileFactory.withFileContext(new HFileContext());
HFile.Writer writer = hFileFactory.create();
try {
writer.append(new KeyValue(CellUtil.createCell(valueBytes, family, valueBytes, 0L,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java
index 94491f9..f30f084 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
-import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
@@ -162,7 +161,7 @@ public class TestScannerWithBulkload {
Path path = new Path(pathStr);
HFile.WriterFactory wf = HFile.getWriterFactoryNoCache(TEST_UTIL.getConfiguration());
Assert.assertNotNull(wf);
- HFileContext context = new HFileContextBuilder().build();
+ HFileContext context = new HFileContext();
HFile.Writer writer = wf.withPath(fs, path).withFileContext(context).create();
KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l,
Bytes.toBytes("version2"));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java
index 63e8360..610d0b1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScannerClosure.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -183,7 +184,7 @@ public class TestStoreScannerClosure {
Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(), "TestHFile");
HFileContext meta = new HFileContextBuilder().withBlockSize(64 * 1024).build();
StoreFileWriter sfw = new StoreFileWriter.Builder(CONF, fs).withOutputDir(storeFileParentDir)
- .withFileContext(meta).build();
+ .withComparator(CellComparatorImpl.COMPARATOR).withFileContext(meta).build();
final int rowLen = 32;
Random RNG = new Random();