Posted to commits@hbase.apache.org by ap...@apache.org on 2021/10/19 21:24:03 UTC

[hbase] branch branch-2 updated (7e574c9 -> 42ff3ac)

This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a change to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git.


    from 7e574c9  HBASE-26190 High rate logging of BucketAllocatorException: Allocation too big (#3752)
     new b6bb180  HBASE-26316 Per-table or per-CF compression codec setting overrides (#3730)
     new 8ac0b5e  HBASE-26353 Support loadable dictionaries in hbase-compression-zstd (#3748)
     new 42ff3ac  HBASE-26349 Improve recent change to IntegrationTestLoadCommonCrawl (#3744)

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../{CompressionUtil.java => CanReinit.java}       |  19 +--
 .../hadoop/hbase/io/compress/Compression.java      |  34 ----
 .../hadoop/hbase/io/compress/CompressionUtil.java  |   2 +
 .../hadoop/hbase/io/compress/DictionaryCache.java  | 164 ++++++++++++++++++
 .../io/encoding/AbstractDataBlockEncoder.java      |  11 +-
 .../hadoop/hbase/io/encoding/DataBlockEncoder.java |  10 +-
 .../hadoop/hbase/io/encoding/EncodedDataBlock.java |  16 +-
 .../encoding/HFileBlockDefaultDecodingContext.java |  27 ++-
 .../encoding/HFileBlockDefaultEncodingContext.java |  16 +-
 .../hbase/io/compress/CompressionTestBase.java     |  69 +++++---
 .../compress/aircompressor/HadoopCompressor.java   |   3 +-
 .../aircompressor/TestHFileCompressionLz4.java     |  10 +-
 .../aircompressor/TestHFileCompressionLzo.java     |  10 +-
 .../aircompressor/TestHFileCompressionSnappy.java  |  10 +-
 .../aircompressor/TestHFileCompressionZstd.java    |  10 +-
 .../hbase/io/compress/lz4/Lz4Compressor.java       |   3 +-
 .../io/compress/lz4/TestHFileCompressionLz4.java   |  10 +-
 .../hbase/io/compress/xerial/SnappyCompressor.java |   3 +-
 .../xerial/TestHFileCompressionSnappy.java         |  10 +-
 .../io/compress/xz/TestHFileCompressionLzma.java   |  29 +++-
 .../hadoop/hbase/io/compress/zstd/ZstdCodec.java   |  15 +-
 .../hbase/io/compress/zstd/ZstdCompressor.java     |  36 +++-
 .../hbase/io/compress/zstd/ZstdDecompressor.java   |  59 +++++--
 .../io/compress/zstd/TestHFileCompressionZstd.java |  29 +++-
 .../hbase/io/compress/zstd/TestZstdCodec.java      |  68 +++++++-
 .../src/test/resources/zstd.test.data              | Bin 0 -> 1024000 bytes
 .../src/test/resources/zstd.test.dict              | Bin 0 -> 112640 bytes
 .../hbase/test/IntegrationTestLoadCommonCrawl.java | 184 ++++++++++++---------
 .../hadoop/hbase/mapreduce/HFileInputFormat.java   |   2 +-
 .../mapreduce/TestCellBasedHFileOutputFormat2.java |   2 +-
 .../hbase/mapreduce/TestHFileOutputFormat2.java    |   4 +-
 .../TestImportTSVWithVisibilityLabels.java         |   2 +-
 .../hadoop/hbase/mapreduce/TestImportTsv.java      |   2 +-
 .../org/apache/hadoop/hbase/io/hfile/HFile.java    |   5 +-
 .../apache/hadoop/hbase/io/hfile/HFileBlock.java   |  31 ++--
 .../hbase/io/hfile/HFileDataBlockEncoder.java      |  11 +-
 .../hbase/io/hfile/HFileDataBlockEncoderImpl.java  |  14 +-
 .../hadoop/hbase/io/hfile/HFilePrettyPrinter.java  |   2 +-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java     |  23 +--
 .../hadoop/hbase/io/hfile/HFileWriterImpl.java     |   2 +-
 .../hbase/io/hfile/NoOpDataBlockEncoder.java       |  10 +-
 .../store/region/HFileProcedurePrettyPrinter.java  |   2 +-
 .../apache/hadoop/hbase/regionserver/HStore.java   |   2 +-
 .../hadoop/hbase/regionserver/StoreFileReader.java |   9 +-
 .../apache/hadoop/hbase/util/CompressionTest.java  |   2 +-
 .../hadoop/hbase/HFilePerformanceEvaluation.java   |   8 +-
 .../hadoop/hbase/io/compress/HFileTestBase.java    |  26 +--
 .../hbase/io/encoding/TestDataBlockEncoders.java   |  45 ++---
 .../io/encoding/TestSeekToBlockWithEncoders.java   |  16 +-
 .../hadoop/hbase/io/hfile/TestCacheOnWrite.java    |   2 +-
 .../apache/hadoop/hbase/io/hfile/TestChecksum.java |  29 ++--
 .../apache/hadoop/hbase/io/hfile/TestHFile.java    |   4 +-
 .../hadoop/hbase/io/hfile/TestHFileBlock.java      |  42 +++--
 .../hadoop/hbase/io/hfile/TestHFileBlockIndex.java |   8 +-
 .../hbase/io/hfile/TestHFileDataBlockEncoder.java  |  26 +--
 .../hadoop/hbase/io/hfile/TestHFileEncryption.java |  14 +-
 .../TestHFileInlineToRootChunkConversion.java      |   2 +-
 .../hadoop/hbase/io/hfile/TestHFileReaderImpl.java |   6 +-
 .../hfile/TestHFileScannerImplReferenceCount.java  |   6 +-
 .../hadoop/hbase/io/hfile/TestHFileSeek.java       |   2 +-
 .../hadoop/hbase/io/hfile/TestHFileWriterV3.java   |   2 +-
 .../hfile/TestHFileWriterV3WithDataEncoders.java   |   4 +-
 .../apache/hadoop/hbase/io/hfile/TestReseekTo.java |   2 +-
 .../io/hfile/TestSeekBeforeWithInlineBlocks.java   |   2 +-
 .../apache/hadoop/hbase/io/hfile/TestSeekTo.java   |   6 +-
 .../hbase/regionserver/DataBlockEncodingTool.java  |   8 +-
 .../regionserver/TestCacheOnWriteInSchema.java     |   2 +-
 .../hbase/tool/TestLoadIncrementalHFiles.java      |   2 +-
 68 files changed, 857 insertions(+), 389 deletions(-)
 copy hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/{CompressionUtil.java => CanReinit.java} (73%)
 create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/DictionaryCache.java
 create mode 100644 hbase-compression/hbase-compression-zstd/src/test/resources/zstd.test.data
 create mode 100644 hbase-compression/hbase-compression-zstd/src/test/resources/zstd.test.dict

[hbase] 02/03: HBASE-26353 Support loadable dictionaries in hbase-compression-zstd (#3748)

Posted by ap...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 8ac0b5ed7fd499d22a74993d03991d9e477df7ae
Author: Andrew Purtell <ap...@apache.org>
AuthorDate: Tue Oct 19 13:37:24 2021 -0700

    HBASE-26353 Support loadable dictionaries in hbase-compression-zstd (#3748)
    
    ZStandard supports initialization of compressors and decompressors with a
    precomputed dictionary, which can dramatically improve both compression ratio
    and speed for tables with small values. For more details, please see
    
      The Case For Small Data Compression
      https://github.com/facebook/zstd#the-case-for-small-data-compression
    
    Signed-off-by: Duo Zhang <zh...@apache.org>
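
    For illustration, a minimal sketch of how a codec picks up a dictionary through
    the new hbase.io.compress.zstd.dictionary setting introduced here (the HDFS path
    is only an example; resource:// paths handled by DictionaryCache also work, as
    the test in this change shows):

      import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.hbase.HBaseConfiguration;
      import org.apache.hadoop.hbase.io.compress.zstd.ZstdCodec;
      import org.apache.hadoop.io.compress.Compressor;

      public class ZstdDictionarySketch {
        public static void main(String[] args) {
          Configuration conf = HBaseConfiguration.create();
          // Point the codec at a precomputed dictionary. DictionaryCache loads and
          // caches the bytes from a Hadoop FileSystem path or a resource:// path.
          conf.set(ZstdCodec.ZSTD_DICTIONARY_KEY,
            "hdfs:///hbase/dictionaries/example.dict"); // example path
          ZstdCodec codec = new ZstdCodec();
          codec.setConf(conf);
          // Compressors and decompressors created by the codec are initialized
          // with the loaded dictionary.
          Compressor compressor = codec.createCompressor();
        }
      }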
---
 .../hadoop/hbase/io/compress/CompressionUtil.java  |   2 +
 .../hadoop/hbase/io/compress/DictionaryCache.java  | 164 +++++++++++++++++++++
 .../hbase/io/compress/CompressionTestBase.java     |  69 ++++++---
 .../hadoop/hbase/io/compress/zstd/ZstdCodec.java   |  15 +-
 .../hbase/io/compress/zstd/ZstdCompressor.java     |  23 ++-
 .../hbase/io/compress/zstd/ZstdDecompressor.java   |  24 ++-
 .../hbase/io/compress/zstd/TestZstdCodec.java      |  68 ++++++++-
 .../src/test/resources/zstd.test.data              | Bin 0 -> 1024000 bytes
 .../src/test/resources/zstd.test.dict              | Bin 0 -> 112640 bytes
 9 files changed, 331 insertions(+), 34 deletions(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/CompressionUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/CompressionUtil.java
index becff76..1ebf124 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/CompressionUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/CompressionUtil.java
@@ -21,6 +21,8 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public class CompressionUtil {
 
+  private CompressionUtil() { }
+
   /**
    * Round up to the next power of two, unless the value would become negative (ints
    * are signed), in which case just return Integer.MAX_VALUE.
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/DictionaryCache.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/DictionaryCache.java
new file mode 100644
index 0000000..5e19e93
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/DictionaryCache.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hbase.io.compress;
+
+import java.io.ByteArrayOutputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder;
+import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader;
+import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache;
+
+/**
+ * A utility class for managing compressor/decompressor dictionary loading and caching of load
+ * results. Useful for any codec that can support changing dictionaries at runtime,
+ * such as ZStandard.
+ */
+@InterfaceAudience.Private
+public class DictionaryCache {
+
+  public static final String DICTIONARY_MAX_SIZE_KEY = "hbase.io.compress.dictionary.max.size";
+  public static final int DEFAULT_DICTIONARY_MAX_SIZE = 10 * 1024 * 1024;
+  public static final String RESOURCE_SCHEME = "resource://";
+
+  private static final Logger LOG = LoggerFactory.getLogger(DictionaryCache.class);
+  private static volatile LoadingCache<String, byte[]> CACHE;
+
+  private DictionaryCache() { }
+
+  /**
+   * Load a dictionary or return a previously cached load.
+   * @param conf configuration
+   * @param path the hadoop Path where the dictionary is located, as a String
+   * @return the dictionary bytes if successful, null otherwise
+   */
+  public static byte[] getDictionary(final Configuration conf, final String path)
+      throws IOException {
+    if (path == null || path.isEmpty()) {
+      return null;
+    }
+    // Create the dictionary loading cache if we haven't already
+    if (CACHE == null) {
+      synchronized (DictionaryCache.class) {
+        if (CACHE == null) {
+          final int maxSize = conf.getInt(DICTIONARY_MAX_SIZE_KEY, DEFAULT_DICTIONARY_MAX_SIZE);
+          CACHE = CacheBuilder.newBuilder()
+            .maximumSize(100)
+            .expireAfterAccess(10, TimeUnit.MINUTES)
+            .build(
+              new CacheLoader<String, byte[]>() {
+                public byte[] load(String s) throws Exception {
+                  byte[] bytes;
+                  if (path.startsWith(RESOURCE_SCHEME)) {
+                    bytes = loadFromResource(conf, path, maxSize);
+                  } else {
+                    bytes = loadFromHadoopFs(conf, path, maxSize);
+                  }
+                  LOG.info("Loaded dictionary from {} (size {})", s, bytes.length);
+                  return bytes;
+                }
+              });
+        }
+      }
+    }
+
+    // Get or load the dictionary for the given path
+    try {
+      return CACHE.get(path);
+    } catch (ExecutionException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  // Visible for testing
+  public static byte[] loadFromResource(final Configuration conf, final String s,
+      final int maxSize) throws IOException {
+    if (!s.startsWith(RESOURCE_SCHEME)) {
+      throw new IllegalArgumentException("Path does not start with " + RESOURCE_SCHEME);
+    }
+    final String path = s.substring(RESOURCE_SCHEME.length(), s.length());
+    final InputStream in = DictionaryCache.class.getClassLoader().getResourceAsStream(path);
+    if (in == null) {
+      throw new FileNotFoundException("Resource " + path + " not found");
+    }
+    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    final byte[] buffer = new byte[8192];
+    try {
+      int n, len = 0;
+      do {
+        n = in.read(buffer);
+        if (n > 0) {
+          len += n;
+          if (len > maxSize) {
+            throw new IllegalArgumentException("Dictionary " + s + " is too large" +
+              ", limit=" + maxSize);
+          }
+          baos.write(buffer, 0, n);
+        }
+      } while (n > 0);
+    } finally {
+      in.close();
+    }
+    return baos.toByteArray();
+  }
+
+  private static byte[] loadFromHadoopFs(final Configuration conf, final String s,
+      final int maxSize) throws IOException {
+    final Path path = new Path(s);
+    final FileSystem fs = FileSystem.get(path.toUri(), conf);
+    final FileStatus stat = fs.getFileStatus(path);
+    if (!stat.isFile()) {
+      throw new IllegalArgumentException(s + " is not a file");
+    }
+    if (stat.getLen() > maxSize) {
+      throw new IllegalArgumentException("Dictionary " + s + " is too large" +
+        ", size=" + stat.getLen() + ", limit=" + maxSize);
+    }
+    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    final byte[] buffer = new byte[8192];
+    try (final FSDataInputStream in = fs.open(path)) {
+      int n;
+      do {
+        n = in.read(buffer);
+        if (n > 0) {
+          baos.write(buffer, 0, n);
+        }
+      } while (n > 0);
+    }
+    return baos.toByteArray();
+  }
+
+  // Visible for testing
+  public static boolean contains(String dictionaryPath) {
+    if (CACHE != null) {
+      return CACHE.asMap().containsKey(dictionaryPath);
+    }
+    return false;
+  }
+
+}
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/compress/CompressionTestBase.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/compress/CompressionTestBase.java
index 616bf0b..e259e1b 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/compress/CompressionTestBase.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/compress/CompressionTestBase.java
@@ -17,12 +17,10 @@
 package org.apache.hadoop.hbase.io.compress;
 
 import static org.junit.Assert.assertTrue;
-
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.util.Arrays;
 import java.util.Random;
-
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -31,6 +29,8 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressionInputStream;
 import org.apache.hadoop.io.compress.CompressionOutputStream;
+import org.apache.hadoop.io.compress.Compressor;
+import org.apache.hadoop.io.compress.Decompressor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -39,11 +39,11 @@ public class CompressionTestBase {
 
   protected static final Logger LOG = LoggerFactory.getLogger(CompressionTestBase.class);
 
-  static final int LARGE_SIZE = 10 * 1024 * 1024;
-  static final int VERY_LARGE_SIZE = 100 * 1024 * 1024;
-  static final int BLOCK_SIZE = 4096;
+  protected static final int LARGE_SIZE = 10 * 1024 * 1024;
+  protected static final int VERY_LARGE_SIZE = 100 * 1024 * 1024;
+  protected static final int BLOCK_SIZE = 4096;
 
-  static final byte[] SMALL_INPUT;
+  protected static final byte[] SMALL_INPUT;
   static {
     // 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597
     SMALL_INPUT = new byte[1+1+2+3+5+8+13+21+34+55+89+144+233+377+610+987+1597];
@@ -67,15 +67,21 @@ public class CompressionTestBase {
     Arrays.fill(SMALL_INPUT, off, (off+=1597), (byte)'Q');
   }
 
-  protected void codecTest(final CompressionCodec codec, final byte[][] input)
-      throws Exception {
+  protected void codecTest(final CompressionCodec codec, final byte[][] input) throws Exception {
+    codecTest(codec, input, null);
+  }
+
+  protected void codecTest(final CompressionCodec codec, final byte[][] input,
+      final Integer expectedCompressedSize) throws Exception {
     // We do this in Compression.java
     ((Configurable)codec).getConf().setInt("io.file.buffer.size", 32 * 1024);
     // Compress
+    long start = EnvironmentEdgeManager.currentTime();
+    Compressor compressor = codec.createCompressor();
+    compressor.reinit(((Configurable)codec).getConf());
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    CompressionOutputStream out = codec.createOutputStream(baos);
+    CompressionOutputStream out = codec.createOutputStream(baos, compressor);
     int inLen = 0;
-    long start = EnvironmentEdgeManager.currentTime();
     for (int i = 0; i < input.length; i++) {
       out.write(input[i]);
       inLen += input[i].length;
@@ -85,9 +91,18 @@ public class CompressionTestBase {
     final byte[] compressed = baos.toByteArray();
     LOG.info("{} compressed {} bytes to {} bytes in {} ms", codec.getClass().getSimpleName(),
       inLen, compressed.length, end - start);
+    if (expectedCompressedSize != null) {
+      assertTrue("Expected compressed size does not match: (expected=" + expectedCompressedSize +
+        ", actual=" + compressed.length + ")", expectedCompressedSize == compressed.length);
+    }
     // Decompress
     final byte[] plain = new byte[inLen];
-    CompressionInputStream in = codec.createInputStream(new ByteArrayInputStream(compressed));
+    Decompressor decompressor = codec.createDecompressor();
+    if (decompressor instanceof CanReinit) {
+      ((CanReinit)decompressor).reinit(((Configurable)codec).getConf());
+    }
+    CompressionInputStream in = codec.createInputStream(new ByteArrayInputStream(compressed),
+      decompressor);
     start = EnvironmentEdgeManager.currentTime();
     IOUtils.readFully(in, plain, 0, plain.length);
     in.close();
@@ -113,29 +128,37 @@ public class CompressionTestBase {
   /**
    * Test with a large input (1MB) divided into blocks of 4KB.
    */
-  protected void codecLargeTest(final CompressionCodec codec, final double sigma) throws Exception {
-    RandomDistribution.DiscreteRNG zipf =
+  protected void codecLargeTest(final CompressionCodec codec, final double sigma)
+      throws Exception {
+    RandomDistribution.DiscreteRNG rng =
       new RandomDistribution.Zipf(new Random(), 0, Byte.MAX_VALUE, sigma);
     final byte[][] input = new byte[LARGE_SIZE/BLOCK_SIZE][BLOCK_SIZE];
-    for (int i = 0; i < input.length; i++) {
-      for (int j = 0; j < input[i].length; j++) {
-        input[i][j] = (byte)zipf.nextInt();
-      }
-    }
+    fill(rng, input);
     codecTest(codec, input);
   }
 
   /**
    * Test with a very large input (100MB) as a single input buffer.
    */
-  protected void codecVeryLargeTest(final CompressionCodec codec, final double sigma) throws Exception {
-    RandomDistribution.DiscreteRNG zipf =
+  protected void codecVeryLargeTest(final CompressionCodec codec, final double sigma)
+      throws Exception {
+    RandomDistribution.DiscreteRNG rng =
         new RandomDistribution.Zipf(new Random(), 0, Byte.MAX_VALUE, sigma);
     final byte[][] input = new byte[1][VERY_LARGE_SIZE];
-    for (int i = 0; i < VERY_LARGE_SIZE; i++) {
-      input[0][i] = (byte)zipf.nextInt();
-    }
+    fill(rng, input);
     codecTest(codec, input);
   }
 
+  protected static void fill(RandomDistribution.DiscreteRNG rng, byte[][] input) {
+    for (int i = 0; i < input.length; i++) {
+      fill(rng, input[i]);
+    }
+  }
+
+  protected static void fill(RandomDistribution.DiscreteRNG rng, byte[] input) {
+    for (int i = 0; i < input.length; i++) {
+      input[i] = (byte) rng.nextInt();
+    }
+  }
+
 }
diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java
index f933896..62a1189 100644
--- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java
+++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java
@@ -23,6 +23,7 @@ import java.io.OutputStream;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.hbase.io.compress.DictionaryCache;
 import org.apache.hadoop.io.compress.BlockCompressorStream;
 import org.apache.hadoop.io.compress.BlockDecompressorStream;
 import org.apache.hadoop.io.compress.CompressionCodec;
@@ -42,6 +43,7 @@ public class ZstdCodec implements Configurable, CompressionCodec {
 
   public static final String ZSTD_LEVEL_KEY = "hbase.io.compress.zstd.level";
   public static final String ZSTD_BUFFER_SIZE_KEY = "hbase.io.compress.zstd.buffersize";
+  public static final String ZSTD_DICTIONARY_KEY = "hbase.io.compress.zstd.dictionary";
 
   private Configuration conf;
 
@@ -61,12 +63,12 @@ public class ZstdCodec implements Configurable, CompressionCodec {
 
   @Override
   public Compressor createCompressor() {
-    return new ZstdCompressor(getLevel(conf), getBufferSize(conf));
+    return new ZstdCompressor(getLevel(conf), getBufferSize(conf), getDictionary(conf));
   }
 
   @Override
   public Decompressor createDecompressor() {
-    return new ZstdDecompressor(getBufferSize(conf));
+    return new ZstdDecompressor(getBufferSize(conf), getDictionary(conf));
   }
 
   @Override
@@ -124,4 +126,13 @@ public class ZstdCodec implements Configurable, CompressionCodec {
     return size > 0 ? size : 256 * 1024; // Don't change this default
   }
 
+  static byte[] getDictionary(final Configuration conf) {
+    String path = conf.get(ZSTD_DICTIONARY_KEY);
+    try {
+      return DictionaryCache.getDictionary(conf, path);
+    } catch (IOException e) {
+      throw new RuntimeException("Unable to load dictionary at " + path, e);
+    }
+  }
+
 }
diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java
index 16ec438..b25a5d2 100644
--- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java
+++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java
@@ -28,6 +28,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.github.luben.zstd.Zstd;
+import com.github.luben.zstd.ZstdDictCompress;
 
 /**
  * Hadoop compressor glue for zstd-jni.
@@ -40,13 +41,21 @@ public class ZstdCompressor implements CanReinit, Compressor {
   protected ByteBuffer inBuf, outBuf;
   protected boolean finish, finished;
   protected long bytesRead, bytesWritten;
+  protected ZstdDictCompress dict;
 
-  ZstdCompressor(final int level, final int bufferSize) {
+  ZstdCompressor(final int level, final int bufferSize, final byte[] dictionary) {
     this.level = level;
     this.bufferSize = bufferSize;
     this.inBuf = ByteBuffer.allocateDirect(bufferSize);
     this.outBuf = ByteBuffer.allocateDirect(bufferSize);
     this.outBuf.position(bufferSize);
+    if (dictionary != null) {
+      this.dict = new ZstdDictCompress(dictionary, level);
+    }
+  }
+
+  ZstdCompressor(final int level, final int bufferSize) {
+    this(level, bufferSize, null);
   }
 
   @Override
@@ -74,7 +83,12 @@ public class ZstdCompressor implements CanReinit, Compressor {
         } else {
           outBuf.clear();
         }
-        int written = Zstd.compress(outBuf, inBuf, level);
+        int written;
+        if (dict != null) {
+          written = Zstd.compress(outBuf, inBuf, dict);
+        } else {
+          written = Zstd.compress(outBuf, inBuf, level);
+        }
         bytesWritten += written;
         inBuf.clear();
         LOG.trace("compress: compressed {} -> {} (level {})", uncompressed, written, level);
@@ -133,6 +147,11 @@ public class ZstdCompressor implements CanReinit, Compressor {
     if (conf != null) {
       // Level might have changed
       level = ZstdCodec.getLevel(conf);
+      // Dictionary may have changed
+      byte[] b = ZstdCodec.getDictionary(conf);
+      if (b != null) {
+        dict = new ZstdDictCompress(b, level);
+      }
       // Buffer size might have changed
       int newBufferSize = ZstdCodec.getBufferSize(conf);
       if (bufferSize != newBufferSize) {
diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java
index a3d77f5..473eac7 100644
--- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java
+++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java
@@ -26,6 +26,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import com.github.luben.zstd.Zstd;
+import com.github.luben.zstd.ZstdDictDecompress;
 
 /**
  * Hadoop decompressor glue for zstd-java.
@@ -38,12 +39,20 @@ public class ZstdDecompressor implements CanReinit, Decompressor {
   protected int bufferSize;
   protected int inLen;
   protected boolean finished;
+  protected ZstdDictDecompress dict;
 
-  ZstdDecompressor(final int bufferSize) {
+  ZstdDecompressor(final int bufferSize, final byte[] dictionary) {
     this.bufferSize = bufferSize;
     this.inBuf = ByteBuffer.allocateDirect(bufferSize);
     this.outBuf = ByteBuffer.allocateDirect(bufferSize);
     this.outBuf.position(bufferSize);
+    if (dictionary != null) {
+      this.dict = new ZstdDictDecompress(dictionary);
+    }
+  }
+
+  ZstdDecompressor(final int bufferSize) {
+    this(bufferSize, null);
   }
 
   @Override
@@ -60,7 +69,11 @@ public class ZstdDecompressor implements CanReinit, Decompressor {
       inLen -= remaining;
       outBuf.clear();
       int written;
-      written = Zstd.decompress(outBuf, inBuf);
+      if (dict != null) {
+        written = Zstd.decompress(outBuf, inBuf, dict);
+      } else {
+        written = Zstd.decompress(outBuf, inBuf);
+      }
       inBuf.clear();
       LOG.trace("decompress: decompressed {} -> {}", remaining, written);
       outBuf.flip();
@@ -117,7 +130,7 @@ public class ZstdDecompressor implements CanReinit, Decompressor {
   @Override
   public void setDictionary(final byte[] b, final int off, final int len) {
     LOG.trace("setDictionary: off={} len={}", off, len);
-    throw new UnsupportedOperationException("setDictionary not supported");
+    this.dict = new ZstdDictDecompress(b, off, len);
   }
 
   @Override
@@ -143,6 +156,11 @@ public class ZstdDecompressor implements CanReinit, Decompressor {
   public void reinit(final Configuration conf) {
     LOG.trace("reinit");
     if (conf != null) {
+      // Dictionary may have changed
+      byte[] b = ZstdCodec.getDictionary(conf);
+      if (b != null) {
+        dict = new ZstdDictDecompress(b);
+      }
       // Buffer size might have changed
       int newBufferSize = ZstdCodec.getBufferSize(conf);
       if (bufferSize != newBufferSize) {
diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java
index 6bcb2aa..7745c9a 100644
--- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java
+++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java
@@ -16,11 +16,20 @@
  */
 package org.apache.hadoop.hbase.io.compress.zstd;
 
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Random;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.io.compress.CompressionTestBase;
+import org.apache.hadoop.hbase.io.compress.DictionaryCache;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.RandomDistribution;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -33,20 +42,20 @@ public class TestZstdCodec extends CompressionTestBase {
       HBaseClassTestRule.forClass(TestZstdCodec.class);
 
   @Test
-  public void testzstdCodecSmall() throws Exception {
+  public void testZstdCodecSmall() throws Exception {
     codecSmallTest(new ZstdCodec());
   }
 
   @Test
-  public void testzstdCodecLarge() throws Exception {
+  public void testZstdCodecLarge() throws Exception {
     codecLargeTest(new ZstdCodec(), 1.1); // poor compressability
     codecLargeTest(new ZstdCodec(),   2);
     codecLargeTest(new ZstdCodec(),  10); // very high compressability
   }
 
   @Test
-  public void testzstdCodecVeryLarge() throws Exception {
-    Configuration conf = new Configuration();
+  public void testZstdCodecVeryLarge() throws Exception {
+    Configuration conf = HBaseConfiguration.create();
     // ZStandard levels range from 1 to 22.
     // Level 22 might take up to a minute to complete. 3 is the Hadoop default, and will be fast.
     conf.setInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_KEY, 3);
@@ -55,4 +64,55 @@ public class TestZstdCodec extends CompressionTestBase {
     codecVeryLargeTest(codec, 3); // like text
   }
 
+  @Test
+  public void testZstdCodecWithDictionary() throws Exception {
+    // zstd.test.data compressed with zstd.test.dict at level 3 will produce a result of
+    // 365533 bytes
+    final int expectedCompressedSize = 365533;
+    Configuration conf = HBaseConfiguration.create();
+    conf.setInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_KEY, 3);
+    // Configure for dictionary available in test resources
+    final String dictionaryPath = DictionaryCache.RESOURCE_SCHEME + "zstd.test.dict";
+    conf.set(ZstdCodec.ZSTD_DICTIONARY_KEY, dictionaryPath);
+    // Load test data from test resources
+    // This will throw an IOException if the test data cannot be loaded
+    final byte[] testData = DictionaryCache.loadFromResource(conf,
+      DictionaryCache.RESOURCE_SCHEME + "zstd.test.data", /* maxSize */ 1024*1024);
+    assertNotNull("Failed to load test data", testData);
+    // Run the test
+    // This will throw an IOException of some kind if there is a problem loading or using the
+    // dictionary.
+    ZstdCodec codec = new ZstdCodec();
+    codec.setConf(conf);
+    codecTest(codec, new byte[][] { testData }, expectedCompressedSize);
+    // Assert that the dictionary was actually loaded
+    assertTrue("Dictionary was not loaded by codec", DictionaryCache.contains(dictionaryPath));
+  }
+
+  //
+  // For generating the test data in src/test/resources/
+  //
+
+  public static void main(String[] args) throws IOException {
+    // Write 1000 1k blocks for training to the specified file
+    // Train with:
+    //   zstd --train-fastcover=k=32,b=8 -B1024 -o <dictionary_file> <input_file>
+    if (args.length < 1) {
+      System.err.println("Usage: TestZstdCodec <outFile>");
+      System.exit(-1);
+    }
+    final RandomDistribution.DiscreteRNG rng =
+      new RandomDistribution.Zipf(new Random(), 0, Byte.MAX_VALUE, 2);
+    final File outFile = new File(args[0]);
+    final byte[] buffer = new byte[1024];
+    System.out.println("Generating " + outFile);
+    try (FileOutputStream os = new FileOutputStream(outFile)) {
+      for (int i = 0; i < 1000; i++) {
+        fill(rng, buffer);
+        os.write(buffer);
+      }
+    }
+    System.out.println("Done");
+  }
+
 }
diff --git a/hbase-compression/hbase-compression-zstd/src/test/resources/zstd.test.data b/hbase-compression/hbase-compression-zstd/src/test/resources/zstd.test.data
new file mode 100644
index 0000000..a497af5
Binary files /dev/null and b/hbase-compression/hbase-compression-zstd/src/test/resources/zstd.test.data differ
diff --git a/hbase-compression/hbase-compression-zstd/src/test/resources/zstd.test.dict b/hbase-compression/hbase-compression-zstd/src/test/resources/zstd.test.dict
new file mode 100644
index 0000000..e5b79d2
Binary files /dev/null and b/hbase-compression/hbase-compression-zstd/src/test/resources/zstd.test.dict differ

[hbase] 03/03: HBASE-26349 Improve recent change to IntegrationTestLoadCommonCrawl (#3744)

Posted by ap...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 42ff3ac22ea3050a290be8049e6ffc28754bdc53
Author: Andrew Purtell <ap...@apache.org>
AuthorDate: Tue Oct 19 13:45:55 2021 -0700

    HBASE-26349 Improve recent change to IntegrationTestLoadCommonCrawl (#3744)
    
    Use a hybrid logical clock for timestamping entries.
    
    Using BufferedMutator without HLC was not good because we assign client timestamps,
    and the store loop is fast enough that, on rare occasions, two temporally adjacent
    URLs in the set of WARCs are equivalent while the wall-clock timestamp does not
    advance between their writes, which later leads to a rare false positive CORRUPT
    finding.
    
    While making these changes, also support direct S3N paths as input paths on the
    command line.
    
    Signed-off-by: Viraj Jasani <vj...@apache.org>
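
    The timestamping scheme is small enough to sketch standalone (the class and
    method names here are illustrative; the committed version is the private
    getCurrentTime() helper in the diff below):

      import java.util.concurrent.atomic.AtomicLong;
      import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

      // Hybrid logical clock sketch: timestamps keep advancing even when two
      // writes land within the same wall-clock millisecond.
      final class HybridTimestamp {
        private static final AtomicLong COUNTER = new AtomicLong();

        static long next() {
          // Wall-clock millis in the high bits, a rolling 16-bit counter in the
          // low bits, and the sign bit masked off so timestamps stay non-negative.
          return ((EnvironmentEdgeManager.currentTime() << 16) & 0x7fff_ffff_ffff_0000L)
              | (COUNTER.getAndIncrement() & 0xffffL);
        }
      }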
---
 .../hbase/test/IntegrationTestLoadCommonCrawl.java | 184 ++++++++++++---------
 1 file changed, 104 insertions(+), 80 deletions(-)

diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadCommonCrawl.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadCommonCrawl.java
index d98cb8f..7681da0 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadCommonCrawl.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadCommonCrawl.java
@@ -26,12 +26,12 @@ import java.io.InputStreamReader;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
 import java.util.HashSet;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.zip.GZIPInputStream;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -84,7 +84,6 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
 import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
 
 /**
@@ -163,17 +162,17 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase {
     REFERENCED, UNREFERENCED, CORRUPT
   }
 
-  Path warcFileInputDir = null;
-  Path outputDir = null;
-  String[] args;
+  protected Path warcFileInputDir = null;
+  protected Path outputDir = null;
+  protected String[] args;
 
-  protected int runLoader(Path warcFileInputDir, Path outputDir) throws Exception {
+  protected int runLoader(final Path warcFileInputDir, final Path outputDir) throws Exception {
     Loader loader = new Loader();
     loader.setConf(conf);
     return loader.run(warcFileInputDir, outputDir);
   }
 
-  protected int runVerify(Path inputDir) throws Exception {
+  protected int runVerify(final Path inputDir) throws Exception {
     Verify verify = new Verify();
     verify.setConf(conf);
     return verify.run(inputDir);
@@ -208,7 +207,7 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase {
   }
 
   @Override
-  protected void processOptions(CommandLine cmd) {
+  protected void processOptions(final CommandLine cmd) {
     processBaseOptions(cmd);
     args = cmd.getArgs();
   }
@@ -232,7 +231,7 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase {
     }
   }
 
-  static TableName getTablename(Configuration c) {
+  static TableName getTablename(final Configuration c) {
     return TableName.valueOf(c.get(TABLE_NAME_KEY, DEFAULT_TABLE_NAME));
   }
 
@@ -421,7 +420,7 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase {
     private static final Logger LOG = LoggerFactory.getLogger(Loader.class);
     private static final String USAGE = "Loader <warInputDir | warFileList> <outputDir>";
 
-    void createSchema(TableName tableName) throws IOException {
+    void createSchema(final TableName tableName) throws IOException {
 
       try (Connection conn = ConnectionFactory.createConnection(getConf());
            Admin admin = conn.getAdmin()) {
@@ -477,24 +476,24 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase {
       }
     }
 
-    int run(Path warcFileInput, Path outputDir)
+    int run(final Path warcFileInput, final Path outputDir)
         throws IOException, ClassNotFoundException, InterruptedException {
 
       createSchema(getTablename(getConf()));
 
-      Job job = Job.getInstance(getConf());
+      final Job job = Job.getInstance(getConf());
       job.setJobName(Loader.class.getName());
       job.setNumReduceTasks(0);
       job.setJarByClass(getClass());
       job.setMapperClass(LoaderMapper.class);
       job.setInputFormatClass(WARCInputFormat.class);
-      FileSystem fs = FileSystem.get(warcFileInput.toUri(), getConf());
+      final FileSystem fs = FileSystem.get(warcFileInput.toUri(), getConf());
       if (fs.getFileStatus(warcFileInput).isDirectory()) {
         LOG.info("Using directory as WARC input path: " + warcFileInput);
         FileInputFormat.setInputPaths(job, warcFileInput);
-      } else {
+      } else if (warcFileInput.toUri().getScheme().equals("file")) {
         LOG.info("Getting WARC input paths from file: " + warcFileInput);
-        List<Path> paths = new LinkedList<Path>();
+        final List<Path> paths = new ArrayList<Path>();
         try (FSDataInputStream is = fs.open(warcFileInput)) {
           InputStreamReader reader;
           if (warcFileInput.getName().toLowerCase().endsWith(".gz")) {
@@ -511,6 +510,8 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase {
         }
         LOG.info("Read " + paths.size() + " WARC input paths from " + warcFileInput);
         FileInputFormat.setInputPaths(job, paths.toArray(new Path[paths.size()]));
+      } else {
+        FileInputFormat.setInputPaths(job, warcFileInput);
       }
       job.setOutputFormatClass(SequenceFileOutputFormat.class);
       SequenceFileOutputFormat.setOutputPath(job, outputDir);
@@ -550,20 +551,19 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase {
     public static class LoaderMapper
         extends Mapper<LongWritable, WARCWritable, HBaseKeyWritable, BytesWritable> {
 
-      Configuration conf;
-      Connection conn;
-      BufferedMutator mutator;
+      protected Configuration conf;
+      protected Connection conn;
+      protected BufferedMutator mutator;
 
       @Override
-      protected void setup(Context context) throws IOException, InterruptedException {
+      protected void setup(final Context context) throws IOException, InterruptedException {
         conf = context.getConfiguration();
         conn = ConnectionFactory.createConnection(conf);
         mutator = conn.getBufferedMutator(getTablename(conf));
-        mutator.setWriteBufferPeriodicFlush(10 * 1000); // default is 1 sec, increase to 10
       }
 
       @Override
-      protected void cleanup(Context context) throws IOException, InterruptedException {
+      protected void cleanup(final Context context) throws IOException, InterruptedException {
         try {
           mutator.close();
         } catch (Exception e) {
@@ -577,16 +577,15 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase {
       }
 
       @Override
-      protected void map(LongWritable key, WARCWritable value, Context output)
+      protected void map(final LongWritable key, final WARCWritable value, final Context output)
           throws IOException, InterruptedException {
-        WARCRecord.Header warcHeader = value.getRecord().getHeader();
-        String recordID = warcHeader.getRecordID();
-        String targetURI = warcHeader.getTargetURI();
+        final WARCRecord.Header warcHeader = value.getRecord().getHeader();
+        final String recordID = warcHeader.getRecordID();
+        final String targetURI = warcHeader.getTargetURI();
         if (warcHeader.getRecordType().equals("response") && targetURI != null) {
-          String contentType = warcHeader.getField("WARC-Identified-Payload-Type");
+          final String contentType = warcHeader.getField("WARC-Identified-Payload-Type");
           if (contentType != null) {
             LOG.info("Processing uri=\"" + targetURI + "\", id=" + recordID);
-            long now = EnvironmentEdgeManager.currentTime();
 
             // Make row key
 
@@ -604,62 +603,63 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase {
 
             // Get the content and calculate the CRC64
 
-            byte[] content = value.getRecord().getContent();
-            CRC64 crc = new CRC64();
+            final byte[] content = value.getRecord().getContent();
+            final CRC64 crc = new CRC64();
             crc.update(content);
-            long crc64 = crc.getValue();
+            final long crc64 = crc.getValue();
 
             // Store to HBase
 
-            Put put = new Put(rowKey);
-            put.addColumn(CONTENT_FAMILY_NAME, CONTENT_QUALIFIER, now, content);
-            put.addColumn(INFO_FAMILY_NAME, CONTENT_LENGTH_QUALIFIER, now,
+            final long ts = getCurrentTime();
+            final Put put = new Put(rowKey);
+            put.addColumn(CONTENT_FAMILY_NAME, CONTENT_QUALIFIER, ts, content);
+            put.addColumn(INFO_FAMILY_NAME, CONTENT_LENGTH_QUALIFIER, ts,
               Bytes.toBytes(content.length));
-            put.addColumn(INFO_FAMILY_NAME, CONTENT_TYPE_QUALIFIER, now,
+            put.addColumn(INFO_FAMILY_NAME, CONTENT_TYPE_QUALIFIER, ts,
               Bytes.toBytes(contentType));
-            put.addColumn(INFO_FAMILY_NAME, CRC_QUALIFIER, now, Bytes.toBytes(crc64));
-            put.addColumn(INFO_FAMILY_NAME, RECORD_ID_QUALIFIER, now, Bytes.toBytes(recordID));
-            put.addColumn(INFO_FAMILY_NAME, TARGET_URI_QUALIFIER, now, Bytes.toBytes(targetURI));
-            put.addColumn(INFO_FAMILY_NAME, DATE_QUALIFIER, now,
+            put.addColumn(INFO_FAMILY_NAME, CRC_QUALIFIER, ts, Bytes.toBytes(crc64));
+            put.addColumn(INFO_FAMILY_NAME, RECORD_ID_QUALIFIER, ts, Bytes.toBytes(recordID));
+            put.addColumn(INFO_FAMILY_NAME, TARGET_URI_QUALIFIER, ts, Bytes.toBytes(targetURI));
+            put.addColumn(INFO_FAMILY_NAME, DATE_QUALIFIER, ts,
               Bytes.toBytes(warcHeader.getDateString()));
-            String ipAddr = warcHeader.getField("WARC-IP-Address");
+            final String ipAddr = warcHeader.getField("WARC-IP-Address");
             if (ipAddr != null) {
-              put.addColumn(INFO_FAMILY_NAME, IP_ADDRESS_QUALIFIER, now, Bytes.toBytes(ipAddr));
+              put.addColumn(INFO_FAMILY_NAME, IP_ADDRESS_QUALIFIER, ts, Bytes.toBytes(ipAddr));
             }
             mutator.mutate(put);
 
             // Write records out for later verification, one per HBase field except for the
             // content record, which will be verified by CRC64.
 
-            output.write(new HBaseKeyWritable(rowKey, INFO_FAMILY_NAME, CRC_QUALIFIER, now),
+            output.write(new HBaseKeyWritable(rowKey, INFO_FAMILY_NAME, CRC_QUALIFIER, ts),
               new BytesWritable(Bytes.toBytes(crc64)));
             output.write(new HBaseKeyWritable(rowKey, INFO_FAMILY_NAME, CONTENT_LENGTH_QUALIFIER,
-              now), new BytesWritable(Bytes.toBytes(content.length)));
+              ts), new BytesWritable(Bytes.toBytes(content.length)));
             output.write(new HBaseKeyWritable(rowKey, INFO_FAMILY_NAME, CONTENT_TYPE_QUALIFIER,
-              now), new BytesWritable(Bytes.toBytes(contentType)));
+              ts), new BytesWritable(Bytes.toBytes(contentType)));
             output.write(new HBaseKeyWritable(rowKey, INFO_FAMILY_NAME, RECORD_ID_QUALIFIER,
-              now), new BytesWritable(Bytes.toBytes(recordID)));
+              ts), new BytesWritable(Bytes.toBytes(recordID)));
             output.write(new HBaseKeyWritable(rowKey, INFO_FAMILY_NAME, TARGET_URI_QUALIFIER,
-              now), new BytesWritable(Bytes.toBytes(targetURI)));
-            output.write(new HBaseKeyWritable(rowKey, INFO_FAMILY_NAME, DATE_QUALIFIER, now),
+              ts), new BytesWritable(Bytes.toBytes(targetURI)));
+            output.write(new HBaseKeyWritable(rowKey, INFO_FAMILY_NAME, DATE_QUALIFIER, ts),
               new BytesWritable(Bytes.toBytes(warcHeader.getDateString())));
             if (ipAddr != null) {
               output.write(new HBaseKeyWritable(rowKey, INFO_FAMILY_NAME, IP_ADDRESS_QUALIFIER,
-                now), new BytesWritable(Bytes.toBytes(ipAddr)));
+                ts), new BytesWritable(Bytes.toBytes(ipAddr)));
             }
           }
         }
       }
 
-      private byte[] rowKeyFromTargetURI(String targetUri)
+      private byte[] rowKeyFromTargetURI(final String targetUri)
           throws URISyntaxException, IllegalArgumentException {
-        URI uri = new URI(targetUri);
+        final URI uri = new URI(targetUri);
         // Ignore the scheme
         // Reverse the components of the hostname
         String reversedHost;
         if (uri.getHost() != null) {
-          StringBuffer sb = new StringBuffer();
-          String[] hostComponents = uri.getHost().split("\\.");
+          final StringBuilder sb = new StringBuilder();
+          final String[] hostComponents = uri.getHost().split("\\.");
           for (int i = hostComponents.length - 1; i >= 0; i--) {
             sb.append(hostComponents[i]);
             if (i != 0) {
@@ -670,7 +670,7 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase {
         } else {
           throw new IllegalArgumentException("URI is missing host component");
         }
-        StringBuffer sb = new StringBuffer();
+        final StringBuilder sb = new StringBuilder();
         sb.append(reversedHost);
         if (uri.getPort() >= 0) {
           sb.append(':');
@@ -700,7 +700,7 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase {
 
   public static class OneFilePerMapperSFIF<K, V> extends SequenceFileInputFormat<K, V> {
     @Override
-    protected boolean isSplitable(JobContext context, Path filename) {
+    protected boolean isSplitable(final JobContext context, final Path filename) {
       return false;
     }
   }
@@ -710,7 +710,8 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase {
     public static final Logger LOG = LoggerFactory.getLogger(Verify.class);
     public static final String USAGE = "Verify <inputDir>";
 
-    int run(Path inputDir) throws IOException, ClassNotFoundException, InterruptedException {
+    int run(final Path inputDir)
+        throws IOException, ClassNotFoundException, InterruptedException {
       Job job = Job.getInstance(getConf());
       job.setJobName(Verify.class.getName());
       job.setJarByClass(getClass());
@@ -725,10 +726,18 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase {
       if (!success) {
         LOG.error("Failure during job " + job.getJobID());
       }
-      Counters counters = job.getCounters();
+      final Counters counters = job.getCounters();
       for (Counts c: Counts.values()) {
         LOG.info(c + ": " + counters.findCounter(c).getValue());
       }
+      if (counters.findCounter(Counts.UNREFERENCED).getValue() > 0) {
+        LOG.error("Nonzero UNREFERENCED count from job " + job.getJobID());
+        success = false;
+      }
+      if (counters.findCounter(Counts.CORRUPT).getValue() > 0) {
+        LOG.error("Nonzero CORRUPT count from job " + job.getJobID());
+        success = false;
+      }
       return success ? 0 : 1;
     }
 
@@ -749,44 +758,51 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase {
     public static class VerifyMapper
         extends Mapper<HBaseKeyWritable, BytesWritable, NullWritable, NullWritable> {
 
-      Connection conn;
-      Table table;
+      private Connection conn;
+      private Table table;
 
       @Override
-      protected void setup(Context context) throws IOException, InterruptedException {
+      protected void setup(final Context context) throws IOException, InterruptedException {
         conn = ConnectionFactory.createConnection(context.getConfiguration());
         table = conn.getTable(getTablename(conn.getConfiguration()));
       }
 
       @Override
-      protected void cleanup(Context context) throws IOException ,InterruptedException {
-        table.close();
-        conn.close();
+      protected void cleanup(final Context context) throws IOException, InterruptedException {
+        try {
+          table.close();
+        } catch (Exception e) {
+          LOG.warn("Exception closing Table", e);
+        }
+        try {
+          conn.close();
+        } catch (Exception e) {
+          LOG.warn("Exception closing Connection", e);
+        }
       }
 
       @Override
-      protected void map(HBaseKeyWritable key, BytesWritable value, Context output)
-          throws IOException, InterruptedException {
-
-        byte[] row = Bytes.copy(key.getRowArray(), key.getRowOffset(), key.getRowLength());
-        byte[] family = Bytes.copy(key.getFamilyArray(), key.getFamilyOffset(),
+      protected void map(final HBaseKeyWritable key, final BytesWritable value,
+          final Context output) throws IOException, InterruptedException {
+        final byte[] row = Bytes.copy(key.getRowArray(), key.getRowOffset(), key.getRowLength());
+        final byte[] family = Bytes.copy(key.getFamilyArray(), key.getFamilyOffset(),
           key.getFamilyLength());
-        byte[] qualifier = Bytes.copy(key.getQualifierArray(), key.getQualifierOffset(),
+        final byte[] qualifier = Bytes.copy(key.getQualifierArray(), key.getQualifierOffset(),
           key.getQualifierLength());
-        long ts = key.getTimestamp();
-        int retries = VERIFICATION_READ_RETRIES;
+        final long ts = key.getTimestamp();
 
+        int retries = VERIFICATION_READ_RETRIES;
         while (true) {
 
           if (Bytes.equals(INFO_FAMILY_NAME, family) &&
               Bytes.equals(CRC_QUALIFIER, qualifier)) {
 
-            long expectedCRC64 = Bytes.toLong(value.getBytes(), 0, value.getLength());
-            Result result = table.get(new Get(row)
+            final long expectedCRC64 = Bytes.toLong(value.getBytes(), 0, value.getLength());
+            final Result result = table.get(new Get(row)
               .addColumn(CONTENT_FAMILY_NAME, CONTENT_QUALIFIER)
               .addColumn(INFO_FAMILY_NAME, CRC_QUALIFIER)
               .setTimestamp(ts));
-            byte[] content = result.getValue(CONTENT_FAMILY_NAME, CONTENT_QUALIFIER);
+            final byte[] content = result.getValue(CONTENT_FAMILY_NAME, CONTENT_QUALIFIER);
             if (content == null) {
               if (retries-- > 0) {
                 continue;
@@ -795,18 +811,15 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase {
               output.getCounter(Counts.UNREFERENCED).increment(1);
               return;
             } else {
-              CRC64 crc = new CRC64();
+              final CRC64 crc = new CRC64();
               crc.update(content);
               if (crc.getValue() != expectedCRC64) {
-                if (retries-- > 0) {
-                  continue;
-                }
                 LOG.error("Row " + Bytes.toStringBinary(row) + ": corrupt content");
                 output.getCounter(Counts.CORRUPT).increment(1);
                 return;
               }
             }
-            byte[] crc = result.getValue(INFO_FAMILY_NAME, CRC_QUALIFIER);
+            final byte[] crc = result.getValue(INFO_FAMILY_NAME, CRC_QUALIFIER);
             if (crc == null) {
               if (retries-- > 0) {
                 continue;
@@ -826,10 +839,10 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase {
 
           } else {
 
-            Result result = table.get(new Get(row)
+            final Result result = table.get(new Get(row)
               .addColumn(family, qualifier)
               .setTimestamp(ts));
-            byte[] bytes = result.getValue(family, qualifier);
+            final byte[] bytes = result.getValue(family, qualifier);
             if (bytes == null) {
               if (retries-- > 0) {
                 continue;
@@ -862,4 +875,15 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase {
     }
   }
 
+  private static final AtomicLong counter = new AtomicLong();
+
+  private static long getCurrentTime() {
+    // Typical hybrid logical clock scheme.
+    // Take the current time, shift by 16 bits and zero those bits, and replace those bits
+    // with the low 16 bits of the atomic counter. Mask off the high bit too because timestamps
+    // cannot be negative.
+    return ((EnvironmentEdgeManager.currentTime() << 16) & 0x7fff_ffff_ffff_0000L) |
+        (counter.getAndIncrement() & 0xffffL);
+  }
+
 }

[hbase] 01/03: HBASE-26316 Per-table or per-CF compression codec setting overrides (#3730)

Posted by ap...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit b6bb18022f2ba80ec13a96d28178709bc45c6803
Author: Andrew Purtell <ap...@apache.org>
AuthorDate: Tue Oct 19 12:04:25 2021 -0700

    HBASE-26316 Per-table or per-CF compression codec setting overrides (#3730)
    
    We get and retain Compressor instances in HFileBlockDefaultEncodingContext,
    and could in theory call Compressor#reinit when setting up the context,
    to update compression parameters like level and buffer size, but we do
    not plumb through the CompoundConfiguration from the Store into the
    encoding context. As a consequence we can only update codec parameters
    globally in system site conf files.
    
    Fine-grained configurability is important for algorithms like ZStandard
    (ZSTD), which offers more than 20 compression levels: at level 1 it is
    almost as fast as LZ4, while at higher levels it uses computationally
    expensive techniques to rival LZMA in compression ratio, at the cost of
    significantly reduced compression throughput. The ZSTD level that should
    be set for a given column family or table will therefore vary by use case.
    
    Signed-off-by: Viraj Jasani <vj...@apache.org>
    
    Conflicts:
    	hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java
    	hbase-server/src/test/java/org/apache/hadoop/hbase/io/compress/HFileTestBase.java
    	hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
    	hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java
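
    As a sketch of what the override enables at the schema level (table name,
    family name, and the chosen level are examples, assuming a cluster running
    with this change):

      import org.apache.hadoop.hbase.TableName;
      import org.apache.hadoop.hbase.client.Admin;
      import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
      import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
      import org.apache.hadoop.hbase.io.compress.Compression;
      import org.apache.hadoop.hbase.util.Bytes;

      public class PerFamilyZstdLevelSketch {
        // Create a table whose single column family overrides the ZSTD level,
        // while the rest of the cluster keeps the site-wide default.
        static void createTable(Admin admin) throws java.io.IOException {
          admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("c"))
              .setCompressionType(Compression.Algorithm.ZSTD)
              .setConfiguration("hbase.io.compress.zstd.level", "6")
              .build())
            .build());
        }
      }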
---
 .../apache/hadoop/hbase/io/compress/CanReinit.java | 31 +++++++++++++++
 .../hadoop/hbase/io/compress/Compression.java      | 34 ----------------
 .../io/encoding/AbstractDataBlockEncoder.java      | 11 ++++--
 .../hadoop/hbase/io/encoding/DataBlockEncoder.java | 10 ++++-
 .../hadoop/hbase/io/encoding/EncodedDataBlock.java | 16 +++++---
 .../encoding/HFileBlockDefaultDecodingContext.java | 27 +++++++++++--
 .../encoding/HFileBlockDefaultEncodingContext.java | 16 ++++++--
 .../compress/aircompressor/HadoopCompressor.java   |  3 +-
 .../aircompressor/TestHFileCompressionLz4.java     | 10 ++++-
 .../aircompressor/TestHFileCompressionLzo.java     | 10 ++++-
 .../aircompressor/TestHFileCompressionSnappy.java  | 10 ++++-
 .../aircompressor/TestHFileCompressionZstd.java    | 10 ++++-
 .../hbase/io/compress/lz4/Lz4Compressor.java       |  3 +-
 .../io/compress/lz4/TestHFileCompressionLz4.java   | 10 ++++-
 .../hbase/io/compress/xerial/SnappyCompressor.java |  3 +-
 .../xerial/TestHFileCompressionSnappy.java         | 10 ++++-
 .../io/compress/xz/TestHFileCompressionLzma.java   | 29 +++++++++++++-
 .../hbase/io/compress/zstd/ZstdCompressor.java     | 17 ++++----
 .../hbase/io/compress/zstd/ZstdDecompressor.java   | 41 ++++++++++++++------
 .../io/compress/zstd/TestHFileCompressionZstd.java | 29 +++++++++++++-
 .../hadoop/hbase/mapreduce/HFileInputFormat.java   |  2 +-
 .../mapreduce/TestCellBasedHFileOutputFormat2.java |  2 +-
 .../hbase/mapreduce/TestHFileOutputFormat2.java    |  4 +-
 .../TestImportTSVWithVisibilityLabels.java         |  2 +-
 .../hadoop/hbase/mapreduce/TestImportTsv.java      |  2 +-
 .../org/apache/hadoop/hbase/io/hfile/HFile.java    |  5 ++-
 .../apache/hadoop/hbase/io/hfile/HFileBlock.java   | 31 ++++++++-------
 .../hbase/io/hfile/HFileDataBlockEncoder.java      | 11 ++++--
 .../hbase/io/hfile/HFileDataBlockEncoderImpl.java  | 14 ++++---
 .../hadoop/hbase/io/hfile/HFilePrettyPrinter.java  |  2 +-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java     | 23 ++++++-----
 .../hadoop/hbase/io/hfile/HFileWriterImpl.java     |  2 +-
 .../hbase/io/hfile/NoOpDataBlockEncoder.java       | 10 +++--
 .../store/region/HFileProcedurePrettyPrinter.java  |  2 +-
 .../apache/hadoop/hbase/regionserver/HStore.java   |  2 +-
 .../hadoop/hbase/regionserver/StoreFileReader.java |  9 +++--
 .../apache/hadoop/hbase/util/CompressionTest.java  |  2 +-
 .../hadoop/hbase/HFilePerformanceEvaluation.java   |  8 ++--
 .../hadoop/hbase/io/compress/HFileTestBase.java    | 26 +++++--------
 .../hbase/io/encoding/TestDataBlockEncoders.java   | 45 ++++++++++++----------
 .../io/encoding/TestSeekToBlockWithEncoders.java   | 16 +++++---
 .../hadoop/hbase/io/hfile/TestCacheOnWrite.java    |  2 +-
 .../apache/hadoop/hbase/io/hfile/TestChecksum.java | 29 ++++++++------
 .../apache/hadoop/hbase/io/hfile/TestHFile.java    |  4 +-
 .../hadoop/hbase/io/hfile/TestHFileBlock.java      | 42 +++++++++++---------
 .../hadoop/hbase/io/hfile/TestHFileBlockIndex.java |  8 ++--
 .../hbase/io/hfile/TestHFileDataBlockEncoder.java  | 26 +++++++------
 .../hadoop/hbase/io/hfile/TestHFileEncryption.java | 14 +++----
 .../TestHFileInlineToRootChunkConversion.java      |  2 +-
 .../hadoop/hbase/io/hfile/TestHFileReaderImpl.java |  6 +--
 .../hfile/TestHFileScannerImplReferenceCount.java  |  6 +--
 .../hadoop/hbase/io/hfile/TestHFileSeek.java       |  2 +-
 .../hadoop/hbase/io/hfile/TestHFileWriterV3.java   |  2 +-
 .../hfile/TestHFileWriterV3WithDataEncoders.java   |  4 +-
 .../apache/hadoop/hbase/io/hfile/TestReseekTo.java |  2 +-
 .../io/hfile/TestSeekBeforeWithInlineBlocks.java   |  2 +-
 .../apache/hadoop/hbase/io/hfile/TestSeekTo.java   |  6 +--
 .../hbase/regionserver/DataBlockEncodingTool.java  |  8 ++--
 .../regionserver/TestCacheOnWriteInSchema.java     |  2 +-
 .../hbase/tool/TestLoadIncrementalHFiles.java      |  2 +-
 60 files changed, 451 insertions(+), 268 deletions(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/CanReinit.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/CanReinit.java
new file mode 100644
index 0000000..186ad10
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/CanReinit.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hbase.io.compress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * This is a marker interface that indicates if a compressor or decompressor
+ * type can support reinitialization via reinit(Configuration conf).
+ */
+@InterfaceAudience.Private
+public interface CanReinit {
+
+  void reinit(Configuration conf);
+
+}
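
The caller-side pattern this marker enables, mirroring the HFileBlockDefaultDecodingContext change later in this patch ('compression' and 'conf' are illustrative locals):

    // Only codecs that opt in via CanReinit are reinitialized with the store configuration.
    Decompressor decompressor = compression.getDecompressor();
    try {
      if (decompressor instanceof CanReinit) {
        // Pick up per-table / per-CF overrides such as compression level or buffer size.
        ((CanReinit) decompressor).reinit(conf);
      }
      // ... create the decompression stream and read ...
    } finally {
      if (decompressor != null) {
        compression.returnDecompressor(decompressor);
      }
    }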
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java
index d411fd7..8bff294 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java
@@ -22,12 +22,9 @@ import java.io.FilterOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.io.util.BlockIOUtils;
-import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.io.compress.CodecPool;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressionInputStream;
@@ -522,37 +519,6 @@ public final class Compression {
   }
 
   /**
-   * Decompresses data from the given stream using the configured compression algorithm. It will
-   * throw an exception if the dest buffer does not have enough space to hold the decompressed data.
-   * @param dest the output buffer
-   * @param bufferedBoundedStream a stream to read compressed data from, bounded to the exact amount
-   *          of compressed data
-   * @param uncompressedSize uncompressed data size, header not included
-   * @param compressAlgo compression algorithm used
-   * @throws IOException if any IO error happen
-   */
-  public static void decompress(ByteBuff dest, InputStream bufferedBoundedStream,
-      int uncompressedSize, Compression.Algorithm compressAlgo) throws IOException {
-    if (dest.remaining() < uncompressedSize) {
-      throw new IllegalArgumentException("Output buffer does not have enough space to hold "
-          + uncompressedSize + " decompressed bytes, available: " + dest.remaining());
-    }
-
-    Decompressor decompressor = null;
-    try {
-      decompressor = compressAlgo.getDecompressor();
-      try (InputStream is =
-          compressAlgo.createDecompressionStream(bufferedBoundedStream, decompressor, 0)) {
-        BlockIOUtils.readFullyWithHeapBuffer(is, dest, uncompressedSize);
-      }
-    } finally {
-      if (decompressor != null) {
-        compressAlgo.returnDecompressor(decompressor);
-      }
-    }
-  }
-
-  /**
    * Load a codec implementation for an algorithm using the supplied configuration.
    * @param conf the configuration to use
    * @param algo the algorithm to implement
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java
index e96b800..9fa8677 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java
@@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.io.encoding;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
+
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
@@ -29,14 +31,15 @@ import org.apache.yetus.audience.InterfaceAudience;
 public abstract class AbstractDataBlockEncoder implements DataBlockEncoder {
 
   @Override
-  public HFileBlockEncodingContext newDataBlockEncodingContext(
+  public HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf,
       DataBlockEncoding encoding, byte[] header, HFileContext meta) {
-    return new HFileBlockDefaultEncodingContext(encoding, header, meta);
+    return new HFileBlockDefaultEncodingContext(conf, encoding, header, meta);
   }
 
   @Override
-  public HFileBlockDecodingContext newDataBlockDecodingContext(HFileContext meta) {
-    return new HFileBlockDefaultDecodingContext(meta);
+  public HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf,
+      HFileContext meta) {
+    return new HFileBlockDefaultDecodingContext(conf, meta);
   }
 
   protected void postEncoding(HFileBlockEncodingContext encodingCtx)
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
index a6aafead..98225bd 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
@@ -20,6 +20,8 @@ import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
+
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
@@ -91,6 +93,8 @@ public interface DataBlockEncoder {
   /**
    * Creates a encoder specific encoding context
    *
+   * @param conf
+   *          store configuration
    * @param encoding
    *          encoding strategy used
    * @param headerBytes
@@ -100,18 +104,20 @@ public interface DataBlockEncoder {
    *          HFile meta data
    * @return a newly created encoding context
    */
-  HFileBlockEncodingContext newDataBlockEncodingContext(
+  HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf,
       DataBlockEncoding encoding, byte[] headerBytes, HFileContext meta);
 
   /**
    * Creates an encoder specific decoding context, which will prepare the data
    * before actual decoding
    *
+   * @param conf
+   *          store configuration
    * @param meta
    *          HFile meta data        
    * @return a newly created decoding context
    */
-  HFileBlockDecodingContext newDataBlockDecodingContext(HFileContext meta);
+  HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf, HFileContext meta);
 
   /**
    * An interface which enable to seek while underlying data is encoded.
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
index 2f8a19b..a2a7fc2 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
@@ -27,6 +27,7 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 import org.apache.commons.lang3.NotImplementedException;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
@@ -57,6 +58,7 @@ public class EncodedDataBlock {
   private HFileContext meta;
 
   private final DataBlockEncoding encoding;
+  private final Configuration conf;
 
   // The is for one situation that there are some cells includes tags and others are not.
   // isTagsLenZero stores if cell tags length is zero before doing encoding since we need
@@ -68,21 +70,23 @@ public class EncodedDataBlock {
 
   /**
    * Create a buffer which will be encoded using dataBlockEncoder.
+   * @param conf store configuration
    * @param dataBlockEncoder Algorithm used for compression.
    * @param encoding encoding type used
-   * @param rawKVs
-   * @param meta
+   * @param rawKVs raw KVs
+   * @param meta hfile context
    */
-  public EncodedDataBlock(DataBlockEncoder dataBlockEncoder, DataBlockEncoding encoding,
-      byte[] rawKVs, HFileContext meta) {
+  public EncodedDataBlock(Configuration conf, DataBlockEncoder dataBlockEncoder,
+      DataBlockEncoding encoding, byte[] rawKVs, HFileContext meta) {
     Preconditions.checkNotNull(encoding,
         "Cannot create encoded data block with null encoder");
     this.dataBlockEncoder = dataBlockEncoder;
     this.encoding = encoding;
-    encodingCtx = dataBlockEncoder.newDataBlockEncodingContext(encoding,
+    encodingCtx = dataBlockEncoder.newDataBlockEncodingContext(conf, encoding,
         HConstants.HFILEBLOCK_DUMMY_HEADER, meta);
     this.rawKVs = rawKVs;
     this.meta = meta;
+    this.conf = conf;
   }
 
   /**
@@ -115,7 +119,7 @@ public class EncodedDataBlock {
         if (decompressedData == null) {
           try {
             decompressedData = dataBlockEncoder.decodeKeyValues(dis, dataBlockEncoder
-                .newDataBlockDecodingContext(meta));
+                .newDataBlockDecodingContext(conf, meta));
           } catch (IOException e) {
             throw new RuntimeException("Problem with data block encoder, " +
                 "most likely it requested more bytes than are available.", e);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultDecodingContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultDecodingContext.java
index e321a25..5c7f6ed 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultDecodingContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultDecodingContext.java
@@ -20,8 +20,10 @@ import java.io.DataInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.io.ByteBuffInputStream;
 import org.apache.hadoop.hbase.io.TagCompressionContext;
+import org.apache.hadoop.hbase.io.compress.CanReinit;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.crypto.Cipher;
 import org.apache.hadoop.hbase.io.crypto.Decryptor;
@@ -30,6 +32,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.util.BlockIOUtils;
 import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.compress.Decompressor;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -41,10 +44,12 @@ import org.apache.yetus.audience.InterfaceAudience;
  */
 @InterfaceAudience.Private
 public class HFileBlockDefaultDecodingContext implements HFileBlockDecodingContext {
+  private final Configuration conf;
   private final HFileContext fileContext;
   private TagCompressionContext tagCompressionContext;
 
-  public HFileBlockDefaultDecodingContext(HFileContext fileContext) {
+  public HFileBlockDefaultDecodingContext(Configuration conf, HFileContext fileContext) {
+    this.conf = conf;
     this.fileContext = fileContext;
   }
 
@@ -87,8 +92,24 @@ public class HFileBlockDefaultDecodingContext implements HFileBlockDecodingConte
 
       Compression.Algorithm compression = fileContext.getCompression();
       if (compression != Compression.Algorithm.NONE) {
-        Compression.decompress(blockBufferWithoutHeader, dataInputStream,
-          uncompressedSizeWithoutHeader, compression);
+        Decompressor decompressor = null;
+        try {
+          decompressor = compression.getDecompressor();
+          // Some algorithms don't return decompressors and accept null as a valid parameter for
+          // same when creating decompression streams. We can ignore these cases wrt reinit.
+          if (decompressor instanceof CanReinit) {
+            ((CanReinit)decompressor).reinit(conf);
+          }
+          try (InputStream is =
+              compression.createDecompressionStream(dataInputStream, decompressor, 0)) {
+            BlockIOUtils.readFullyWithHeapBuffer(is, blockBufferWithoutHeader,
+              uncompressedSizeWithoutHeader);
+          }
+        } finally {
+          if (decompressor != null) {
+            compression.returnDecompressor(decompressor);
+          }
+        }
       } else {
         BlockIOUtils.readFullyWithHeapBuffer(dataInputStream, blockBufferWithoutHeader,
           onDiskSizeWithoutHeader);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultEncodingContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultEncodingContext.java
index 169f979..8d9e682 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultEncodingContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultEncodingContext.java
@@ -22,6 +22,8 @@ import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.security.SecureRandom;
+
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
 import org.apache.hadoop.hbase.io.TagCompressionContext;
 import org.apache.hadoop.hbase.io.compress.Compression;
@@ -72,18 +74,26 @@ public class HFileBlockDefaultEncodingContext implements HFileBlockEncodingConte
   private EncodingState encoderState;
 
   /**
+   * @param conf configuration
    * @param encoding encoding used
    * @param headerBytes dummy header bytes
    * @param fileContext HFile meta data
    */
-  public HFileBlockDefaultEncodingContext(DataBlockEncoding encoding, byte[] headerBytes,
-      HFileContext fileContext) {
+  public HFileBlockDefaultEncodingContext(Configuration conf, DataBlockEncoding encoding,
+      byte[] headerBytes, HFileContext fileContext) {
     this.encodingAlgo = encoding;
     this.fileContext = fileContext;
     Compression.Algorithm compressionAlgorithm =
         fileContext.getCompression() == null ? NONE : fileContext.getCompression();
     if (compressionAlgorithm != NONE) {
-      compressor = compressionAlgorithm.getCompressor();
+      if (compressor == null) {
+        compressor = compressionAlgorithm.getCompressor();
+        // Some algorithms don't return compressors and accept null as a valid parameter for
+        // same when creating compression streams. We can ignore these cases wrt reinit.
+        if (compressor != null) {
+          compressor.reinit(conf);
+        }
+      }
       compressedByteStream = new ByteArrayOutputStream();
       try {
         compressionStream =
diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java
index f5040d1..c448f58 100644
--- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java
+++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java
@@ -20,6 +20,7 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.io.compress.CanReinit;
 import org.apache.hadoop.hbase.io.compress.CompressionUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -32,7 +33,7 @@ import io.airlift.compress.Compressor;
  */
 @InterfaceAudience.Private
 public abstract class HadoopCompressor<T extends Compressor>
-    implements org.apache.hadoop.io.compress.Compressor {
+    implements CanReinit, org.apache.hadoop.io.compress.Compressor {
 
   protected static final Logger LOG = LoggerFactory.getLogger(HadoopCompressor.class);
   protected T compressor;
diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java
index 547fe1d..5ddee51 100644
--- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java
+++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java
@@ -18,7 +18,9 @@
 package org.apache.hadoop.hbase.io.compress.aircompressor;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.HFileTestBase;
 import org.apache.hadoop.hbase.testclassification.IOTests;
@@ -35,9 +37,11 @@ public class TestHFileCompressionLz4 extends HFileTestBase {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestHFileCompressionLz4.class);
 
+  private static Configuration conf;
+
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    Configuration conf = TEST_UTIL.getConfiguration();
+    conf = TEST_UTIL.getConfiguration();
     conf.set(Compression.LZ4_CODEC_CLASS_KEY, Lz4Codec.class.getCanonicalName());
     Compression.Algorithm.LZ4.reload(conf);
     HFileTestBase.setUpBeforeClass();
@@ -45,7 +49,9 @@ public class TestHFileCompressionLz4 extends HFileTestBase {
 
   @Test
   public void test() throws Exception {
-    doTest(Compression.Algorithm.LZ4);
+    Path path = new Path(TEST_UTIL.getDataTestDir(),
+      HBaseTestingUtility.getRandomUUID().toString() + ".hfile");
+    doTest(conf, path, Compression.Algorithm.LZ4);
   }
 
 }
diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java
index db0a79d..143db46 100644
--- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java
+++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java
@@ -18,7 +18,9 @@
 package org.apache.hadoop.hbase.io.compress.aircompressor;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.HFileTestBase;
 import org.apache.hadoop.hbase.testclassification.IOTests;
@@ -35,9 +37,11 @@ public class TestHFileCompressionLzo extends HFileTestBase {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestHFileCompressionLzo.class);
 
+  private static Configuration conf;
+
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    Configuration conf = TEST_UTIL.getConfiguration();
+    conf = TEST_UTIL.getConfiguration();
     conf.set(Compression.LZO_CODEC_CLASS_KEY, LzoCodec.class.getCanonicalName());
     Compression.Algorithm.LZO.reload(conf);
     HFileTestBase.setUpBeforeClass();
@@ -45,7 +49,9 @@ public class TestHFileCompressionLzo extends HFileTestBase {
 
   @Test
   public void test() throws Exception {
-    doTest(Compression.Algorithm.LZO);
+    Path path = new Path(TEST_UTIL.getDataTestDir(),
+      HBaseTestingUtility.getRandomUUID().toString() + ".hfile");
+    doTest(conf, path, Compression.Algorithm.LZO);
   }
 
 }
diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java
index 85b17b0..e9b08cb 100644
--- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java
+++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java
@@ -18,7 +18,9 @@
 package org.apache.hadoop.hbase.io.compress.aircompressor;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.HFileTestBase;
 import org.apache.hadoop.hbase.testclassification.IOTests;
@@ -35,9 +37,11 @@ public class TestHFileCompressionSnappy extends HFileTestBase {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestHFileCompressionSnappy.class);
 
+  private static Configuration conf;
+
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    Configuration conf = TEST_UTIL.getConfiguration();
+    conf = TEST_UTIL.getConfiguration();
     conf.set(Compression.SNAPPY_CODEC_CLASS_KEY, SnappyCodec.class.getCanonicalName());
     Compression.Algorithm.SNAPPY.reload(conf);
     HFileTestBase.setUpBeforeClass();
@@ -45,7 +49,9 @@ public class TestHFileCompressionSnappy extends HFileTestBase {
 
   @Test
   public void test() throws Exception {
-    doTest(Compression.Algorithm.SNAPPY);
+    Path path = new Path(TEST_UTIL.getDataTestDir(),
+      HBaseTestingUtility.getRandomUUID().toString() + ".hfile");
+    doTest(conf, path, Compression.Algorithm.SNAPPY);
   }
 
 }
diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java
index 692cc09..c3a52d8 100644
--- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java
+++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java
@@ -18,7 +18,9 @@
 package org.apache.hadoop.hbase.io.compress.aircompressor;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.HFileTestBase;
 import org.apache.hadoop.hbase.testclassification.IOTests;
@@ -35,9 +37,11 @@ public class TestHFileCompressionZstd extends HFileTestBase {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestHFileCompressionZstd.class);
 
+  private static Configuration conf;
+
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    Configuration conf = TEST_UTIL.getConfiguration();
+    conf = TEST_UTIL.getConfiguration();
     conf.set(Compression.ZSTD_CODEC_CLASS_KEY, ZstdCodec.class.getCanonicalName());
     Compression.Algorithm.ZSTD.reload(conf);
     HFileTestBase.setUpBeforeClass();
@@ -45,7 +49,9 @@ public class TestHFileCompressionZstd extends HFileTestBase {
 
   @Test
   public void test() throws Exception {
-    doTest(Compression.Algorithm.ZSTD);
+    Path path = new Path(TEST_UTIL.getDataTestDir(),
+      HBaseTestingUtility.getRandomUUID().toString() + ".hfile");
+    doTest(conf, path, Compression.Algorithm.ZSTD);
   }
 
 }
diff --git a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java
index 66928ad..71b5164 100644
--- a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java
+++ b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java
@@ -20,6 +20,7 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.io.compress.CanReinit;
 import org.apache.hadoop.hbase.io.compress.CompressionUtil;
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -33,7 +34,7 @@ import net.jpountz.lz4.LZ4Factory;
  * Hadoop compressor glue for lz4-java.
  */
 @InterfaceAudience.Private
-public class Lz4Compressor implements Compressor {
+public class Lz4Compressor implements CanReinit, Compressor {
 
   protected static final Logger LOG = LoggerFactory.getLogger(Lz4Compressor.class);
   protected LZ4Compressor compressor;
diff --git a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java
index 78c0f65..8f61829 100644
--- a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java
+++ b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java
@@ -18,7 +18,9 @@
 package org.apache.hadoop.hbase.io.compress.lz4;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.HFileTestBase;
 import org.apache.hadoop.hbase.testclassification.IOTests;
@@ -35,9 +37,11 @@ public class TestHFileCompressionLz4 extends HFileTestBase {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestHFileCompressionLz4.class);
 
+  private static Configuration conf;
+
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    Configuration conf = TEST_UTIL.getConfiguration();
+    conf = TEST_UTIL.getConfiguration();
     conf.set(Compression.LZ4_CODEC_CLASS_KEY, Lz4Codec.class.getCanonicalName());
     Compression.Algorithm.LZ4.reload(conf);
     HFileTestBase.setUpBeforeClass();
@@ -45,7 +49,9 @@ public class TestHFileCompressionLz4 extends HFileTestBase {
 
   @Test
   public void test() throws Exception {
-    doTest(Compression.Algorithm.LZ4);
+    Path path = new Path(TEST_UTIL.getDataTestDir(),
+      HBaseTestingUtility.getRandomUUID().toString() + ".hfile");
+    doTest(conf, path, Compression.Algorithm.LZ4);
   }
 
 }
diff --git a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java
index 71e7b7a..fd99942 100644
--- a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java
+++ b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java
@@ -20,6 +20,7 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.io.compress.CanReinit;
 import org.apache.hadoop.hbase.io.compress.CompressionUtil;
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -32,7 +33,7 @@ import org.xerial.snappy.Snappy;
  * Hadoop compressor glue for Xerial Snappy.
  */
 @InterfaceAudience.Private
-public class SnappyCompressor implements Compressor {
+public class SnappyCompressor implements CanReinit, Compressor {
 
   protected static final Logger LOG = LoggerFactory.getLogger(SnappyCompressor.class);
   protected ByteBuffer inBuf, outBuf;
diff --git a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java
index 094b434..0343e8b 100644
--- a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java
+++ b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java
@@ -18,7 +18,9 @@
 package org.apache.hadoop.hbase.io.compress.xerial;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.HFileTestBase;
 import org.apache.hadoop.hbase.testclassification.IOTests;
@@ -35,9 +37,11 @@ public class TestHFileCompressionSnappy extends HFileTestBase {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestHFileCompressionSnappy.class);
 
+  private static Configuration conf;
+
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    Configuration conf = TEST_UTIL.getConfiguration();
+    conf = TEST_UTIL.getConfiguration();
     conf.set(Compression.SNAPPY_CODEC_CLASS_KEY, SnappyCodec.class.getCanonicalName());
     Compression.Algorithm.SNAPPY.reload(conf);
     HFileTestBase.setUpBeforeClass();
@@ -45,7 +49,9 @@ public class TestHFileCompressionSnappy extends HFileTestBase {
 
   @Test
   public void test() throws Exception {
-    doTest(Compression.Algorithm.SNAPPY);
+    Path path = new Path(TEST_UTIL.getDataTestDir(),
+      HBaseTestingUtility.getRandomUUID().toString() + ".hfile");
+    doTest(conf, path, Compression.Algorithm.SNAPPY);
   }
 
 }
diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java
index 04c7b51..617e02d 100644
--- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java
+++ b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java
@@ -17,8 +17,12 @@
  */
 package org.apache.hadoop.hbase.io.compress.xz;
 
+import static org.junit.Assert.assertTrue;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.HFileTestBase;
 import org.apache.hadoop.hbase.testclassification.IOTests;
@@ -35,9 +39,11 @@ public class TestHFileCompressionLzma extends HFileTestBase {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestHFileCompressionLzma.class);
 
+  private static Configuration conf;
+
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    Configuration conf = TEST_UTIL.getConfiguration();
+    conf = TEST_UTIL.getConfiguration();
     conf.set(Compression.LZMA_CODEC_CLASS_KEY, LzmaCodec.class.getCanonicalName());
     Compression.Algorithm.LZMA.reload(conf);
     HFileTestBase.setUpBeforeClass();
@@ -45,7 +51,26 @@ public class TestHFileCompressionLzma extends HFileTestBase {
 
   @Test
   public void test() throws Exception {
-    doTest(Compression.Algorithm.LZMA);
+    Path path = new Path(TEST_UTIL.getDataTestDir(),
+      HBaseTestingUtility.getRandomUUID().toString() + ".hfile");
+    doTest(conf, path, Compression.Algorithm.LZMA);
+  }
+
+  @Test
+  public void testReconfLevels() throws Exception {
+    Path path_1 = new Path(TEST_UTIL.getDataTestDir(),
+      HBaseTestingUtility.getRandomUUID().toString() + ".1.hfile");
+    Path path_2 = new Path(TEST_UTIL.getDataTestDir(),
+      HBaseTestingUtility.getRandomUUID().toString() + ".2.hfile");
+    conf.setInt(LzmaCodec.LZMA_LEVEL_KEY, 1);
+    doTest(conf, path_1, Compression.Algorithm.LZMA);
+    long len_1 = FS.getFileStatus(path_1).getLen();
+    conf.setInt(LzmaCodec.LZMA_LEVEL_KEY, 9);
+    doTest(conf, path_2, Compression.Algorithm.LZMA);
+    long len_2 = FS.getFileStatus(path_2).getLen();
+    LOG.info("Level 1 len {}", len_1);
+    LOG.info("Level 9 len {}", len_2);
+    assertTrue("Reconfiguraton with LZMA_LEVEL_KEY did not seem to work", len_1 > len_2);
   }
 
 }
diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java
index 9e0b850..16ec438 100644
--- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java
+++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java
@@ -20,6 +20,7 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.io.compress.CanReinit;
 import org.apache.hadoop.hbase.io.compress.CompressionUtil;
 import org.apache.hadoop.io.compress.Compressor;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -32,7 +33,7 @@ import com.github.luben.zstd.Zstd;
  * Hadoop compressor glue for zstd-jni.
  */
 @InterfaceAudience.Private
-public class ZstdCompressor implements Compressor {
+public class ZstdCompressor implements CanReinit, Compressor {
 
   protected static final Logger LOG = LoggerFactory.getLogger(ZstdCompressor.class);
   protected int level, bufferSize;
@@ -40,7 +41,7 @@ public class ZstdCompressor implements Compressor {
   protected boolean finish, finished;
   protected long bytesRead, bytesWritten;
 
-  ZstdCompressor(int level, int bufferSize) {
+  ZstdCompressor(final int level, final int bufferSize) {
     this.level = level;
     this.bufferSize = bufferSize;
     this.inBuf = ByteBuffer.allocateDirect(bufferSize);
@@ -49,7 +50,7 @@ public class ZstdCompressor implements Compressor {
   }
 
   @Override
-  public int compress(byte[] b, int off, int len) throws IOException {
+  public int compress(final byte[] b, final int off, final int len) throws IOException {
     // If we have previously compressed our input and still have some buffered bytes
     // remaining, provide them to the caller.
     if (outBuf.hasRemaining()) {
@@ -73,7 +74,7 @@ public class ZstdCompressor implements Compressor {
         } else {
           outBuf.clear();
         }
-        int written = Zstd.compress(outBuf, inBuf, level, true);
+        int written = Zstd.compress(outBuf, inBuf, level);
         bytesWritten += written;
         inBuf.clear();
         LOG.trace("compress: compressed {} -> {} (level {})", uncompressed, written, level);
@@ -127,7 +128,7 @@ public class ZstdCompressor implements Compressor {
   }
 
   @Override
-  public void reinit(Configuration conf) {
+  public void reinit(final Configuration conf) {
     LOG.trace("reinit");
     if (conf != null) {
       // Level might have changed
@@ -156,12 +157,12 @@ public class ZstdCompressor implements Compressor {
   }
 
   @Override
-  public void setDictionary(byte[] b, int off, int len) {
+  public void setDictionary(final byte[] b, final int off, final int len) {
     throw new UnsupportedOperationException("setDictionary is not supported");
   }
 
   @Override
-  public void setInput(byte[] b, int off, int len) {
+  public void setInput(final byte[] b, final int off, final int len) {
     LOG.trace("setInput: off={} len={}", off, len);
     if (inBuf.remaining() < len) {
       // Get a new buffer that can accomodate the accumulated input plus the additional
@@ -181,7 +182,7 @@ public class ZstdCompressor implements Compressor {
 
   // Package private
 
-  int maxCompressedLength(int len) {
+  int maxCompressedLength(final int len) {
     return (int) Zstd.compressBound(len);
   }
 
diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java
index b25d0a3..a3d77f5 100644
--- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java
+++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java
@@ -18,34 +18,36 @@ package org.apache.hadoop.hbase.io.compress.zstd;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
-
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.io.compress.CanReinit;
 import org.apache.hadoop.hbase.io.compress.CompressionUtil;
 import org.apache.hadoop.io.compress.Decompressor;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
 import com.github.luben.zstd.Zstd;
 
 /**
  * Hadoop decompressor glue for zstd-java.
  */
 @InterfaceAudience.Private
-public class ZstdDecompressor implements Decompressor {
+public class ZstdDecompressor implements CanReinit, Decompressor {
 
   protected static final Logger LOG = LoggerFactory.getLogger(ZstdDecompressor.class);
   protected ByteBuffer inBuf, outBuf;
+  protected int bufferSize;
   protected int inLen;
   protected boolean finished;
 
-  ZstdDecompressor(int bufferSize) {
+  ZstdDecompressor(final int bufferSize) {
+    this.bufferSize = bufferSize;
     this.inBuf = ByteBuffer.allocateDirect(bufferSize);
     this.outBuf = ByteBuffer.allocateDirect(bufferSize);
     this.outBuf.position(bufferSize);
   }
 
   @Override
-  public int decompress(byte[] b, int off, int len) throws IOException {
+  public int decompress(final byte[] b, final int off, final int len) throws IOException {
     if (outBuf.hasRemaining()) {
       int remaining = outBuf.remaining(), n = Math.min(remaining, len);
       outBuf.get(b, off, n);
@@ -57,7 +59,8 @@ public class ZstdDecompressor implements Decompressor {
       int remaining = inBuf.remaining();
       inLen -= remaining;
       outBuf.clear();
-      int written = Zstd.decompress(outBuf, inBuf);
+      int written;
+      written = Zstd.decompress(outBuf, inBuf);
       inBuf.clear();
       LOG.trace("decompress: decompressed {} -> {}", remaining, written);
       outBuf.flip();
@@ -106,24 +109,25 @@ public class ZstdDecompressor implements Decompressor {
 
   @Override
   public boolean needsInput() {
-    boolean b = (inBuf.position() == 0);
+    final boolean b = (inBuf.position() == 0);
     LOG.trace("needsInput: {}", b);
     return b;
   }
 
   @Override
-  public void setDictionary(byte[] b, int off, int len) {
-    throw new UnsupportedOperationException("setDictionary is not supported");
+  public void setDictionary(final byte[] b, final int off, final int len) {
+    LOG.trace("setDictionary: off={} len={}", off, len);
+    throw new UnsupportedOperationException("setDictionary not supported");
   }
 
   @Override
-  public void setInput(byte[] b, int off, int len) {
+  public void setInput(final byte[] b, final int off, final int len) {
     LOG.trace("setInput: off={} len={}", off, len);
     if (inBuf.remaining() < len) {
       // Get a new buffer that can accomodate the accumulated input plus the additional
       // input that would cause a buffer overflow without reallocation.
       // This condition should be fortunately rare, because it is expensive.
-      int needed = CompressionUtil.roundInt2(inBuf.capacity() + len);
+      final int needed = CompressionUtil.roundInt2(inBuf.capacity() + len);
       LOG.trace("setInput: resize inBuf {}", needed);
       ByteBuffer newBuf = ByteBuffer.allocateDirect(needed);
       inBuf.flip();
@@ -135,4 +139,19 @@ public class ZstdDecompressor implements Decompressor {
     finished = false;
   }
 
+  @Override
+  public void reinit(final Configuration conf) {
+    LOG.trace("reinit");
+    if (conf != null) {
+      // Buffer size might have changed
+      int newBufferSize = ZstdCodec.getBufferSize(conf);
+      if (bufferSize != newBufferSize) {
+        bufferSize = newBufferSize;
+        this.inBuf = ByteBuffer.allocateDirect(bufferSize);
+        this.outBuf = ByteBuffer.allocateDirect(bufferSize);
+      }
+    }
+    reset();
+  }
+
 }
diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java
index 07ce12d..55a197b 100644
--- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java
+++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java
@@ -17,8 +17,12 @@
  */
 package org.apache.hadoop.hbase.io.compress.zstd;
 
+import static org.junit.Assert.assertTrue;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.HFileTestBase;
 import org.apache.hadoop.hbase.testclassification.IOTests;
@@ -35,9 +39,11 @@ public class TestHFileCompressionZstd extends HFileTestBase {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestHFileCompressionZstd.class);
 
+  private static Configuration conf;
+
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    Configuration conf = TEST_UTIL.getConfiguration();
+    conf = TEST_UTIL.getConfiguration();
     conf.set(Compression.ZSTD_CODEC_CLASS_KEY, ZstdCodec.class.getCanonicalName());
     Compression.Algorithm.ZSTD.reload(conf);
     HFileTestBase.setUpBeforeClass();
@@ -45,7 +51,26 @@ public class TestHFileCompressionZstd extends HFileTestBase {
 
   @Test
   public void test() throws Exception {
-    doTest(Compression.Algorithm.ZSTD);
+    Path path = new Path(TEST_UTIL.getDataTestDir(),
+      HBaseTestingUtility.getRandomUUID().toString() + ".hfile");
+    doTest(conf, path, Compression.Algorithm.ZSTD);
+  }
+
+  @Test
+  public void testReconfLevels() throws Exception {
+    Path path_1 = new Path(TEST_UTIL.getDataTestDir(),
+      HBaseTestingUtility.getRandomUUID().toString() + ".1.hfile");
+    Path path_2 = new Path(TEST_UTIL.getDataTestDir(),
+      HBaseTestingUtility.getRandomUUID().toString() + ".2.hfile");
+    conf.setInt(ZstdCodec.ZSTD_LEVEL_KEY, 1);
+    doTest(conf, path_1, Compression.Algorithm.ZSTD);
+    long len_1 = FS.getFileStatus(path_1).getLen();
+    conf.setInt(ZstdCodec.ZSTD_LEVEL_KEY, 22);
+    doTest(conf, path_2, Compression.Algorithm.ZSTD);
+    long len_2 = FS.getFileStatus(path_2).getLen();
+    LOG.info("Level 1 len {}", len_1);
+    LOG.info("Level 22 len {}", len_2);
+    assertTrue("Reconfiguraton with ZSTD_LEVEL_KEY did not seem to work", len_1 > len_2);
   }
 
 }
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java
index 1a9b655..03254fe 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java
@@ -91,7 +91,7 @@ public class HFileInputFormat extends FileInputFormat<NullWritable, Cell> {
 
       // The file info must be loaded before the scanner can be used.
       // This seems like a bug in HBase, but it's easily worked around.
-      this.scanner = in.getScanner(false, false);
+      this.scanner = in.getScanner(conf, false, false);
 
     }
 
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedHFileOutputFormat2.java
index ed151ec..0a7a930 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedHFileOutputFormat2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedHFileOutputFormat2.java
@@ -490,7 +490,7 @@ public class TestCellBasedHFileOutputFormat2  {
         LocatedFileStatus keyFileStatus = iterator.next();
         HFile.Reader reader =
             HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf);
-        HFileScanner scanner = reader.getScanner(false, false, false);
+        HFileScanner scanner = reader.getScanner(conf, false, false, false);
         scanner.seekTo();
         Cell cell = scanner.getCell();
         List<Tag> tagsFromCell = TagUtil.asList(cell.getTagsArray(), cell.getTagsOffset(),
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index f4cc385..5b26530 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -476,7 +476,7 @@ public class TestHFileOutputFormat2  {
         LocatedFileStatus keyFileStatus = iterator.next();
         HFile.Reader reader =
                 HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf);
-        HFileScanner scanner = reader.getScanner(false, false, false);
+        HFileScanner scanner = reader.getScanner(conf, false, false, false);
 
         kvCount += reader.getEntries();
         scanner.seekTo();
@@ -525,7 +525,7 @@ public class TestHFileOutputFormat2  {
         LocatedFileStatus keyFileStatus = iterator.next();
         HFile.Reader reader =
             HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf);
-        HFileScanner scanner = reader.getScanner(false, false, false);
+        HFileScanner scanner = reader.getScanner(conf, false, false, false);
         scanner.seekTo();
         Cell cell = scanner.getCell();
         List<Tag> tagsFromCell = PrivateCellUtil.getTags(cell);
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java
index 9ee649b..cad5b49 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java
@@ -484,7 +484,7 @@ public class TestImportTSVWithVisibilityLabels implements Configurable {
   private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException {
     Configuration conf = util.getConfiguration();
     HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), true, conf);
-    HFileScanner scanner = reader.getScanner(false, false);
+    HFileScanner scanner = reader.getScanner(conf, false, false);
     scanner.seekTo();
     int count = 0;
     do {
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
index fb213a3..3eee930 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
@@ -561,7 +561,7 @@ public class TestImportTsv implements Configurable {
   private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException {
     Configuration conf = util.getConfiguration();
     HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), true, conf);
-    HFileScanner scanner = reader.getScanner(false, false);
+    HFileScanner scanner = reader.getScanner(conf, false, false);
     scanner.seekTo();
     int count = 0;
     do {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index ed0e84d..2e6c19e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -397,7 +397,8 @@ public final class HFile {
 
     CellComparator getComparator();
 
-    HFileScanner getScanner(boolean cacheBlocks, final boolean pread, final boolean isCompaction);
+    HFileScanner getScanner(Configuration conf, boolean cacheBlocks, boolean pread,
+      boolean isCompaction);
 
     HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock) throws IOException;
 
@@ -425,7 +426,7 @@ public final class HFile {
     void setMetaBlockIndexReader(HFileBlockIndex.ByteArrayKeyBlockIndexReader reader);
     HFileBlockIndex.ByteArrayKeyBlockIndexReader getMetaBlockIndexReader();
 
-    HFileScanner getScanner(boolean cacheBlocks, boolean pread);
+    HFileScanner getScanner(Configuration conf, boolean cacheBlocks, boolean pread);
 
     /**
      * Retrieves general Bloom filter metadata as appropriate for each
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index 70c5c8a..faa42a3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -28,6 +28,8 @@ import java.util.List;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.hbase.Cell;
@@ -837,12 +839,13 @@ public class HFileBlock implements Cacheable {
     /**
      * @param dataBlockEncoder data block encoding algorithm to use
      */
-    public Writer(HFileDataBlockEncoder dataBlockEncoder, HFileContext fileContext) {
-      this(dataBlockEncoder, fileContext, ByteBuffAllocator.HEAP);
+    public Writer(Configuration conf, HFileDataBlockEncoder dataBlockEncoder,
+        HFileContext fileContext) {
+      this(conf, dataBlockEncoder, fileContext, ByteBuffAllocator.HEAP);
     }
 
-    public Writer(HFileDataBlockEncoder dataBlockEncoder, HFileContext fileContext,
-        ByteBuffAllocator allocator) {
+    public Writer(Configuration conf, HFileDataBlockEncoder dataBlockEncoder,
+        HFileContext fileContext, ByteBuffAllocator allocator) {
       if (fileContext.getBytesPerChecksum() < HConstants.HFILEBLOCK_HEADER_SIZE) {
         throw new RuntimeException("Unsupported value of bytesPerChecksum. " +
             " Minimum is " + HConstants.HFILEBLOCK_HEADER_SIZE + " but the configured value is " +
@@ -851,11 +854,11 @@ public class HFileBlock implements Cacheable {
       this.allocator = allocator;
       this.dataBlockEncoder = dataBlockEncoder != null?
           dataBlockEncoder: NoOpDataBlockEncoder.INSTANCE;
-      this.dataBlockEncodingCtx = this.dataBlockEncoder.
-          newDataBlockEncodingContext(HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
-      // TODO: This should be lazily instantiated since we usually do NOT need this default encoder
-      this.defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null,
-          HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
+      this.dataBlockEncodingCtx = this.dataBlockEncoder.newDataBlockEncodingContext(conf,
+        HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
+      // TODO: This should be lazily instantiated
+      this.defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(conf, null,
+        HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
       // TODO: Set BAOS initial size. Use fileContext.getBlocksize() and add for header/checksum
       baosInMemory = new ByteArrayOutputStream();
       prevOffsetByType = new long[BlockType.values().length];
@@ -1345,7 +1348,7 @@ public class HFileBlock implements Cacheable {
     HFileBlockDecodingContext getDefaultBlockDecodingContext();
 
     void setIncludesMemStoreTS(boolean includesMemstoreTS);
-    void setDataBlockEncoder(HFileDataBlockEncoder encoder);
+    void setDataBlockEncoder(HFileDataBlockEncoder encoder, Configuration conf);
 
     /**
      * To close the stream's socket. Note: This can be concurrently called from multiple threads and
@@ -1413,7 +1416,7 @@ public class HFileBlock implements Cacheable {
     private final Lock streamLock = new ReentrantLock();
 
     FSReaderImpl(ReaderContext readerContext, HFileContext fileContext,
-        ByteBuffAllocator allocator) throws IOException {
+        ByteBuffAllocator allocator, Configuration conf) throws IOException {
       this.fileSize = readerContext.getFileSize();
       this.hfs = readerContext.getFileSystem();
       if (readerContext.getFilePath() != null) {
@@ -1426,7 +1429,7 @@ public class HFileBlock implements Cacheable {
       this.streamWrapper = readerContext.getInputStreamWrapper();
       // Older versions of HBase didn't support checksum.
       this.streamWrapper.prepareForBlockReader(!fileContext.isUseHBaseChecksum());
-      defaultDecodingCtx = new HFileBlockDefaultDecodingContext(fileContext);
+      defaultDecodingCtx = new HFileBlockDefaultDecodingContext(conf, fileContext);
       encodedBlockDecodingCtx = defaultDecodingCtx;
     }
 
@@ -1790,8 +1793,8 @@ public class HFileBlock implements Cacheable {
     }
 
     @Override
-    public void setDataBlockEncoder(HFileDataBlockEncoder encoder) {
-      encodedBlockDecodingCtx = encoder.newDataBlockDecodingContext(this.fileContext);
+    public void setDataBlockEncoder(HFileDataBlockEncoder encoder, Configuration conf) {
+      encodedBlockDecodingCtx = encoder.newDataBlockDecodingContext(conf, fileContext);
     }
 
     @Override
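
Illustrative sketch (not part of the patch): the block-level Writer and FSReaderImpl now take a
Configuration as well. These are internal classes, so this is only a rough round-trip modeled on
the TestChecksum and TestHFileBlock changes below; the Configuration, FileSystem and Path are
assumed, and writeHeaderAndData() is the finish-the-block helper those tests use.

    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.ByteBuffAllocator;
    import org.apache.hadoop.hbase.io.hfile.BlockType;
    import org.apache.hadoop.hbase.io.hfile.HFileBlock;
    import org.apache.hadoop.hbase.io.hfile.HFileContext;
    import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
    import org.apache.hadoop.hbase.io.hfile.ReaderContext;
    import org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder;

    public class HFileBlockRoundTripExample {
      static HFileBlock roundTrip(Configuration conf, FileSystem fs, Path path) throws IOException {
        HFileContext meta = new HFileContextBuilder().build();
        // Writer now receives the Configuration up front; a null encoder means no block encoding.
        HFileBlock.Writer hbw = new HFileBlock.Writer(conf, null, meta);
        try (FSDataOutputStream os = fs.create(path)) {
          DataOutputStream dos = hbw.startWriting(BlockType.DATA);
          for (int i = 0; i < 1000; i++) {
            dos.writeInt(i);
          }
          hbw.writeHeaderAndData(os);      // finish and flush the single DATA block
        }
        // FSReaderImpl likewise takes the Configuration, after the allocator.
        ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, path).build();
        HFileBlock.FSReader hbr =
            new HFileBlock.FSReaderImpl(context, meta, ByteBuffAllocator.HEAP, conf);
        return hbr.readBlockData(0, -1, false, false, true);
      }
    }
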
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java
index 3c118da..6a1611d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java
@@ -18,13 +18,13 @@ package org.apache.hadoop.hbase.io.hfile;
 
 import java.io.DataOutputStream;
 import java.io.IOException;
-
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Controls what kind of data block encoding is used. If data block encoding is
@@ -97,11 +97,12 @@ public interface HFileDataBlockEncoder {
    * encoding context should also perform compression if compressionAlgorithm is
    * valid.
    *
+   * @param conf store configuration
    * @param headerBytes header bytes
    * @param fileContext HFile meta data
    * @return a new {@link HFileBlockEncodingContext} object
    */
-  HFileBlockEncodingContext newDataBlockEncodingContext(byte[] headerBytes,
+  HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf, byte[] headerBytes,
       HFileContext fileContext);
 
   /**
@@ -109,8 +110,10 @@ public interface HFileDataBlockEncoder {
    * decoding context should also do decompression if compressionAlgorithm
    * is valid.
    *
+   * @param conf store configuration
    * @param fileContext - HFile meta data
    * @return a new {@link HFileBlockDecodingContext} object
    */
-  HFileBlockDecodingContext newDataBlockDecodingContext(HFileContext fileContext);
+  HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf,
+      HFileContext fileContext);
 }
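
Illustrative sketch (not part of the patch): the same Configuration-first pattern applies to the
lower-level DataBlockEncoder context factories used by implementations of this interface. The
encoding chosen below (ROW_INDEX_V1) and the standalone main() are assumptions for the example;
the calls mirror the TestDataBlockEncoders changes later in this diff.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
    import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
    import org.apache.hadoop.hbase.io.hfile.HFileContext;
    import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

    public class EncodingContextExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false).build();
        DataBlockEncoding encoding = DataBlockEncoding.ROW_INDEX_V1;
        DataBlockEncoder encoder = encoding.getEncoder();
        byte[] dummyHeader = new byte[HConstants.HFILEBLOCK_HEADER_SIZE];

        // Both factory methods now take the store Configuration so the contexts can
        // consult it when compressing or decompressing block contents.
        HFileBlockEncodingContext encodingCtx =
            encoder.newDataBlockEncodingContext(conf, encoding, dummyHeader, meta);
        HFileBlockDecodingContext decodingCtx =
            encoder.newDataBlockDecodingContext(conf, meta);
        DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(decodingCtx);
        System.out.println("encoding=" + encoding + " seeker=" + seeker);
      }
    }
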
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java
index 462064f..d2ce772 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.io.hfile;
 import java.io.DataOutputStream;
 import java.io.IOException;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -108,22 +109,23 @@ public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder {
   }
 
   @Override
-  public HFileBlockEncodingContext newDataBlockEncodingContext(
+  public HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf,
       byte[] dummyHeader, HFileContext fileContext) {
     DataBlockEncoder encoder = encoding.getEncoder();
     if (encoder != null) {
-      return encoder.newDataBlockEncodingContext(encoding, dummyHeader, fileContext);
+      return encoder.newDataBlockEncodingContext(conf, encoding, dummyHeader, fileContext);
     }
-    return new HFileBlockDefaultEncodingContext(null, dummyHeader, fileContext);
+    return new HFileBlockDefaultEncodingContext(conf, null, dummyHeader, fileContext);
   }
 
   @Override
-  public HFileBlockDecodingContext newDataBlockDecodingContext(HFileContext fileContext) {
+  public HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf,
+      HFileContext fileContext) {
     DataBlockEncoder encoder = encoding.getEncoder();
     if (encoder != null) {
-      return encoder.newDataBlockDecodingContext(fileContext);
+      return encoder.newDataBlockDecodingContext(conf, fileContext);
     }
-    return new HFileBlockDefaultDecodingContext(fileContext);
+    return new HFileBlockDefaultDecodingContext(conf, fileContext);
   }
 
   @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
index 1f7711e..01e8863 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
@@ -319,7 +319,7 @@ public class HFilePrettyPrinter extends Configured implements Tool {
 
     if (verbose || printKey || checkRow || checkFamily || printStats || checkMobIntegrity) {
       // scan over file and read key/value's and check if requested
-      HFileScanner scanner = reader.getScanner(false, false, false);
+      HFileScanner scanner = reader.getScanner(getConf(), false, false, false);
       fileStats = new KeyValueStatsCollector();
       boolean shouldScanKeysValues;
       if (this.isSeekToRow && !Bytes.equals(row, reader.getFirstRowKey().orElse(null))) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index 0bb8d23..80049c9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -141,9 +141,9 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
     this.trailer = fileInfo.getTrailer();
     this.hfileContext = fileInfo.getHFileContext();
     this.fsBlockReader = new HFileBlock.FSReaderImpl(context, hfileContext,
-        cacheConf.getByteBuffAllocator());
+        cacheConf.getByteBuffAllocator(), conf);
     this.dataBlockEncoder = HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo);
-    fsBlockReader.setDataBlockEncoder(dataBlockEncoder);
+    fsBlockReader.setDataBlockEncoder(dataBlockEncoder, conf);
     dataBlockIndexReader = fileInfo.getDataBlockIndexReader();
     metaBlockIndexReader = fileInfo.getMetaBlockIndexReader();
   }
@@ -256,7 +256,7 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
   @Override
   public void setDataBlockEncoder(HFileDataBlockEncoder dataBlockEncoder) {
     this.dataBlockEncoder = dataBlockEncoder;
-    this.fsBlockReader.setDataBlockEncoder(dataBlockEncoder);
+    this.fsBlockReader.setDataBlockEncoder(dataBlockEncoder, conf);
   }
 
   @Override
@@ -1444,11 +1444,11 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
     private final DataBlockEncoder dataBlockEncoder;
 
     public EncodedScanner(HFile.Reader reader, boolean cacheBlocks,
-        boolean pread, boolean isCompaction, HFileContext meta) {
+        boolean pread, boolean isCompaction, HFileContext meta, Configuration conf) {
       super(reader, cacheBlocks, pread, isCompaction);
       DataBlockEncoding encoding = reader.getDataBlockEncoding();
       dataBlockEncoder = encoding.getEncoder();
-      decodingCtx = dataBlockEncoder.newDataBlockDecodingContext(meta);
+      decodingCtx = dataBlockEncoder.newDataBlockDecodingContext(conf, meta);
       seeker = dataBlockEncoder.createSeeker(decodingCtx);
     }
 
@@ -1636,16 +1636,17 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
   * {@link HFileScanner#seekTo(Cell)} to position and start the read. There is
    * nothing to clean up in a Scanner. Letting go of your references to the
    * scanner is sufficient. NOTE: Do not use this overload of getScanner for
-   * compactions. See {@link #getScanner(boolean, boolean, boolean)}
+   * compactions. See {@link #getScanner(Configuration, boolean, boolean, boolean)}
    *
+   * @param conf Store configuration.
    * @param cacheBlocks True if we should cache blocks read in by this scanner.
    * @param pread Use positional read rather than seek+read if true (pread is
    *          better for random reads, seek+read is better scanning).
    * @return Scanner on this file.
    */
   @Override
-  public HFileScanner getScanner(boolean cacheBlocks, final boolean pread) {
-    return getScanner(cacheBlocks, pread, false);
+  public HFileScanner getScanner(Configuration conf, boolean cacheBlocks, final boolean pread) {
+    return getScanner(conf, cacheBlocks, pread, false);
   }
 
   /**
@@ -1653,6 +1654,8 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
   * {@link HFileScanner#seekTo(Cell)} to position and start the read. There is
    * nothing to clean up in a Scanner. Letting go of your references to the
    * scanner is sufficient.
+   * @param conf
+   *          Store configuration.
    * @param cacheBlocks
    *          True if we should cache blocks read in by this scanner.
    * @param pread
@@ -1663,10 +1666,10 @@ public abstract class HFileReaderImpl implements HFile.Reader, Configurable {
    * @return Scanner on this file.
    */
   @Override
-  public HFileScanner getScanner(boolean cacheBlocks, final boolean pread,
+  public HFileScanner getScanner(Configuration conf, boolean cacheBlocks, final boolean pread,
       final boolean isCompaction) {
     if (dataBlockEncoder.useEncodedScanner()) {
-      return new EncodedScanner(this, cacheBlocks, pread, isCompaction, this.hfileContext);
+      return new EncodedScanner(this, cacheBlocks, pread, isCompaction, this.hfileContext, conf);
     }
     return new HFileScannerImpl(this, cacheBlocks, pread, isCompaction);
   }
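
Illustrative sketch (not part of the patch): the pread overload described in the javadoc above,
used for random point lookups. The Reader, Configuration and row key are assumed to exist; the
seekTo() handling follows the CompressionTest and HFileProcedurePrettyPrinter call sites in this
diff.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.PrivateCellUtil;
    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.io.hfile.HFileScanner;

    public class PointLookupExample {
      // pread=true favors random reads; cacheBlocks=false leaves the block cache untouched.
      // Returns the cell the scanner lands on (exact match or closest preceding cell),
      // or null when the requested row sorts before the first cell in the file.
      static Cell seekNear(HFile.Reader reader, Configuration conf, byte[] row) throws IOException {
        HFileScanner scanner = reader.getScanner(conf, false, true);
        if (scanner.seekTo(PrivateCellUtil.createFirstOnRow(row)) != -1) {
          return scanner.getCell();
        }
        return null;
      }
    }
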
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
index b9503ce..3e03b88 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
@@ -298,7 +298,7 @@ public class HFileWriterImpl implements HFile.Writer {
     if (blockWriter != null) {
       throw new IllegalStateException("finishInit called twice");
     }
-    blockWriter = new HFileBlock.Writer(blockEncoder, hFileContext,
+    blockWriter = new HFileBlock.Writer(conf, blockEncoder, hFileContext,
         cacheConf.getByteBuffAllocator());
     // Data block index writer
     boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java
index 467480f..c519d9f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.io.hfile;
 import java.io.DataOutputStream;
 import java.io.IOException;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -81,14 +82,15 @@ public class NoOpDataBlockEncoder implements HFileDataBlockEncoder {
   }
 
   @Override
-  public HFileBlockEncodingContext newDataBlockEncodingContext(
+  public HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf,
       byte[] dummyHeader, HFileContext meta) {
-    return new HFileBlockDefaultEncodingContext(null, dummyHeader, meta);
+    return new HFileBlockDefaultEncodingContext(conf, null, dummyHeader, meta);
   }
 
   @Override
-  public HFileBlockDecodingContext newDataBlockDecodingContext(HFileContext meta) {
-    return new HFileBlockDefaultDecodingContext(meta);
+  public HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf,
+      HFileContext meta) {
+    return new HFileBlockDefaultDecodingContext(conf, meta);
   }
 
   @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/HFileProcedurePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/HFileProcedurePrettyPrinter.java
index f52c166..ede2949 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/HFileProcedurePrettyPrinter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/store/region/HFileProcedurePrettyPrinter.java
@@ -136,7 +136,7 @@ public class HFileProcedurePrettyPrinter extends AbstractHBaseTool {
     out.println("Scanning -> " + file);
     FileSystem fs = file.getFileSystem(conf);
     try (HFile.Reader reader = HFile.createReader(fs, file, CacheConfig.DISABLED, true, conf);
-      HFileScanner scanner = reader.getScanner(false, false, false)) {
+      HFileScanner scanner = reader.getScanner(conf, false, false, false)) {
       if (procId != null) {
         if (scanner
           .seekTo(PrivateCellUtil.createFirstOnRow(Bytes.toBytes(procId.longValue()))) != -1) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 8a59051..2d53276 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -806,7 +806,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
         long verificationStartTime = EnvironmentEdgeManager.currentTime();
         LOG.info("Full verification started for bulk load hfile: {}", srcPath);
         Cell prevCell = null;
-        HFileScanner scanner = reader.getScanner(false, false, false);
+        HFileScanner scanner = reader.getScanner(conf, false, false, false);
         scanner.seekTo();
         do {
           Cell cell = scanner.getCell();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index 7550511..32ee47e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -75,6 +75,7 @@ public class StoreFileReader {
   private KeyValue.KeyOnlyKeyValue lastBloomKeyOnlyKV = null;
   private boolean skipResetSeqId = true;
   private int prefixLength = -1;
+  protected Configuration conf;
 
   // Counter that is incremented every time a scanner is created on the
   // store file. It is decremented when the scan on the store file is
@@ -82,16 +83,18 @@ public class StoreFileReader {
   private final AtomicInteger refCount;
   private final ReaderContext context;
 
-  private StoreFileReader(HFile.Reader reader, AtomicInteger refCount, ReaderContext context) {
+  private StoreFileReader(HFile.Reader reader, AtomicInteger refCount, ReaderContext context,
+      Configuration conf) {
     this.reader = reader;
     bloomFilterType = BloomType.NONE;
     this.refCount = refCount;
     this.context = context;
+    this.conf = conf;
   }
 
   public StoreFileReader(ReaderContext context, HFileInfo fileInfo, CacheConfig cacheConf,
       AtomicInteger refCount, Configuration conf) throws IOException {
-    this(HFile.createReader(context, fileInfo, cacheConf, conf), refCount, context);
+    this(HFile.createReader(context, fileInfo, cacheConf, conf), refCount, context, conf);
   }
 
   void copyFields(StoreFileReader storeFileReader) throws IOException {
@@ -205,7 +208,7 @@ public class StoreFileReader {
   @Deprecated
   public HFileScanner getScanner(boolean cacheBlocks, boolean pread,
       boolean isCompaction) {
-    return reader.getScanner(cacheBlocks, pread, isCompaction);
+    return reader.getScanner(conf, cacheBlocks, pread, isCompaction);
   }
 
   public void close(boolean evictOnClose) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
index b72e1cc..7b897e7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
@@ -135,7 +135,7 @@ public class CompressionTest {
     Cell cc = null;
     HFile.Reader reader = HFile.createReader(fs, path, CacheConfig.DISABLED, true, conf);
     try {
-      HFileScanner scanner = reader.getScanner(false, true);
+      HFileScanner scanner = reader.getScanner(conf, false, true);
       scanner.seekTo(); // position to the start of file
       // Scanner does not do Cells yet. Do below for now till fixed.
       cc = scanner.getCell();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
index 82b068d..5ab407b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
@@ -424,7 +424,7 @@ public class HFilePerformanceEvaluation {
     @Override
     void setUp() throws Exception {
       super.setUp();
-      this.scanner = this.reader.getScanner(false, false);
+      this.scanner = this.reader.getScanner(conf, false, false);
       this.scanner.seekTo();
     }
 
@@ -456,7 +456,7 @@ public class HFilePerformanceEvaluation {
 
     @Override
     void doRow(int i) throws Exception {
-      HFileScanner scanner = this.reader.getScanner(false, true);
+      HFileScanner scanner = this.reader.getScanner(conf, false, true);
       byte [] b = getRandomRow();
       if (scanner.seekTo(createCell(b)) < 0) {
         LOG.info("Not able to seekTo " + new String(b));
@@ -483,7 +483,7 @@ public class HFilePerformanceEvaluation {
 
     @Override
     void doRow(int i) throws Exception {
-      HFileScanner scanner = this.reader.getScanner(false, false);
+      HFileScanner scanner = this.reader.getScanner(conf, false, false);
       byte [] b = getRandomRow();
       // System.out.println("Random row: " + new String(b));
       Cell c = createCell(b);
@@ -522,7 +522,7 @@ public class HFilePerformanceEvaluation {
 
     @Override
     void doRow(int i) throws Exception {
-      HFileScanner scanner = this.reader.getScanner(false, true);
+      HFileScanner scanner = this.reader.getScanner(conf, false, true);
       byte[] gaussianRandomRowBytes = getGaussianRandomRowBytes();
       scanner.seekTo(createCell(gaussianRandomRowBytes));
       for (int ii = 0; ii < 30; ii++) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/compress/HFileTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/compress/HFileTestBase.java
index 4ca3a43..a99074f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/compress/HFileTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/compress/HFileTestBase.java
@@ -36,38 +36,32 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
-import org.apache.hadoop.hbase.logging.Log4jUtils;
 import org.apache.hadoop.hbase.util.RedundantKVGenerator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class HFileTestBase {
 
-  static {
-    Log4jUtils.setLogLevel("org.apache.hadoop.hbase.io.compress", "TRACE");
-  }
-
   protected static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   protected static final Logger LOG = LoggerFactory.getLogger(HFileTestBase.class);
   protected static final SecureRandom RNG = new SecureRandom();
-  protected static FileSystem fs;
+  protected static FileSystem FS;
 
   public static void setUpBeforeClass() throws Exception {
     Configuration conf = TEST_UTIL.getConfiguration();
     // Disable block cache in this test.
     conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
-    conf.setInt("hfile.format.version", 3);
-    fs = FileSystem.get(conf);
+    FS = FileSystem.get(conf);
   }
 
   @SuppressWarnings("deprecation")
-  public void doTest(Compression.Algorithm compression) throws Exception {
+  public void doTest(Configuration conf, Path path, Compression.Algorithm compression)
+      throws Exception {
     // Create 10000 random test KVs
     RedundantKVGenerator generator = new RedundantKVGenerator();
     List<KeyValue> testKvs = generator.generateTestKeyValues(10000);
 
     // Iterate through data block encoding and compression combinations
-    Configuration conf = TEST_UTIL.getConfiguration();
     CacheConfig cacheConf = new CacheConfig(conf);
     HFileContext fileContext = new HFileContextBuilder()
       .withBlockSize(4096) // small block
@@ -75,9 +69,7 @@ public class HFileTestBase {
       .build();
     // write a new test HFile
     LOG.info("Writing with " + fileContext);
-    Path path = new Path(TEST_UTIL.getDataTestDir(),
-      HBaseTestingUtility.getRandomUUID().toString() + ".hfile");
-    FSDataOutputStream out = fs.create(path);
+    FSDataOutputStream out = FS.create(path);
     HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf)
       .withOutputStream(out)
       .withFileContext(fileContext)
@@ -95,9 +87,9 @@ public class HFileTestBase {
     LOG.info("Reading with " + fileContext);
     int i = 0;
     HFileScanner scanner = null;
-    HFile.Reader reader = HFile.createReader(fs, path, cacheConf, true, conf);
+    HFile.Reader reader = HFile.createReader(FS, path, cacheConf, true, conf);
     try {
-      scanner = reader.getScanner(false, false);
+      scanner = reader.getScanner(conf, false, false);
       assertTrue("Initial seekTo failed", scanner.seekTo());
       do {
         Cell kv = scanner.getCell();
@@ -114,9 +106,9 @@ public class HFileTestBase {
 
     // Test random seeks with pread
     LOG.info("Random seeking with " + fileContext);
-    reader = HFile.createReader(fs, path, cacheConf, true, conf);
+    reader = HFile.createReader(FS, path, cacheConf, true, conf);
     try {
-      scanner = reader.getScanner(false, true);
+      scanner = reader.getScanner(conf, false, true);
       assertTrue("Initial seekTo failed", scanner.seekTo());
       for (i = 0; i < 100; i++) {
         KeyValue kv = testKvs.get(RNG.nextInt(testKvs.size()));
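
Illustrative sketch (not part of the patch): with doTest() now taking the Configuration and the
target Path, a concrete codec test supplies both itself. The class name, the GZ algorithm and
the omission of the usual HBaseClassTestRule boilerplate are assumptions made for brevity; the
real callers are the compression tests in the hbase-compression modules earlier in this diff.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.junit.BeforeClass;
    import org.junit.Test;

    // Assumed to live in the same package as HFileTestBase.
    public class TestHFileCompressionExample extends HFileTestBase {

      @BeforeClass
      public static void setUpBeforeClass() throws Exception {
        HFileTestBase.setUpBeforeClass();   // initializes the shared FS and test Configuration
      }

      @Test
      public void test() throws Exception {
        Configuration conf = TEST_UTIL.getConfiguration();
        // Each test now chooses its own Configuration and output Path.
        Path path = new Path(TEST_UTIL.getDataTestDir(),
          HBaseTestingUtility.getRandomUUID().toString() + ".hfile");
        doTest(conf, path, Compression.Algorithm.GZ);
      }
    }
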
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
index 3f21f0c..390efd2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
@@ -30,11 +30,14 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
@@ -82,9 +85,9 @@ public class TestDataBlockEncoders {
       + DataBlockEncoding.ID_SIZE;
   static final byte[] HFILEBLOCK_DUMMY_HEADER = new byte[HConstants.HFILEBLOCK_HEADER_SIZE];
 
-  private RedundantKVGenerator generator = new RedundantKVGenerator();
-  private Random randomizer = new Random(42L);
-
+  private final Configuration conf = HBaseConfiguration.create();
+  private final RedundantKVGenerator generator = new RedundantKVGenerator();
+  private final Random randomizer = new Random(42L);
   private final boolean includesMemstoreTS;
   private final boolean includesTags;
   private final boolean useOffheapData;
@@ -101,8 +104,8 @@ public class TestDataBlockEncoders {
     this.useOffheapData = useOffheapData;
   }
 
-  private HFileBlockEncodingContext getEncodingContext(Compression.Algorithm algo,
-      DataBlockEncoding encoding) {
+  private HFileBlockEncodingContext getEncodingContext(Configuration conf,
+      Compression.Algorithm algo, DataBlockEncoding encoding) {
     DataBlockEncoder encoder = encoding.getEncoder();
     HFileContext meta = new HFileContextBuilder()
                         .withHBaseCheckSum(false)
@@ -110,9 +113,9 @@ public class TestDataBlockEncoders {
                         .withIncludesTags(includesTags)
                         .withCompression(algo).build();
     if (encoder != null) {
-      return encoder.newDataBlockEncodingContext(encoding, HFILEBLOCK_DUMMY_HEADER, meta);
+      return encoder.newDataBlockEncodingContext(conf, encoding, HFILEBLOCK_DUMMY_HEADER, meta);
     } else {
-      return new HFileBlockDefaultEncodingContext(encoding, HFILEBLOCK_DUMMY_HEADER, meta);
+      return new HFileBlockDefaultEncodingContext(conf, encoding, HFILEBLOCK_DUMMY_HEADER, meta);
     }
   }
 
@@ -199,7 +202,7 @@ public class TestDataBlockEncoders {
       }
       LOG.info("Encoder: " + encoder);
       ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv,
-          getEncodingContext(Compression.Algorithm.NONE, encoding), this.useOffheapData);
+          getEncodingContext(conf, Compression.Algorithm.NONE, encoding), this.useOffheapData);
       HFileContext meta = new HFileContextBuilder()
                           .withHBaseCheckSum(false)
                           .withIncludesMvcc(includesMemstoreTS)
@@ -207,7 +210,7 @@ public class TestDataBlockEncoders {
                           .withCompression(Compression.Algorithm.NONE)
                           .build();
       DataBlockEncoder.EncodedSeeker seeker =
-        encoder.createSeeker(encoder.newDataBlockDecodingContext(meta));
+        encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta));
       seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
       encodedSeekers.add(seeker);
     }
@@ -272,7 +275,7 @@ public class TestDataBlockEncoders {
       }
       DataBlockEncoder encoder = encoding.getEncoder();
       ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv,
-          getEncodingContext(Compression.Algorithm.NONE, encoding), this.useOffheapData);
+          getEncodingContext(conf, Compression.Algorithm.NONE, encoding), this.useOffheapData);
       HFileContext meta = new HFileContextBuilder()
                           .withHBaseCheckSum(false)
                           .withIncludesMvcc(includesMemstoreTS)
@@ -280,7 +283,7 @@ public class TestDataBlockEncoders {
                           .withCompression(Compression.Algorithm.NONE)
                           .build();
       DataBlockEncoder.EncodedSeeker seeker =
-        encoder.createSeeker(encoder.newDataBlockDecodingContext(meta));
+        encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta));
       seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
       int i = 0;
       do {
@@ -315,7 +318,7 @@ public class TestDataBlockEncoders {
       }
       DataBlockEncoder encoder = encoding.getEncoder();
       ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv,
-          getEncodingContext(Compression.Algorithm.NONE, encoding), this.useOffheapData);
+          getEncodingContext(conf, Compression.Algorithm.NONE, encoding), this.useOffheapData);
       Cell key = encoder.getFirstKeyCellInBlock(new SingleByteBuff(encodedBuffer));
       KeyValue firstKv = sampleKv.get(0);
       if (0 != PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, key, firstKv)) {
@@ -336,13 +339,13 @@ public class TestDataBlockEncoders {
     kvList.add(expectedKV);
     DataBlockEncoding encoding = DataBlockEncoding.ROW_INDEX_V1;
     DataBlockEncoder encoder = encoding.getEncoder();
-    ByteBuffer encodedBuffer =
-        encodeKeyValues(encoding, kvList, getEncodingContext(Algorithm.NONE, encoding), false);
+    ByteBuffer encodedBuffer = encodeKeyValues(encoding, kvList,
+      getEncodingContext(conf, Algorithm.NONE, encoding), false);
     HFileContext meta =
-        new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS)
-            .withIncludesTags(includesTags).withCompression(Compression.Algorithm.NONE).build();
+      new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS)
+        .withIncludesTags(includesTags).withCompression(Compression.Algorithm.NONE).build();
     DataBlockEncoder.EncodedSeeker seeker =
-      encoder.createSeeker(encoder.newDataBlockDecodingContext(meta));
+      encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta));
     seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
     Cell cell = seeker.getCell();
     Assert.assertEquals(expectedKV.getLength(), ((KeyValue) cell).getLength());
@@ -393,9 +396,9 @@ public class TestDataBlockEncoders {
       if (encoder == null) {
         continue;
       }
-      HFileBlockEncodingContext encodingContext = new HFileBlockDefaultEncodingContext(encoding,
-          HFILEBLOCK_DUMMY_HEADER, fileContext);
-
+      HFileBlockEncodingContext encodingContext =
+        new HFileBlockDefaultEncodingContext(conf, encoding, HFILEBLOCK_DUMMY_HEADER,
+          fileContext);
       ByteArrayOutputStream baos = new ByteArrayOutputStream();
       baos.write(HFILEBLOCK_DUMMY_HEADER);
       DataOutputStream dos = new DataOutputStream(baos);
@@ -441,7 +444,7 @@ public class TestDataBlockEncoders {
     HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
         .withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTags)
         .withCompression(Compression.Algorithm.NONE).build();
-    actualDataset = encoder.decodeKeyValues(dis, encoder.newDataBlockDecodingContext(meta));
+    actualDataset = encoder.decodeKeyValues(dis, encoder.newDataBlockDecodingContext(conf, meta));
     actualDataset.rewind();
 
     // this is because in case of prefix tree the decoded stream will not have
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java
index d9f22bc..bb3e1a3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java
@@ -24,9 +24,12 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.PrivateCellUtil;
@@ -54,6 +57,7 @@ public class TestSeekToBlockWithEncoders {
 
   static final byte[] HFILEBLOCK_DUMMY_HEADER = new byte[HConstants.HFILEBLOCK_HEADER_SIZE];
   private final boolean useOffheapData;
+  private final Configuration conf = HBaseConfiguration.create();
 
   @Parameters
   public static Collection<Object[]> parameters() {
@@ -283,14 +287,14 @@ public class TestSeekToBlockWithEncoders {
       }
       DataBlockEncoder encoder = encoding.getEncoder();
       HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
-          .withIncludesMvcc(false).withIncludesTags(false)
-          .withCompression(Compression.Algorithm.NONE).build();
-      HFileBlockEncodingContext encodingContext = encoder.newDataBlockEncodingContext(encoding,
-          HFILEBLOCK_DUMMY_HEADER, meta);
+        .withIncludesMvcc(false).withIncludesTags(false)
+        .withCompression(Compression.Algorithm.NONE).build();
+      HFileBlockEncodingContext encodingContext = encoder.newDataBlockEncodingContext(conf,
+        encoding, HFILEBLOCK_DUMMY_HEADER, meta);
       ByteBuffer encodedBuffer = TestDataBlockEncoders.encodeKeyValues(encoding, kvs,
-          encodingContext, this.useOffheapData);
+        encodingContext, this.useOffheapData);
       DataBlockEncoder.EncodedSeeker seeker =
-        encoder.createSeeker(encoder.newDataBlockDecodingContext(meta));
+        encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta));
       seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
       encodedSeekers.add(seeker);
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
index 119d26c..a3ea68c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
@@ -283,7 +283,7 @@ public class TestCacheOnWrite {
       .withIncludesTags(useTags).build();
     final boolean cacheBlocks = false;
     final boolean pread = false;
-    HFileScanner scanner = reader.getScanner(cacheBlocks, pread);
+    HFileScanner scanner = reader.getScanner(conf, cacheBlocks, pread);
     assertTrue(testDescription, scanner.seekTo());
 
     long offset = 0;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java
index 85f74c9..0b3444c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestChecksum.java
@@ -30,6 +30,7 @@ import java.io.IOException;
 import java.nio.BufferUnderflowException;
 import java.nio.ByteBuffer;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -85,7 +86,7 @@ public class TestChecksum {
     Path path = new Path(TEST_UTIL.getDataTestDir(), "default_checksum");
     FSDataOutputStream os = fs.create(path);
     HFileContext meta = new HFileContextBuilder().build();
-    HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
+    HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta);
     DataOutputStream dos = hbw.startWriting(BlockType.DATA);
     for (int i = 0; i < 1000; ++i)
       dos.writeInt(i);
@@ -105,7 +106,7 @@ public class TestChecksum {
         .withFilePath(path)
         .build();
     HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context,
-        meta, ByteBuffAllocator.HEAP);
+        meta, ByteBuffAllocator.HEAP, TEST_UTIL.getConfiguration());
     HFileBlock b = hbr.readBlockData(0, -1, false, false, true);
     assertTrue(!b.isSharedMem());
     assertEquals(b.getChecksumType(), ChecksumType.getDefaultChecksumType().getCode());
@@ -137,7 +138,7 @@ public class TestChecksum {
       HFileContext meta = new HFileContextBuilder()
             .withChecksumType(ckt)
             .build();
-      HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
+      HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta);
       DataOutputStream dos = hbw.startWriting(BlockType.DATA);
       for (int i = 0; i < intCount; ++i) {
         dos.writeInt(i);
@@ -158,7 +159,7 @@ public class TestChecksum {
           .withFilePath(path)
           .build();
       HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context,
-          meta, ByteBuffAllocator.HEAP);
+          meta, ByteBuffAllocator.HEAP, TEST_UTIL.getConfiguration());
       HFileBlock b = hbr.readBlockData(0, -1, false, false, true);
       assertTrue(!b.isSharedMem());
 
@@ -206,7 +207,7 @@ public class TestChecksum {
                             .withIncludesTags(useTags)
                             .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                             .build();
-        HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
+        HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta);
         long totalSize = 0;
         for (int blockId = 0; blockId < 2; ++blockId) {
           DataOutputStream dos = hbw.startWriting(BlockType.DATA);
@@ -234,7 +235,8 @@ public class TestChecksum {
            .withFileSystem(fs)
            .withFilePath(path)
            .build();
-        HFileBlock.FSReader hbr = new CorruptedFSReaderImpl(context, meta);
+        HFileBlock.FSReader hbr = new CorruptedFSReaderImpl(context, meta,
+          TEST_UTIL.getConfiguration());
         HFileBlock b = hbr.readBlockData(0, -1, pread, false, true);
         b.sanityCheck();
         assertEquals(4936, b.getUncompressedSizeWithoutHeader());
@@ -276,7 +278,8 @@ public class TestChecksum {
         // Now, use a completely new reader. Switch off hbase checksums in
         // the configuration. In this case, we should not detect
         // any retries within hbase.
-        HFileSystem newfs = new HFileSystem(TEST_UTIL.getConfiguration(), false);
+        Configuration conf = TEST_UTIL.getConfiguration();
+        HFileSystem newfs = new HFileSystem(conf, false);
         assertEquals(false, newfs.useHBaseChecksum());
         is = new FSDataInputStreamWrapper(newfs, path);
         context = new ReaderContextBuilder()
@@ -285,7 +288,7 @@ public class TestChecksum {
             .withFileSystem(newfs)
             .withFilePath(path)
             .build();
-        hbr = new CorruptedFSReaderImpl(context, meta);
+        hbr = new CorruptedFSReaderImpl(context, meta, conf);
         b = hbr.readBlockData(0, -1, pread, false, true);
         is.close();
         b.sanityCheck();
@@ -329,7 +332,7 @@ public class TestChecksum {
                             .withHBaseCheckSum(true)
                             .withBytesPerCheckSum(bytesPerChecksum)
                             .build();
-        HFileBlock.Writer hbw = new HFileBlock.Writer(null,
+        HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null,
            meta);
 
         // write one block. The block has data
@@ -373,7 +376,8 @@ public class TestChecksum {
             .withFilePath(path)
             .build();
         HFileBlock.FSReader hbr =
-            new HFileBlock.FSReaderImpl(context, meta, ByteBuffAllocator.HEAP);
+            new HFileBlock.FSReaderImpl(context, meta, ByteBuffAllocator.HEAP,
+              TEST_UTIL.getConfiguration());
         HFileBlock b = hbr.readBlockData(0, -1, pread, false, true);
         assertTrue(b.getBufferReadOnly() instanceof SingleByteBuff);
         is.close();
@@ -413,8 +417,9 @@ public class TestChecksum {
      */
     boolean corruptDataStream = false;
 
-    public CorruptedFSReaderImpl(ReaderContext context, HFileContext meta) throws IOException {
-      super(context, meta, ByteBuffAllocator.HEAP);
+    public CorruptedFSReaderImpl(ReaderContext context, HFileContext meta, Configuration conf)
+        throws IOException {
+      super(context, meta, ByteBuffAllocator.HEAP, conf);
     }
 
     @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
index 6253dd1..b8c798b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
@@ -528,7 +528,7 @@ public class TestHFile  {
     System.out.println(cacheConf.toString());
     // Load up the index.
     // Get a scanner that caches and that does not use pread.
-    HFileScanner scanner = reader.getScanner(true, false);
+    HFileScanner scanner = reader.getScanner(conf, true, false);
     // Align scanner at start of the file.
     scanner.seekTo();
     readAllRecords(scanner);
@@ -617,7 +617,7 @@ public class TestHFile  {
     ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, mFile).build();
     Reader reader = createReaderFromStream(context, cacheConf, conf);
     // No data -- this should return false.
-    assertFalse(reader.getScanner(false, false).seekTo());
+    assertFalse(reader.getScanner(conf, false, false).seekTo());
     someReadingWithMetaBlock(reader);
     fs.delete(mFile, true);
     reader.close();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
index d7294bf..5f2008f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
@@ -269,7 +269,7 @@ public class TestHFileBlock {
                         .withIncludesTags(includesTag)
                         .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                         .build();
-    HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
+    HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta);
     DataOutputStream dos = hbw.startWriting(blockType);
     writeTestBlockContents(dos);
     dos.flush();
@@ -351,8 +351,9 @@ public class TestHFileBlock {
   }
 
   protected void testReaderV2Internals() throws IOException {
-    if(includesTag) {
-      TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
+    final Configuration conf = TEST_UTIL.getConfiguration();
+    if (includesTag) {
+      conf.setInt("hfile.format.version", 3);
     }
     for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
       for (boolean pread : new boolean[] { false, true }) {
@@ -367,7 +368,7 @@ public class TestHFileBlock {
                            .withIncludesTags(includesTag)
                            .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                            .build();
-        HFileBlock.Writer hbw = new HFileBlock.Writer(null,
+        HFileBlock.Writer hbw = new HFileBlock.Writer(conf, null,
            meta);
         long totalSize = 0;
         for (int blockId = 0; blockId < 2; ++blockId) {
@@ -391,7 +392,8 @@ public class TestHFileBlock {
             .withFilePath(path)
             .withFileSystem(fs)
             .build();
-        HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, alloc);
+        HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, alloc,
+          TEST_UTIL.getConfiguration());
         HFileBlock b = hbr.readBlockData(0, -1, pread, false, true);
         is.close();
         assertEquals(0, HFile.getAndResetChecksumFailuresCount());
@@ -410,7 +412,8 @@ public class TestHFileBlock {
               .withFilePath(path)
               .withFileSystem(fs)
               .build();
-          hbr = new HFileBlock.FSReaderImpl(readerContext, meta, alloc);
+          hbr = new HFileBlock.FSReaderImpl(readerContext, meta, alloc,
+            TEST_UTIL.getConfiguration());
           b = hbr.readBlockData(0,
             2173 + HConstants.HFILEBLOCK_HEADER_SIZE + b.totalChecksumBytes(), pread, false, true);
           assertEquals(expected, b);
@@ -444,8 +447,9 @@ public class TestHFileBlock {
 
   private void testInternals() throws IOException {
     final int numBlocks = 5;
+    final Configuration conf = TEST_UTIL.getConfiguration();
     if(includesTag) {
-      TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
+      conf.setInt("hfile.format.version", 3);
     }
     for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
       for (boolean pread : new boolean[] { false, true }) {
@@ -463,7 +467,7 @@ public class TestHFileBlock {
                               .withIncludesTags(includesTag)
                               .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                               .build();
-          HFileBlock.Writer hbw = new HFileBlock.Writer(dataBlockEncoder, meta);
+          HFileBlock.Writer hbw = new HFileBlock.Writer(conf, dataBlockEncoder, meta);
           long totalSize = 0;
           final List<Integer> encodedSizes = new ArrayList<>();
           final List<ByteBuff> encodedBlocks = new ArrayList<>();
@@ -500,8 +504,8 @@ public class TestHFileBlock {
               .withFileSystem(fs)
               .build();
           HFileBlock.FSReaderImpl hbr =
-              new HFileBlock.FSReaderImpl(context, meta, alloc);
-          hbr.setDataBlockEncoder(dataBlockEncoder);
+              new HFileBlock.FSReaderImpl(context, meta, alloc, conf);
+          hbr.setDataBlockEncoder(dataBlockEncoder, conf);
           hbr.setIncludesMemStoreTS(includesMemstoreTS);
           HFileBlock blockFromHFile, blockUnpacked;
           int pos = 0;
@@ -609,6 +613,7 @@ public class TestHFileBlock {
 
   protected void testPreviousOffsetInternals() throws IOException {
     // TODO: parameterize these nested loops.
+    Configuration conf = TEST_UTIL.getConfiguration();
     for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
       for (boolean pread : BOOLEAN_VALUES) {
         for (boolean cacheOnWrite : BOOLEAN_VALUES) {
@@ -620,8 +625,8 @@ public class TestHFileBlock {
           List<Long> expectedPrevOffsets = new ArrayList<>();
           List<BlockType> expectedTypes = new ArrayList<>();
           List<ByteBuffer> expectedContents = cacheOnWrite ? new ArrayList<>() : null;
-          long totalSize = writeBlocks(rand, algo, path, expectedOffsets,
-              expectedPrevOffsets, expectedTypes, expectedContents);
+          long totalSize = writeBlocks(TEST_UTIL.getConfiguration(), rand, algo, path,
+            expectedOffsets, expectedPrevOffsets, expectedTypes, expectedContents);
 
           FSDataInputStream is = fs.open(path);
           HFileContext meta = new HFileContextBuilder()
@@ -635,8 +640,7 @@ public class TestHFileBlock {
               .withFilePath(path)
               .withFileSystem(fs)
               .build();
-          HFileBlock.FSReader hbr =
-              new HFileBlock.FSReaderImpl(context, meta, alloc);
+          HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, alloc, conf);
           long curOffset = 0;
           for (int i = 0; i < NUM_TEST_BLOCKS; ++i) {
             if (!pread) {
@@ -819,12 +823,14 @@ public class TestHFileBlock {
 
   protected void testConcurrentReadingInternals() throws IOException,
       InterruptedException, ExecutionException {
+    Configuration conf = TEST_UTIL.getConfiguration();
     for (Compression.Algorithm compressAlgo : COMPRESSION_ALGORITHMS) {
       Path path = new Path(TEST_UTIL.getDataTestDir(), "concurrent_reading");
       Random rand = defaultRandom();
       List<Long> offsets = new ArrayList<>();
       List<BlockType> types = new ArrayList<>();
-      writeBlocks(rand, compressAlgo, path, offsets, null, types, null);
+      writeBlocks(TEST_UTIL.getConfiguration(), rand, compressAlgo, path, offsets, null,
+        types, null);
       FSDataInputStream is = fs.open(path);
       long fileSize = fs.getFileStatus(path).getLen();
       HFileContext meta = new HFileContextBuilder()
@@ -839,7 +845,7 @@ public class TestHFileBlock {
           .withFilePath(path)
           .withFileSystem(fs)
           .build();
-      HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, alloc);
+      HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, alloc, conf);
 
       Executor exec = Executors.newFixedThreadPool(NUM_READER_THREADS);
       ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<>(exec);
@@ -862,7 +868,7 @@ public class TestHFileBlock {
     }
   }
 
-  private long writeBlocks(Random rand, Compression.Algorithm compressAlgo,
+  private long writeBlocks(Configuration conf, Random rand, Compression.Algorithm compressAlgo,
       Path path, List<Long> expectedOffsets, List<Long> expectedPrevOffsets,
       List<BlockType> expectedTypes, List<ByteBuffer> expectedContents
   ) throws IOException {
@@ -875,7 +881,7 @@ public class TestHFileBlock {
                         .withCompression(compressAlgo)
                         .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                         .build();
-    HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
+    HFileBlock.Writer hbw = new HFileBlock.Writer(conf, null, meta);
     Map<BlockType, Long> prevOffsetByType = new HashMap<>();
     long totalSize = 0;
     for (int i = 0; i < NUM_TEST_BLOCKS; ++i) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
index 14b9bbf..526072b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
@@ -210,7 +210,7 @@ public class TestHFileBlockIndex {
                         .build();
     ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, path).build();
     HFileBlock.FSReader blockReader = new HFileBlock.FSReaderImpl(context, meta,
-        ByteBuffAllocator.HEAP);
+        ByteBuffAllocator.HEAP, conf);
 
     BlockReaderWrapper brw = new BlockReaderWrapper(blockReader);
     HFileBlockIndex.BlockIndexReader indexReader =
@@ -267,7 +267,7 @@ public class TestHFileBlockIndex {
                         .withCompression(compr)
                         .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                         .build();
-    HFileBlock.Writer hbw = new HFileBlock.Writer(null,
+    HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null,
         meta);
     FSDataOutputStream outputStream = fs.create(path);
     HFileBlockIndex.BlockIndexWriter biw =
@@ -647,7 +647,7 @@ public class TestHFileBlockIndex {
       LOG.info("Last key: " + Bytes.toStringBinary(keys[NUM_KV - 1]));
 
       for (boolean pread : new boolean[] { false, true }) {
-        HFileScanner scanner = reader.getScanner(true, pread);
+        HFileScanner scanner = reader.getScanner(conf, true, pread);
         for (int i = 0; i < NUM_KV; ++i) {
           checkSeekTo(keys, scanner, i);
           checkKeyValue("i=" + i, keys[i], values[i],
@@ -770,7 +770,7 @@ public class TestHFileBlockIndex {
 
     HFile.Reader reader = HFile.createReader(fs, hfPath, cacheConf, true, conf);
     // Scanner doesn't do Cells yet.  Fix.
-    HFileScanner scanner = reader.getScanner(true, true);
+    HFileScanner scanner = reader.getScanner(conf, true, true);
     for (int i = 0; i < keys.size(); ++i) {
       scanner.seekTo(CellUtil.createCell(keys.get(i)));
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
index 400cece..a1c02ae 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
@@ -27,8 +27,11 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
@@ -58,8 +61,9 @@ public class TestHFileDataBlockEncoder {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestHFileDataBlockEncoder.class);
 
+  private final Configuration conf = HBaseConfiguration.create();
+  private final RedundantKVGenerator generator = new RedundantKVGenerator();
   private HFileDataBlockEncoder blockEncoder;
-  private RedundantKVGenerator generator = new RedundantKVGenerator();
   private boolean includesMemstoreTS;
 
   /**
@@ -87,7 +91,7 @@ public class TestHFileDataBlockEncoder {
   private void testEncodingWithCacheInternals(boolean useTag) throws IOException {
     List<KeyValue> kvs = generator.generateTestKeyValues(60, useTag);
     HFileBlock block = getSampleHFileBlock(kvs, useTag);
-    HFileBlock cacheBlock = createBlockOnDisk(kvs, block, useTag);
+    HFileBlock cacheBlock = createBlockOnDisk(conf, kvs, block, useTag);
 
     LruBlockCache blockCache =
         new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
@@ -135,7 +139,7 @@ public class TestHFileDataBlockEncoder {
                         .build();
     HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf),
         HFileBlock.FILL_HEADER, 0, 0, -1, hfileContext, ByteBuffAllocator.HEAP);
-    HFileBlock cacheBlock = createBlockOnDisk(kvs, block, useTags);
+    HFileBlock cacheBlock = createBlockOnDisk(conf, kvs, block, useTags);
     assertEquals(headerSize, cacheBlock.getDummyHeaderForVersion().length);
   }
 
@@ -162,7 +166,7 @@ public class TestHFileDataBlockEncoder {
       HFileContext meta = new HFileContextBuilder().withIncludesMvcc(includesMemstoreTS)
           .withIncludesTags(true).withHBaseCheckSum(true).withCompression(Algorithm.NONE)
           .withBlockSize(0).withChecksumType(ChecksumType.NULL).build();
-      writeBlock(kvs, meta, true);
+      writeBlock(conf, kvs, meta, true);
     } catch (IllegalArgumentException e) {
       fail("No exception should have been thrown");
     }
@@ -172,7 +176,7 @@ public class TestHFileDataBlockEncoder {
     // usually we have just block without headers, but don't complicate that
     List<KeyValue> kvs = generator.generateTestKeyValues(60, useTag);
     HFileBlock block = getSampleHFileBlock(kvs, useTag);
-    HFileBlock blockOnDisk = createBlockOnDisk(kvs, block, useTag);
+    HFileBlock blockOnDisk = createBlockOnDisk(conf, kvs, block, useTag);
 
     if (blockEncoder.getDataBlockEncoding() !=
         DataBlockEncoding.NONE) {
@@ -204,10 +208,10 @@ public class TestHFileDataBlockEncoder {
     return b;
   }
 
-  private HFileBlock createBlockOnDisk(List<KeyValue> kvs, HFileBlock block, boolean useTags)
-      throws IOException {
+  private HFileBlock createBlockOnDisk(Configuration conf, List<KeyValue> kvs, HFileBlock block,
+      boolean useTags) throws IOException {
     int size;
-    HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
+    HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(conf,
         blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER,
         block.getHFileContext());
 
@@ -226,9 +230,9 @@ public class TestHFileDataBlockEncoder {
         block.getOnDiskDataSizeWithHeader(), -1, block.getHFileContext(), ByteBuffAllocator.HEAP);
   }
 
-  private void writeBlock(List<Cell> kvs, HFileContext fileContext, boolean useTags)
-      throws IOException {
-    HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
+  private void writeBlock(Configuration conf, List<Cell> kvs, HFileContext fileContext,
+      boolean useTags) throws IOException {
+    HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(conf,
         blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER,
         fileContext);
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java
index 6e76b5a..0bd3661 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java
@@ -93,9 +93,9 @@ public class TestHFileEncryption {
     cryptoContext.setKey(key);
   }
 
-  private int writeBlock(FSDataOutputStream os, HFileContext fileContext, int size)
-      throws IOException {
-    HFileBlock.Writer hbw = new HFileBlock.Writer(null, fileContext);
+  private int writeBlock(Configuration conf, FSDataOutputStream os, HFileContext fileContext,
+      int size) throws IOException {
+    HFileBlock.Writer hbw = new HFileBlock.Writer(conf, null, fileContext);
     DataOutputStream dos = hbw.startWriting(BlockType.DATA);
     for (int j = 0; j < size; j++) {
       dos.writeInt(j);
@@ -148,7 +148,7 @@ public class TestHFileEncryption {
       FSDataOutputStream os = fs.create(path);
       try {
         for (int i = 0; i < blocks; i++) {
-          totalSize += writeBlock(os, fileContext, blockSizes[i]);
+          totalSize += writeBlock(TEST_UTIL.getConfiguration(), os, fileContext, blockSizes[i]);
         }
       } finally {
         os.close();
@@ -161,7 +161,7 @@ public class TestHFileEncryption {
           .withFileSize(totalSize).build();
       try {
         HFileBlock.FSReaderImpl hbr = new HFileBlock.FSReaderImpl(context, fileContext,
-            ByteBuffAllocator.HEAP);
+            ByteBuffAllocator.HEAP, TEST_UTIL.getConfiguration());
         long pos = 0;
         for (int i = 0; i < blocks; i++) {
           pos += readAndVerifyBlock(pos, fileContext, hbr, blockSizes[i]);
@@ -252,7 +252,7 @@ public class TestHFileEncryption {
         try {
           FixedFileTrailer trailer = reader.getTrailer();
           assertNotNull(trailer.getEncryptionKey());
-          scanner = reader.getScanner(false, false);
+          scanner = reader.getScanner(conf, false, false);
           assertTrue("Initial seekTo failed", scanner.seekTo());
           do {
             Cell kv = scanner.getCell();
@@ -271,7 +271,7 @@ public class TestHFileEncryption {
         LOG.info("Random seeking with " + fileContext);
         reader = HFile.createReader(fs, path, cacheConf, true, conf);
         try {
-          scanner = reader.getScanner(false, true);
+          scanner = reader.getScanner(conf, false, true);
           assertTrue("Initial seekTo failed", scanner.seekTo());
           for (i = 0; i < 100; i++) {
             KeyValue kv = testKvs.get(RNG.nextInt(testKvs.size()));
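
The TestHFileEncryption hunk above also exercises the lower-level signature changes: HFileBlock.Writer now takes a Configuration as its first constructor argument, and HFileBlock.FSReaderImpl takes one as its final argument. Below is a minimal write-side sketch. It assumes the class sits in the org.apache.hadoop.hbase.io.hfile package (as these tests do, since Writer is not public API), and the writeHeaderAndData call comes from the existing Writer API rather than from anything shown in this hunk; the class name and output path are hypothetical.

package org.apache.hadoop.hbase.io.hfile;

import java.io.DataOutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HFileBlockWriterConfSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/block-writer-sketch");   // hypothetical output path
    HFileContext fileContext = new HFileContextBuilder().build();
    // The Configuration is now the first constructor argument; the data block
    // encoder may still be null, as in the test helper above.
    HFileBlock.Writer hbw = new HFileBlock.Writer(conf, null, fileContext);
    try (FSDataOutputStream os = fs.create(path)) {
      DataOutputStream dos = hbw.startWriting(BlockType.DATA);
      for (int j = 0; j < 1000; j++) {
        dos.writeInt(j);
      }
      hbw.writeHeaderAndData(os);   // flush the finished block to the stream
    }
  }
}
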
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java
index 8102f11..7c2461d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java
@@ -86,7 +86,7 @@ public class TestHFileInlineToRootChunkConversion {
 
     HFile.Reader reader = HFile.createReader(fs, hfPath, cacheConf, true, conf);
     // Scanner doesn't do Cells yet.  Fix.
-    HFileScanner scanner = reader.getScanner(true, true);
+    HFileScanner scanner = reader.getScanner(conf, true, true);
     for (int i = 0; i < keys.size(); ++i) {
       scanner.seekTo(CellUtil.createCell(keys.get(i)));
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java
index 8c3a632..8459662 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java
@@ -92,7 +92,7 @@ public class TestHFileReaderImpl {
     HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf, bucketcache), true, conf);
 
     // warm cache
-    HFileScanner scanner = reader.getScanner(true, true);
+    HFileScanner scanner = reader.getScanner(conf, true, true);
     scanner.seekTo(toKV("i"));
     assertEquals("i", toRowStr(scanner.getCell()));
     scanner.close();
@@ -102,7 +102,7 @@ public class TestHFileReaderImpl {
     }
 
     // reopen again.
-    scanner = reader.getScanner(true, true);
+    scanner = reader.getScanner(conf, true, true);
     scanner.seekTo(toKV("i"));
     assertEquals("i", toRowStr(scanner.getCell()));
     scanner.seekBefore(toKV("i"));
@@ -117,7 +117,7 @@ public class TestHFileReaderImpl {
     }
 
     // case 2
-    scanner = reader.getScanner(true, true);
+    scanner = reader.getScanner(conf, true, true);
     scanner.seekTo(toKV("i"));
     assertEquals("i", toRowStr(scanner.getCell()));
     scanner.seekBefore(toKV("c"));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java
index cd33aac..273f674 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java
@@ -201,7 +201,7 @@ public class TestHFileScannerImplReferenceCount {
     // We've build a HFile tree with index = 16.
     Assert.assertEquals(16, reader.getTrailer().getNumDataIndexLevels());
 
-    HFileScannerImpl scanner = (HFileScannerImpl) reader.getScanner(true, true, false);
+    HFileScannerImpl scanner = (HFileScannerImpl) reader.getScanner(conf, true, true, false);
     HFileBlock block1 = reader.getDataBlockIndexReader()
         .loadDataBlockWithScanInfo(firstCell, null, true, true, false,
             DataBlockEncoding.NONE, reader).getHFileBlock();
@@ -285,7 +285,7 @@ public class TestHFileScannerImplReferenceCount {
     // We've build a HFile tree with index = 16.
     Assert.assertEquals(16, reader.getTrailer().getNumDataIndexLevels());
 
-    HFileScannerImpl scanner = (HFileScannerImpl) reader.getScanner(true, true, false);
+    HFileScannerImpl scanner = (HFileScannerImpl) reader.getScanner(conf, true, true, false);
     HFileBlock block1 = reader.getDataBlockIndexReader()
         .loadDataBlockWithScanInfo(firstCell, null, true, true, false,
             DataBlockEncoding.NONE, reader).getHFileBlock();
@@ -415,7 +415,7 @@ public class TestHFileScannerImplReferenceCount {
     // We've build a HFile tree with index = 16.
     Assert.assertEquals(16, reader.getTrailer().getNumDataIndexLevels());
 
-    HFileScannerImpl scanner = (HFileScannerImpl) reader.getScanner(true, true, false);
+    HFileScannerImpl scanner = (HFileScannerImpl) reader.getScanner(conf, true, true, false);
     HFileBlock block1 = reader.getDataBlockIndexReader()
         .loadDataBlockWithScanInfo(firstCell, null, true, true, false,
             DataBlockEncoding.NONE, reader).getHFileBlock();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
index 7d6212d..51fa925 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java
@@ -189,7 +189,7 @@ public class TestHFileSeek extends TestCase {
     Reader reader = TestHFile.createReaderFromStream(context, new CacheConfig(conf), conf);
     KeySampler kSampler = new KeySampler(rng, ((KeyValue) reader.getFirstKey().get()).getKey(),
         ((KeyValue) reader.getLastKey().get()).getKey(), keyLenGen);
-    HFileScanner scanner = reader.getScanner(false, USE_PREAD);
+    HFileScanner scanner = reader.getScanner(conf, false, USE_PREAD);
     BytesWritable key = new BytesWritable();
     timer.reset();
     timer.start();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java
index e6906be..7e48911 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java
@@ -190,7 +190,7 @@ public class TestHFileWriterV3 {
       .withFileSystem(fs)
       .withFileSize(fileSize).build();
     HFileBlock.FSReader blockReader =
-        new HFileBlock.FSReaderImpl(readerContext, meta, ByteBuffAllocator.HEAP);
+        new HFileBlock.FSReaderImpl(readerContext, meta, ByteBuffAllocator.HEAP, conf);
     // Comparator class name is stored in the trailer in version 3.
     CellComparator comparator = trailer.createComparator();
     HFileBlockIndex.BlockIndexReader dataBlockIndexReader =
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3WithDataEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3WithDataEncoders.java
index e5f9703..2d9a5bd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3WithDataEncoders.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3WithDataEncoders.java
@@ -176,7 +176,7 @@ public class TestHFileWriterV3WithDataEncoders {
       .withFileSystem(fs)
       .withFileSize(fileSize).build();
     HFileBlock.FSReader blockReader =
-      new HFileBlock.FSReaderImpl(readerContext, meta, ByteBuffAllocator.HEAP);
+      new HFileBlock.FSReaderImpl(readerContext, meta, ByteBuffAllocator.HEAP, conf);
     // Comparator class name is stored in the trailer in version 3.
     CellComparator comparator = trailer.createComparator();
     HFileBlockIndex.BlockIndexReader dataBlockIndexReader =
@@ -277,7 +277,7 @@ public class TestHFileWriterV3WithDataEncoders {
       origBlock.limit(pos + block.getUncompressedSizeWithoutHeader() - DataBlockEncoding.ID_SIZE);
       ByteBuff buf =  origBlock.slice();
       DataBlockEncoder.EncodedSeeker seeker =
-        encoder.createSeeker(encoder.newDataBlockDecodingContext(meta));
+        encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta));
       seeker.setCurrentBuffer(buf);
       Cell res = seeker.getCell();
       KeyValue kv = keyValues.get(entriesRead);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
index de68578..ebae30c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java
@@ -112,7 +112,7 @@ public class TestReseekTo {
 
     HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), ncTFile, cacheConf,
       true, TEST_UTIL.getConfiguration());
-    HFileScanner scanner = reader.getScanner(false, true);
+    HFileScanner scanner = reader.getScanner(TEST_UTIL.getConfiguration(), false, true);
 
     scanner.seekTo();
     for (int i = 0; i < keyList.size(); i++) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java
index 6451344..c420d24 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java
@@ -150,7 +150,7 @@ public class TestSeekBeforeWithInlineBlocks {
           // Check that we can seekBefore in either direction and with both pread
           // enabled and disabled
           for (boolean pread : new boolean[] { false, true }) {
-            HFileScanner scanner = reader.getScanner(true, pread);
+            HFileScanner scanner = reader.getScanner(conf, true, pread);
             checkNoSeekBefore(cells, scanner, 0);
             for (int i = 1; i < NUM_KV; i++) {
               checkSeekBefore(cells, scanner, i);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
index e45383a..b00c508 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
@@ -148,7 +148,7 @@ public class TestSeekTo {
     FileSystem fs = TEST_UTIL.getTestFileSystem();
     Configuration conf = TEST_UTIL.getConfiguration();
     HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), true, conf);
-    HFileScanner scanner = reader.getScanner(false, true);
+    HFileScanner scanner = reader.getScanner(conf, false, true);
     assertFalse(scanner.seekBefore(toKV("a", tagUsage)));
 
     assertFalse(scanner.seekBefore(toKV("c", tagUsage)));
@@ -206,7 +206,7 @@ public class TestSeekTo {
     FileSystem fs = TEST_UTIL.getTestFileSystem();
     Configuration conf = TEST_UTIL.getConfiguration();
     HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), true, conf);
-    HFileScanner scanner = reader.getScanner(false, true);
+    HFileScanner scanner = reader.getScanner(conf, false, true);
     assertFalse(scanner.seekBefore(toKV("a", tagUsage)));
     assertFalse(scanner.seekBefore(toKV("b", tagUsage)));
     assertFalse(scanner.seekBefore(toKV("c", tagUsage)));
@@ -300,7 +300,7 @@ public class TestSeekTo {
     Configuration conf = TEST_UTIL.getConfiguration();
     HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), true, conf);
     assertEquals(2, reader.getDataBlockIndexReader().getRootBlockCount());
-    HFileScanner scanner = reader.getScanner(false, true);
+    HFileScanner scanner = reader.getScanner(conf, false, true);
     // lies before the start of the file.
     assertEquals(-1, scanner.seekTo(toKV("a", tagUsage)));
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
index a8efa16..01f40be 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
@@ -119,6 +119,7 @@ public class DataBlockEncodingTool {
   private static int benchmarkNTimes = DEFAULT_BENCHMARK_N_TIMES;
   private static int benchmarkNOmit = DEFAULT_BENCHMARK_N_OMIT;
 
+  private final Configuration conf;
   private List<EncodedDataBlock> codecs = new ArrayList<>();
   private long totalPrefixLength = 0;
   private long totalKeyLength = 0;
@@ -157,7 +158,8 @@ public class DataBlockEncodingTool {
    * @param compressionAlgorithmName What kind of algorithm should be used
    *                                 as baseline for comparison (e.g. lzo, gz).
    */
-  public DataBlockEncodingTool(String compressionAlgorithmName) {
+  public DataBlockEncodingTool(Configuration conf, String compressionAlgorithmName) {
+    this.conf = conf;
     this.compressionAlgorithmName = compressionAlgorithmName;
     this.compressionAlgorithm = Compression.getCompressionAlgorithmByName(
         compressionAlgorithmName);
@@ -242,7 +244,7 @@ public class DataBlockEncodingTool {
           .withCompression(Compression.Algorithm.NONE)
           .withIncludesMvcc(includesMemstoreTS)
           .withIncludesTags(USE_TAG).build();
-      codecs.add(new EncodedDataBlock(d, encoding, rawKVs, meta ));
+      codecs.add(new EncodedDataBlock(conf, d, encoding, rawKVs, meta));
     }
   }
 
@@ -619,7 +621,7 @@ public class DataBlockEncodingTool {
         false, hsf.getMaxMemStoreTS(), 0, false);
     USE_TAG = reader.getHFileReader().getFileContext().isIncludesTags();
     // run the utilities
-    DataBlockEncodingTool comp = new DataBlockEncodingTool(compressionName);
+    DataBlockEncodingTool comp = new DataBlockEncodingTool(conf, compressionName);
     int majorVersion = reader.getHFileVersion();
     comp.useHBaseChecksum = majorVersion > 2 ||
       (majorVersion == 2 &&
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
index ab5c8ff..7d6d624 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
@@ -231,7 +231,7 @@ public class TestCacheOnWriteInSchema {
     HFile.Reader reader = sf.getReader().getHFileReader();
     try {
       // Open a scanner with (on read) caching disabled
-      HFileScanner scanner = reader.getScanner(false, false);
+      HFileScanner scanner = reader.getScanner(conf, false, false);
       assertTrue(testDescription, scanner.seekTo());
       // Cribbed from io.hfile.TestCacheOnWrite
       long offset = 0;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.java
index e1ed768..e1a443e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.java
@@ -614,7 +614,7 @@ public class TestLoadIncrementalHFiles {
     Configuration conf = util.getConfiguration();
     HFile.Reader reader =
       HFile.createReader(p.getFileSystem(conf), p, new CacheConfig(conf), true, conf);
-    HFileScanner scanner = reader.getScanner(false, false);
+    HFileScanner scanner = reader.getScanner(conf, false, false);
     scanner.seekTo();
     int count = 0;
     do {