Posted to commits@hbase.apache.org by jx...@apache.org on 2013/11/13 18:31:04 UTC

svn commit: r1541629 [1/2] - in /hbase/trunk: hbase-client/src/main/java/org/apache/hadoop/hbase/ hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/ hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/ hbase-server/src/main/java/or...

Author: jxiang
Date: Wed Nov 13 17:31:02 2013
New Revision: 1541629

URL: http://svn.apache.org/r1541629
Log:
HBASE-9870 HFileDataBlockEncoderImpl#diskToCacheFormat uses wrong format

Modified:
    hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
    hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java
    hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
    hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
    hbase/trunk/hbase-shell/src/main/ruby/hbase/admin.rb
    hbase/trunk/src/main/docbkx/shell.xml

Modified: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java (original)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java Wed Nov 13 17:31:02 2013
@@ -71,7 +71,7 @@ public class HColumnDescriptor implement
   // These constants are used as FileInfo keys
   public static final String COMPRESSION = "COMPRESSION";
   public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
-  public static final String ENCODE_ON_DISK =
+  public static final String ENCODE_ON_DISK = // To be removed; it is no longer used
       "ENCODE_ON_DISK";
   public static final String DATA_BLOCK_ENCODING =
       "DATA_BLOCK_ENCODING";
@@ -209,7 +209,6 @@ public class HColumnDescriptor implement
       DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
       DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
       DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
-      DEFAULT_VALUES.put(ENCODE_ON_DISK, String.valueOf(DEFAULT_ENCODE_ON_DISK));
       DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
       DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE));
       DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE));
@@ -421,7 +420,6 @@ public class HColumnDescriptor implement
     setTimeToLive(timeToLive);
     setCompressionType(Compression.Algorithm.
       valueOf(compression.toUpperCase()));
-    setEncodeOnDisk(encodeOnDisk);
     setDataBlockEncoding(DataBlockEncoding.
         valueOf(dataBlockEncoding.toUpperCase()));
     setBloomFilterType(BloomType.
@@ -629,29 +627,19 @@ public class HColumnDescriptor implement
   }
 
   /** @return data block encoding algorithm used on disk */
+  @Deprecated
   public DataBlockEncoding getDataBlockEncodingOnDisk() {
-    String encodeOnDiskStr = getValue(ENCODE_ON_DISK);
-    boolean encodeOnDisk;
-    if (encodeOnDiskStr == null) {
-      encodeOnDisk = DEFAULT_ENCODE_ON_DISK;
-    } else {
-      encodeOnDisk = Boolean.valueOf(encodeOnDiskStr);
-    }
-
-    if (!encodeOnDisk) {
-      // No encoding on disk.
-      return DataBlockEncoding.NONE;
-    }
     return getDataBlockEncoding();
   }
 
   /**
-   * Set the flag indicating that we only want to encode data block in cache
-   * but not on disk.
+   * This method is a no-op now. The ENCODE_ON_DISK flag is no longer
+   * used. Data blocks have the same encoding in cache as on disk.
    * @return this (for chained invocation)
    */
+  @Deprecated
   public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) {
-    return setValue(ENCODE_ON_DISK, String.valueOf(encodeOnDisk));
+    return this;
   }
 
   /**

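With ENCODE_ON_DISK deprecated into a no-op, a column family carries a single
DATA_BLOCK_ENCODING setting that applies both on disk and in the block cache.
A minimal caller-side sketch (illustration only, not part of this commit):

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

    public class EncodingConfigExample {
      public static void main(String[] args) {
        HColumnDescriptor family = new HColumnDescriptor("cf");
        // One setting now controls the encoding on disk and in cache.
        family.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
        // Deprecated no-op after this change; kept for source compatibility.
        family.setEncodeOnDisk(false);
        System.out.println(family.getDataBlockEncoding()); // FAST_DIFF either way
      }
    }
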
Modified: hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java (original)
+++ hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java Wed Nov 13 17:31:02 2013
@@ -173,7 +173,8 @@ public enum DataBlockEncoding {
     }
 
     DataBlockEncoding algorithm = idToEncoding.get(encoderId);
-    return algorithm.getClass().equals(encoder.getClass());
+    String encoderCls = encoder.getClass().getName();
+    return encoderCls.equals(algorithm.encoderCls);
   }
 
   public static DataBlockEncoding getEncodingById(short dataBlockEncodingId) {

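As the hunk above suggests, the old check compared the enum constant's class
(always the DataBlockEncoding type itself) to the encoder's class, which can
never match; the fix compares the encoder's class name against the name
recorded in the enum constant. A self-contained toy reproduction of the
pattern (all names here are illustrative, not HBase API):

    public class EncoderCheckExample {
      interface Encoder {}
      static class PrefixEncoder implements Encoder {}

      enum Encoding {
        PREFIX(PrefixEncoder.class.getName());
        final String encoderCls;
        Encoding(String encoderCls) { this.encoderCls = encoderCls; }
      }

      public static void main(String[] args) {
        Encoder encoder = new PrefixEncoder();
        Encoding algorithm = Encoding.PREFIX;
        // Old check: the enum's class vs. the encoder's class -- never equal.
        System.out.println(algorithm.getClass().equals(encoder.getClass()));          // false
        // Fixed check: compare against the recorded encoder class name.
        System.out.println(encoder.getClass().getName().equals(algorithm.encoderCls)); // true
      }
    }
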
Modified: hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java (original)
+++ hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java Wed Nov 13 17:31:02 2013
@@ -52,8 +52,7 @@ public class HFileContext implements Hea
   private int bytesPerChecksum = DEFAULT_BYTES_PER_CHECKSUM;
   /** Number of uncompressed bytes we allow per block. */
   private int blocksize = HConstants.DEFAULT_BLOCKSIZE;
-  private DataBlockEncoding encodingOnDisk = DataBlockEncoding.NONE;
-  private DataBlockEncoding encodingInCache = DataBlockEncoding.NONE;
+  private DataBlockEncoding encoding = DataBlockEncoding.NONE;
 
   //Empty constructor.  Go with setters
   public HFileContext() {
@@ -71,14 +70,12 @@ public class HFileContext implements Hea
     this.checksumType = context.checksumType;
     this.bytesPerChecksum = context.bytesPerChecksum;
     this.blocksize = context.blocksize;
-    this.encodingOnDisk = context.encodingOnDisk;
-    this.encodingInCache = context.encodingInCache;
+    this.encoding = context.encoding;
   }
 
   public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
       Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
-      int bytesPerChecksum, int blockSize, DataBlockEncoding encodingOnDisk,
-      DataBlockEncoding encodingInCache) {
+      int bytesPerChecksum, int blockSize, DataBlockEncoding encoding) {
     this.usesHBaseChecksum = useHBaseChecksum;
     this.includesMvcc =  includesMvcc;
     this.includesTags = includesTags;
@@ -87,8 +84,9 @@ public class HFileContext implements Hea
     this.checksumType = checksumType;
     this.bytesPerChecksum = bytesPerChecksum;
     this.blocksize = blockSize;
-    this.encodingOnDisk = encodingOnDisk;
-    this.encodingInCache = encodingInCache;
+    if (encoding != null) {
+      this.encoding = encoding;
+    }
   }
 
   public Algorithm getCompression() {
@@ -135,12 +133,8 @@ public class HFileContext implements Hea
     return blocksize;
   }
 
-  public DataBlockEncoding getEncodingOnDisk() {
-    return encodingOnDisk;
-  }
-
-  public DataBlockEncoding getEncodingInCache() {
-    return encodingInCache;
+  public DataBlockEncoding getDataBlockEncoding() {
+    return encoding;
   }
 
   /**
@@ -151,8 +145,8 @@ public class HFileContext implements Hea
   @Override
   public long heapSize() {
     long size = ClassSize.align(ClassSize.OBJECT +
-        // Algorithm reference, encodingondisk, encodingincache, checksumtype
-        4 * ClassSize.REFERENCE +
+        // Algorithm reference, encoding, checksumtype
+        3 * ClassSize.REFERENCE +
         2 * Bytes.SIZEOF_INT +
         // usesHBaseChecksum, includesMvcc, includesTags and compressTags
         4 * Bytes.SIZEOF_BOOLEAN);
@@ -170,8 +164,7 @@ public class HFileContext implements Hea
     clonnedCtx.checksumType = this.checksumType;
     clonnedCtx.bytesPerChecksum = this.bytesPerChecksum;
     clonnedCtx.blocksize = this.blocksize;
-    clonnedCtx.encodingOnDisk = this.encodingOnDisk;
-    clonnedCtx.encodingInCache = this.encodingInCache;
+    clonnedCtx.encoding = this.encoding;
     return clonnedCtx;
   }
 }

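Collapsing the two encoding fields into one removes a reference from the fixed
heap footprint, hence 3 * ClassSize.REFERENCE. A sketch mirroring the revised
arithmetic above (illustration only):

    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.ClassSize;

    public class HFileContextHeapSizeSketch {
      public static void main(String[] args) {
        long size = ClassSize.align(ClassSize.OBJECT
            + 3 * ClassSize.REFERENCE    // compression, encoding, checksumType
            + 2 * Bytes.SIZEOF_INT       // bytesPerChecksum, blocksize
            + 4 * Bytes.SIZEOF_BOOLEAN); // usesHBaseChecksum, includesMvcc,
                                         // includesTags, compressTags
        System.out.println(size);
      }
    }
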
Modified: hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java (original)
+++ hbase/trunk/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java Wed Nov 13 17:31:02 2013
@@ -47,8 +47,7 @@ public class HFileContextBuilder {
   private int bytesPerChecksum = DEFAULT_BYTES_PER_CHECKSUM;
   /** Number of uncompressed bytes we allow per block. */
   private int blocksize = HConstants.DEFAULT_BLOCKSIZE;
-  private DataBlockEncoding encodingOnDisk = DataBlockEncoding.NONE;
-  private DataBlockEncoding encodingInCache = DataBlockEncoding.NONE;
+  private DataBlockEncoding encoding = DataBlockEncoding.NONE;
 
   public HFileContextBuilder withHBaseCheckSum(boolean useHBaseCheckSum) {
     this.usesHBaseChecksum = useHBaseCheckSum;
@@ -90,18 +89,13 @@ public class HFileContextBuilder {
     return this;
   }
 
-  public HFileContextBuilder withDataBlockEncodingOnDisk(DataBlockEncoding encodingOnDisk) {
-    this.encodingOnDisk = encodingOnDisk;
-    return this;
-  }
-
-  public HFileContextBuilder withDataBlockEncodingInCache(DataBlockEncoding encodingInCache) {
-    this.encodingInCache = encodingInCache;
+  public HFileContextBuilder withDataBlockEncoding(DataBlockEncoding encoding) {
+    this.encoding = encoding;
     return this;
   }
 
   public HFileContext build() {
     return new HFileContext(usesHBaseChecksum, includesMvcc, includesTags, compression,
-        compressTags, checksumType, bytesPerChecksum, blocksize, encodingOnDisk, encodingInCache);
+      compressTags, checksumType, bytesPerChecksum, blocksize, encoding);
   }
 }

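Caller-side view of the collapsed builder API; a minimal sketch (the block
size value is arbitrary):

    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.io.hfile.HFileContext;
    import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

    public class ContextBuilderExample {
      public static void main(String[] args) {
        // One encoding setter replaces the on-disk/in-cache pair.
        HFileContext ctx = new HFileContextBuilder()
            .withBlockSize(64 * 1024)
            .withDataBlockEncoding(DataBlockEncoding.PREFIX)
            .build();
        System.out.println(ctx.getDataBlockEncoding()); // PREFIX
      }
    }
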
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java Wed Nov 13 17:31:02 2013
@@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.filter.By
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.Reference;
-import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
@@ -58,7 +57,6 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Pair;
-import org.mortbay.log.Log;
 
 import com.google.common.collect.ImmutableList;
 
@@ -467,14 +465,14 @@ public abstract class BaseRegionObserver
   @Override
   public Reader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
       FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
-      DataBlockEncoding preferredEncodingInCache, Reference r, Reader reader) throws IOException {
+      Reference r, Reader reader) throws IOException {
     return reader;
   }
 
   @Override
   public Reader postStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
       FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
-      DataBlockEncoding preferredEncodingInCache, Reference r, Reader reader) throws IOException {
+      Reference r, Reader reader) throws IOException {
     return reader;
   }
 

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java Wed Nov 13 17:31:02 2013
@@ -30,23 +30,23 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.Reference;
-import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
+import org.apache.hadoop.hbase.regionserver.OperationStatus;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
@@ -55,9 +55,9 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Pair;
 
 import com.google.common.collect.ImmutableList;
-import org.apache.hadoop.hbase.util.Pair;
 
 /**
  * Coprocessors implement this interface to observe and mediate client actions
@@ -1026,7 +1026,6 @@ public interface RegionObserver extends 
    * @param in {@link FSDataInputStreamWrapper}
    * @param size Full size of the file
    * @param cacheConf
-   * @param preferredEncodingInCache
   * @param r original reference file. This will be non-null only when reading a split file.
    * @param reader the base reader, if not {@code null}, from previous RegionObserver in the chain
    * @return a Reader instance to use instead of the base reader if overriding
@@ -1035,8 +1034,7 @@ public interface RegionObserver extends 
    */
   StoreFile.Reader preStoreFileReaderOpen(final ObserverContext<RegionCoprocessorEnvironment> ctx,
       final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, long size,
-      final CacheConfig cacheConf, final DataBlockEncoding preferredEncodingInCache,
-      final Reference r, StoreFile.Reader reader) throws IOException;
+      final CacheConfig cacheConf, final Reference r, StoreFile.Reader reader) throws IOException;
 
   /**
    * Called after the creation of Reader for a store file.
@@ -1047,7 +1045,6 @@ public interface RegionObserver extends 
    * @param in {@link FSDataInputStreamWrapper}
    * @param size Full size of the file
    * @param cacheConf
-   * @param preferredEncodingInCache
   * @param r original reference file. This will be non-null only when reading a split file.
    * @param reader the base reader instance
    * @return The reader to use
@@ -1055,8 +1052,7 @@ public interface RegionObserver extends 
    */
   StoreFile.Reader postStoreFileReaderOpen(final ObserverContext<RegionCoprocessorEnvironment> ctx,
       final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, long size,
-      final CacheConfig cacheConf, final DataBlockEncoding preferredEncodingInCache,
-      final Reference r, StoreFile.Reader reader) throws IOException;
+      final CacheConfig cacheConf, final Reference r, StoreFile.Reader reader) throws IOException;
 
   /**
    * Called after a new cell has been created during an increment operation, but before

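A coprocessor compiled against the new hook signature looks like the sketch
below; returning the passed-in reader keeps default behavior (illustration
only, not part of this commit):

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
    import org.apache.hadoop.hbase.io.Reference;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.regionserver.StoreFile;

    public class ReaderOpenObserver extends BaseRegionObserver {
      @Override
      public StoreFile.Reader preStoreFileReaderOpen(
          ObserverContext<RegionCoprocessorEnvironment> ctx, FileSystem fs,
          Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
          Reference r, StoreFile.Reader reader) throws IOException {
        // No preferredEncodingInCache parameter any more; the reader decides
        // from the file's own metadata.
        return reader;
      }
    }
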
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java Wed Nov 13 17:31:02 2013
@@ -24,13 +24,11 @@ import java.nio.ByteBuffer;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
@@ -67,13 +65,11 @@ public class HalfStoreFileReader extends
    * @param p path to hfile
    * @param cacheConf
    * @param r original reference file (contains top or bottom)
-   * @param preferredEncodingInCache
    * @throws IOException
    */
   public HalfStoreFileReader(final FileSystem fs, final Path p,
-      final CacheConfig cacheConf, final Reference r,
-      DataBlockEncoding preferredEncodingInCache) throws IOException {
-    super(fs, p, cacheConf, preferredEncodingInCache);
+      final CacheConfig cacheConf, final Reference r) throws IOException {
+    super(fs, p, cacheConf);
      // This is not the actual midkey for this half-file; it's just the border
     // around which we split top and bottom.  Have to look in files to find
     // actual last and first keys for bottom and top halves.  Half-files don't
@@ -92,13 +88,11 @@ public class HalfStoreFileReader extends
    * @param size Full size of the hfile file
    * @param cacheConf
    * @param r original reference file (contains top or bottom)
-   * @param preferredEncodingInCache
    * @throws IOException
    */
   public HalfStoreFileReader(final FileSystem fs, final Path p, final FSDataInputStreamWrapper in,
-      long size, final CacheConfig cacheConf,  final Reference r,
-      final DataBlockEncoding preferredEncodingInCache) throws IOException {
-    super(fs, p, in, size, cacheConf, preferredEncodingInCache);
+      long size, final CacheConfig cacheConf,  final Reference r) throws IOException {
+    super(fs, p, in, size, cacheConf);
      // This is not the actual midkey for this half-file; it's just the border
     // around which we split top and bottom.  Have to look in files to find
     // actual last and first keys for bottom and top halves.  Half-files don't

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java Wed Nov 13 17:31:02 2013
@@ -325,8 +325,8 @@ public abstract class AbstractHFileReade
   }
 
   @Override
-  public DataBlockEncoding getEncodingOnDisk() {
-    return dataBlockEncoder.getEncodingOnDisk();
+  public DataBlockEncoding getDataBlockEncoding() {
+    return dataBlockEncoder.getDataBlockEncoding();
   }
 
   public abstract int getMajorVersion();

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java Wed Nov 13 17:31:02 2013
@@ -114,10 +114,9 @@ public abstract class AbstractHFileWrite
     this.path = path;
     this.name = path != null ? path.getName() : outputStream.toString();
     this.hFileContext = fileContext;
-    if (hFileContext.getEncodingOnDisk() != DataBlockEncoding.NONE
-        || hFileContext.getEncodingInCache() != DataBlockEncoding.NONE) {
-      this.blockEncoder = new HFileDataBlockEncoderImpl(hFileContext.getEncodingOnDisk(),
-          hFileContext.getEncodingInCache());
+    DataBlockEncoding encoding = hFileContext.getDataBlockEncoding();
+    if (encoding != DataBlockEncoding.NONE) {
+      this.blockEncoder = new HFileDataBlockEncoderImpl(encoding);
     } else {
       this.blockEncoder = NoOpDataBlockEncoder.INSTANCE;
     }

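The writer now derives its block encoder from the single context setting; a
standalone sketch of that selection, mirroring the logic above (illustration
only):

    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.io.hfile.HFileContext;
    import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
    import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
    import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
    import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;

    public class WriterEncoderSelectionSketch {
      public static void main(String[] args) {
        HFileContext ctx = new HFileContextBuilder()
            .withDataBlockEncoding(DataBlockEncoding.FAST_DIFF)
            .build();
        DataBlockEncoding encoding = ctx.getDataBlockEncoding();
        // Same choice AbstractHFileWriter makes in the hunk above.
        HFileDataBlockEncoder blockEncoder = encoding != DataBlockEncoding.NONE
            ? new HFileDataBlockEncoderImpl(encoding)
            : NoOpDataBlockEncoder.INSTANCE;
        System.out.println(blockEncoder); // HFileDataBlockEncoderImpl(encoding=FAST_DIFF)
      }
    }
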
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java Wed Nov 13 17:31:02 2013
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.util.Clas
  */
 @InterfaceAudience.Private
 public class BlockCacheKey implements HeapSize, java.io.Serializable {
+  private static final long serialVersionUID = -5199992013113130534L;
   private final String hfileName;
   private final long offset;
   private final DataBlockEncoding encoding;
@@ -39,8 +40,8 @@ public class BlockCacheKey implements He
     // We add encoding to the cache key only for data blocks. If the block type
     // is unknown (this should never be the case in production), we just use
     // the provided encoding, because it might be a data block.
-    this.encoding = (blockType == null || blockType.isData()) ? encoding :
-        DataBlockEncoding.NONE;
+    this.encoding = (encoding != null && (blockType == null
+      || blockType.isData())) ? encoding : DataBlockEncoding.NONE;
   }
 
   /**
@@ -62,7 +63,7 @@ public class BlockCacheKey implements He
   public boolean equals(Object o) {
     if (o instanceof BlockCacheKey) {
       BlockCacheKey k = (BlockCacheKey) o;
-      return offset == k.offset
+      return offset == k.offset && encoding == k.encoding
           && (hfileName == null ? k.hfileName == null : hfileName
               .equals(k.hfileName));
     } else {

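With blocks now cached in their on-disk format, the encoding recorded in the
cache key participates in equals(); a small sketch, assuming the
four-argument constructor shown above:

    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
    import org.apache.hadoop.hbase.io.hfile.BlockType;

    public class CacheKeyEqualityExample {
      public static void main(String[] args) {
        // Same file and offset, different encodings: distinct keys now.
        BlockCacheKey a =
            new BlockCacheKey("f", 0L, DataBlockEncoding.NONE, BlockType.DATA);
        BlockCacheKey b =
            new BlockCacheKey("f", 0L, DataBlockEncoding.PREFIX, BlockType.DATA);
        System.out.println(a.equals(b)); // false: encoding is part of equality
      }
    }
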
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java Wed Nov 13 17:31:02 2013
@@ -66,11 +66,10 @@ import org.apache.hadoop.hbase.util.Byte
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.io.Writable;
-import com.google.protobuf.ZeroCopyLiteralByteString;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
-import com.google.protobuf.ByteString;
+import com.google.protobuf.ZeroCopyLiteralByteString;
 
 /**
  * File format for hbase.
@@ -497,7 +496,7 @@ public class HFile {
     /** Close method with optional evictOnClose */
     void close(boolean evictOnClose) throws IOException;
 
-    DataBlockEncoding getEncodingOnDisk();
+    DataBlockEncoding getDataBlockEncoding();
 
     boolean hasMVCCInfo();
   }
@@ -510,13 +509,12 @@ public class HFile {
    * @param fsdis stream of path's file
    * @param size max size of the trailer.
   * @param cacheConf Cache configuration values, cannot be null.
-   * @param preferredEncodingInCache
    * @param hfs
    * @return an appropriate instance of HFileReader
    * @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException
    */
   private static Reader pickReaderVersion(Path path, FSDataInputStreamWrapper fsdis,
-      long size, CacheConfig cacheConf, DataBlockEncoding preferredEncodingInCache,
+      long size, CacheConfig cacheConf,
       HFileSystem hfs) throws IOException {
     FixedFileTrailer trailer = null;
     try {
@@ -526,10 +524,10 @@ public class HFile {
       switch (trailer.getMajorVersion()) {
       case 2:
         return new HFileReaderV2(
-            path, trailer, fsdis, size, cacheConf, preferredEncodingInCache, hfs);
+            path, trailer, fsdis, size, cacheConf, hfs);
       case 3 :
         return new HFileReaderV3(
-            path, trailer, fsdis, size, cacheConf, preferredEncodingInCache, hfs);
+            path, trailer, fsdis, size, cacheConf, hfs);
       default:
         throw new IllegalArgumentException("Invalid HFile version " + trailer.getMajorVersion());
       }
@@ -546,32 +544,14 @@ public class HFile {
   /**
    * @param fs A file system
    * @param path Path to HFile
-   * @param cacheConf Cache configuration for hfile's contents
-   * @param preferredEncodingInCache Preferred in-cache data encoding algorithm.
-   * @return A version specific Hfile Reader
-   * @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException
-   */
-  public static Reader createReaderWithEncoding(
-      FileSystem fs, Path path, CacheConfig cacheConf,
-      DataBlockEncoding preferredEncodingInCache) throws IOException {
-    FSDataInputStreamWrapper stream = new FSDataInputStreamWrapper(fs, path);
-    return pickReaderVersion(path, stream, fs.getFileStatus(path).getLen(),
-        cacheConf, preferredEncodingInCache, stream.getHfs());
-  }
-
-  /**
-   * @param fs A file system
-   * @param path Path to HFile
    * @param fsdis a stream of path's file
    * @param size max size of the trailer.
    * @param cacheConf Cache configuration for hfile's contents
-   * @param preferredEncodingInCache Preferred in-cache data encoding algorithm.
    * @return A version specific Hfile Reader
    * @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException
    */
-  public static Reader createReaderWithEncoding(FileSystem fs, Path path,
-      FSDataInputStreamWrapper fsdis, long size, CacheConfig cacheConf,
-      DataBlockEncoding preferredEncodingInCache) throws IOException {
+  public static Reader createReader(FileSystem fs, Path path,
+      FSDataInputStreamWrapper fsdis, long size, CacheConfig cacheConf) throws IOException {
     HFileSystem hfs = null;
 
     // If the fs is not an instance of HFileSystem, then create an
@@ -583,7 +563,7 @@ public class HFile {
     } else {
       hfs = (HFileSystem)fs;
     }
-    return pickReaderVersion(path, fsdis, size, cacheConf, preferredEncodingInCache, hfs);
+    return pickReaderVersion(path, fsdis, size, cacheConf, hfs);
   }
 
   /**
@@ -597,8 +577,9 @@ public class HFile {
   public static Reader createReader(
       FileSystem fs, Path path, CacheConfig cacheConf) throws IOException {
     Preconditions.checkNotNull(cacheConf, "Cannot create Reader with null CacheConf");
-    return createReaderWithEncoding(fs, path, cacheConf,
-        DataBlockEncoding.NONE);
+    FSDataInputStreamWrapper stream = new FSDataInputStreamWrapper(fs, path);
+    return pickReaderVersion(path, stream, fs.getFileStatus(path).getLen(),
+      cacheConf, stream.getHfs());
   }
 
   /**
@@ -608,7 +589,7 @@ public class HFile {
       FSDataInputStream fsdis, long size, CacheConfig cacheConf)
       throws IOException {
     FSDataInputStreamWrapper wrapper = new FSDataInputStreamWrapper(fsdis);
-    return pickReaderVersion(path, wrapper, size, cacheConf, DataBlockEncoding.NONE, null);
+    return pickReaderVersion(path, wrapper, size, cacheConf, null);
   }
 
   /**

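Callers that used createReaderWithEncoding migrate to plain createReader; the
encoding comes from the file's own info block rather than a caller hint. A
hedged caller-side sketch (the path is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.HFile;

    public class ReaderMigrationExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/tmp/example.hfile"); // hypothetical path
        CacheConfig cacheConf = new CacheConfig(conf);
        // Before: HFile.createReaderWithEncoding(fs, path, cacheConf, encoding);
        HFile.Reader reader = HFile.createReader(fs, path, cacheConf);
        // The encoding is whatever the file was written with.
        System.out.println(reader.getDataBlockEncoding());
        reader.close();
      }
    }
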
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java Wed Nov 13 17:31:02 2013
@@ -689,7 +689,7 @@ public class HFileBlock implements Cache
       defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null,
           HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
       dataBlockEncodingCtx = this.dataBlockEncoder
-          .newOnDiskDataBlockEncodingContext(HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
+          .newDataBlockEncodingContext(HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
 
       if (fileContext.getBytesPerChecksum() < HConstants.HFILEBLOCK_HEADER_SIZE) {
         throw new RuntimeException("Unsupported value of bytesPerChecksum. " +
@@ -1025,8 +1025,7 @@ public class HFileBlock implements Cache
                                 .withBytesPerCheckSum(0)
                                 .withChecksumType(ChecksumType.NULL) // no checksums in cached data
                                 .withCompression(fileContext.getCompression())
-                                .withDataBlockEncodingInCache(fileContext.getEncodingInCache())
-                                .withDataBlockEncodingOnDisk(fileContext.getEncodingOnDisk())
+                                .withDataBlockEncoding(fileContext.getDataBlockEncoding())
                                 .withHBaseCheckSum(fileContext.isUseHBaseChecksum())
                                 .withCompressTags(fileContext.isCompressTags())
                                 .withIncludesMvcc(fileContext.isIncludesMvcc())
@@ -1256,10 +1255,6 @@ public class HFileBlock implements Cache
      * does or doesn't do checksum validations in the filesystem */
     protected FSDataInputStreamWrapper streamWrapper;
 
-    /** Data block encoding used to read from file */
-    protected HFileDataBlockEncoder dataBlockEncoder =
-        NoOpDataBlockEncoder.INSTANCE;
-
     private HFileBlockDecodingContext encodedBlockDecodingCtx;
 
     private HFileBlockDefaultDecodingContext defaultDecodingCtx;
@@ -1512,7 +1507,7 @@ public class HFileBlock implements Cache
       if (isCompressed) {
         // This will allocate a new buffer but keep header bytes.
         b.allocateBuffer(nextBlockOnDiskSize > 0);
-        if (b.blockType.equals(BlockType.ENCODED_DATA)) {
+        if (b.blockType == BlockType.ENCODED_DATA) {
           encodedBlockDecodingCtx.prepareDecoding(b.getOnDiskSizeWithoutHeader(),
               b.getUncompressedSizeWithoutHeader(), b.getBufferWithoutHeader(), onDiskBlock,
               hdrSize);
@@ -1557,8 +1552,7 @@ public class HFileBlock implements Cache
     }
 
     void setDataBlockEncoder(HFileDataBlockEncoder encoder) {
-      this.dataBlockEncoder = encoder;
-      encodedBlockDecodingCtx = encoder.newOnDiskDataBlockDecodingContext(this.fileContext);
+      encodedBlockDecodingCtx = encoder.newDataBlockDecodingContext(this.fileContext);
     }
 
     /**

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java Wed Nov 13 17:31:02 2013
@@ -112,7 +112,6 @@ public class HFileBlockIndex {
     private byte[][] blockKeys;
     private long[] blockOffsets;
     private int[] blockDataSizes;
-    private int rootByteSize = 0;
     private int rootCount = 0;
 
     // Mid-key metadata.
@@ -262,8 +261,7 @@ public class HFileBlockIndex {
         }
 
         // Found a data block, break the loop and check our level in the tree.
-        if (block.getBlockType().equals(BlockType.DATA) ||
-            block.getBlockType().equals(BlockType.ENCODED_DATA)) {
+        if (block.getBlockType().isData()) {
           break;
         }
 
@@ -423,9 +421,7 @@ public class HFileBlockIndex {
       blockOffsets[rootCount] = offset;
       blockKeys[rootCount] = key;
       blockDataSizes[rootCount] = dataSize;
-
       rootCount++;
-      rootByteSize += SECONDARY_INDEX_ENTRY_OVERHEAD + key.length;
     }
 
     /**
@@ -672,7 +668,7 @@ public class HFileBlockIndex {
     @Override
     public long heapSize() {
       long heapSize = ClassSize.align(6 * ClassSize.REFERENCE +
-          3 * Bytes.SIZEOF_INT + ClassSize.OBJECT);
+          2 * Bytes.SIZEOF_INT + ClassSize.OBJECT);
 
       // Mid-key metadata.
       heapSize += MID_KEY_METADATA_SIZE;

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java Wed Nov 13 17:31:02 2013
@@ -34,24 +34,6 @@ import org.apache.hadoop.hbase.util.Byte
 public interface HFileDataBlockEncoder {
   /** Type of encoding used for data blocks in HFile. Stored in file info. */
   byte[] DATA_BLOCK_ENCODING = Bytes.toBytes("DATA_BLOCK_ENCODING");
-  
-  /**
-   * Converts a block from the on-disk format to the in-cache format. Called in
-   * the following cases:
-   * <ul>
-   * <li>After an encoded or unencoded data block is read from disk, but before
-   * it is put into the cache.</li>
-   * <li>To convert brand-new blocks to the in-cache format when doing
-   * cache-on-write.</li>
-   * </ul>
-   * @param block a block in an on-disk format (read from HFile or freshly
-   *          generated).
-   * @param isCompaction
-   * @return non null block which is coded according to the settings.
-   */
-  HFileBlock diskToCacheFormat(
-    HFileBlock block, boolean isCompaction
-  );
 
   /**
    * Should be called before an encoded or unencoded data block is written to
@@ -69,10 +51,9 @@ public interface HFileDataBlockEncoder {
 
   /**
    * Decides whether we should use a scanner over encoded blocks.
-   * @param isCompaction whether we are in a compaction.
    * @return Whether to use encoded scanner.
    */
-  boolean useEncodedScanner(boolean isCompaction);
+  boolean useEncodedScanner();
 
   /**
    * Save metadata in HFile which will be written to disk
@@ -82,17 +63,8 @@ public interface HFileDataBlockEncoder {
   void saveMetadata(HFile.Writer writer)
       throws IOException;
 
-  /** @return the on-disk data block encoding */
-  DataBlockEncoding getEncodingOnDisk();
-
-  /** @return the preferred in-cache data block encoding for normal reads */
-  DataBlockEncoding getEncodingInCache();
-
-  /**
-   * @return the effective in-cache data block encoding, taking into account
-   *         whether we are doing a compaction.
-   */
-  DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction);
+  /** @return the data block encoding */
+  DataBlockEncoding getDataBlockEncoding();
 
   /**
    * Create an encoder specific encoding context object for writing. And the
@@ -103,7 +75,7 @@ public interface HFileDataBlockEncoder {
    * @param fileContext HFile meta data
    * @return a new {@link HFileBlockEncodingContext} object
    */
-  HFileBlockEncodingContext newOnDiskDataBlockEncodingContext(byte[] headerBytes,
+  HFileBlockEncodingContext newDataBlockEncodingContext(byte[] headerBytes,
       HFileContext fileContext);
 
   /**
@@ -114,6 +86,5 @@ public interface HFileDataBlockEncoder {
    * @param fileContext - HFile meta data
    * @return a new {@link HFileBlockDecodingContext} object
    */
-  HFileBlockDecodingContext newOnDiskDataBlockDecodingContext(HFileContext fileContext);
-
+  HFileBlockDecodingContext newDataBlockDecodingContext(HFileContext fileContext);
 }

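With the on-disk/in-cache split gone, the interface keeps a single accessor
and a parameterless useEncodedScanner(). A quick sketch of the two remaining
behaviors (illustration only):

    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
    import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
    import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;

    public class EncoderInterfaceExample {
      public static void main(String[] args) {
        HFileDataBlockEncoder none = NoOpDataBlockEncoder.INSTANCE;
        HFileDataBlockEncoder fastDiff =
            new HFileDataBlockEncoderImpl(DataBlockEncoding.FAST_DIFF);
        System.out.println(none.useEncodedScanner());         // false
        System.out.println(fastDiff.useEncodedScanner());     // true
        System.out.println(fastDiff.getDataBlockEncoding());  // FAST_DIFF
      }
    }
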
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoderImpl.java Wed Nov 13 17:31:02 2013
@@ -20,7 +20,6 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
@@ -30,147 +29,50 @@ import org.apache.hadoop.hbase.io.encodi
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 
-import com.google.common.base.Preconditions;
-
 /**
  * Do different kinds of data block encoding according to column family
  * options.
  */
 @InterfaceAudience.Private
 public class HFileDataBlockEncoderImpl implements HFileDataBlockEncoder {
-  private final DataBlockEncoding onDisk;
-  private final DataBlockEncoding inCache;
-  private final byte[] dummyHeader;
-
-  public HFileDataBlockEncoderImpl(DataBlockEncoding encoding) {
-    this(encoding, encoding);
-  }
-
-  /**
-   * Do data block encoding with specified options.
-   * @param onDisk What kind of data block encoding will be used before writing
-   *          HFileBlock to disk. This must be either the same as inCache or
-   *          {@link DataBlockEncoding#NONE}.
-   * @param inCache What kind of data block encoding will be used in block
-   *          cache.
-   */
-  public HFileDataBlockEncoderImpl(DataBlockEncoding onDisk,
-      DataBlockEncoding inCache) {
-    this(onDisk, inCache, HConstants.HFILEBLOCK_DUMMY_HEADER);
-  }
+  private final DataBlockEncoding encoding;
 
   /**
    * Do data block encoding with specified options.
-   * @param onDisk What kind of data block encoding will be used before writing
-   *          HFileBlock to disk. This must be either the same as inCache or
-   *          {@link DataBlockEncoding#NONE}.
-   * @param inCache What kind of data block encoding will be used in block
-   *          cache.
-   * @param dummyHeader dummy header bytes
+   * @param encoding What kind of data block encoding will be used.
    */
-  public HFileDataBlockEncoderImpl(DataBlockEncoding onDisk,
-      DataBlockEncoding inCache, byte[] dummyHeader) {
-    this.onDisk = onDisk != null ?
-        onDisk : DataBlockEncoding.NONE;
-    this.inCache = inCache != null ?
-        inCache : DataBlockEncoding.NONE;
-    this.dummyHeader = dummyHeader;
-
-    Preconditions.checkArgument(onDisk == DataBlockEncoding.NONE ||
-        onDisk == inCache, "on-disk encoding (" + onDisk + ") must be " +
-        "either the same as in-cache encoding (" + inCache + ") or " +
-        DataBlockEncoding.NONE);
+  public HFileDataBlockEncoderImpl(DataBlockEncoding encoding) {
+    this.encoding = encoding != null ? encoding : DataBlockEncoding.NONE;
   }
 
   public static HFileDataBlockEncoder createFromFileInfo(
-      FileInfo fileInfo, DataBlockEncoding preferredEncodingInCache)
-      throws IOException {
-    boolean hasPreferredCacheEncoding = preferredEncodingInCache != null
-        && preferredEncodingInCache != DataBlockEncoding.NONE;
-
+      FileInfo fileInfo) throws IOException {
+    DataBlockEncoding encoding = DataBlockEncoding.NONE;
     byte[] dataBlockEncodingType = fileInfo.get(DATA_BLOCK_ENCODING);
-    if (dataBlockEncodingType == null && !hasPreferredCacheEncoding) {
-      return NoOpDataBlockEncoder.INSTANCE;
-    }
-
-    DataBlockEncoding onDisk;
-    if (dataBlockEncodingType == null) {
-      onDisk = DataBlockEncoding.NONE;
-    } else {
+    if (dataBlockEncodingType != null) {
       String dataBlockEncodingStr = Bytes.toString(dataBlockEncodingType);
       try {
-        onDisk = DataBlockEncoding.valueOf(dataBlockEncodingStr);
+        encoding = DataBlockEncoding.valueOf(dataBlockEncodingStr);
       } catch (IllegalArgumentException ex) {
         throw new IOException("Invalid data block encoding type in file info: "
-            + dataBlockEncodingStr, ex);
+          + dataBlockEncodingStr, ex);
       }
     }
 
-    DataBlockEncoding inCache;
-    if (onDisk == DataBlockEncoding.NONE) {
-      // This is an "in-cache-only" encoding or fully-unencoded scenario.
-      // Either way, we use the given encoding (possibly NONE) specified by
-      // the column family in cache.
-      inCache = preferredEncodingInCache;
-    } else {
-      // Leave blocks in cache encoded the same way as they are on disk.
-      // If we switch encoding type for the CF or the in-cache-only encoding
-      // flag, old files will keep their encoding both on disk and in cache,
-      // but new files will be generated with the new encoding.
-      inCache = onDisk;
+    if (encoding == DataBlockEncoding.NONE) {
+      return NoOpDataBlockEncoder.INSTANCE;
     }
-    // TODO: we are not passing proper header size here based on minor version, presumably
-    //       because this encoder will never actually be used for encoding.
-    return new HFileDataBlockEncoderImpl(onDisk, inCache);
+    return new HFileDataBlockEncoderImpl(encoding);
   }
 
   @Override
   public void saveMetadata(HFile.Writer writer) throws IOException {
-    writer.appendFileInfo(DATA_BLOCK_ENCODING, onDisk.getNameInBytes());
-  }
-
-  @Override
-  public DataBlockEncoding getEncodingOnDisk() {
-    return onDisk;
-  }
-
-  @Override
-  public DataBlockEncoding getEncodingInCache() {
-    return inCache;
-  }
-
-  @Override
-  public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction) {
-    if (!useEncodedScanner(isCompaction)) {
-      return DataBlockEncoding.NONE;
-    }
-    return inCache;
+    writer.appendFileInfo(DATA_BLOCK_ENCODING, encoding.getNameInBytes());
   }
 
   @Override
-  public HFileBlock diskToCacheFormat(HFileBlock block, boolean isCompaction) {
-    if (block.getBlockType() == BlockType.DATA) {
-      if (!useEncodedScanner(isCompaction)) {
-        // Unencoded block, and we don't want to encode in cache.
-        return block;
-      }
-      // Encode the unencoded block with the in-cache encoding.
-      return encodeDataBlock(block, inCache,
-          createInCacheEncodingContext(block.getHFileContext()));
-    }
-
-    if (block.getBlockType() == BlockType.ENCODED_DATA) {
-      if (block.getDataBlockEncodingId() == onDisk.getId()) {
-        // The block is already in the desired in-cache encoding.
-        return block;
-      }
-      // We don't want to re-encode a block in a different encoding. The HFile
-      // reader should have been instantiated in such a way that we would not
-      // have to do this.
-      throw new AssertionError("Expected on-disk data block encoding " +
-          onDisk + ", got " + block.getDataBlockEncoding());
-    }
-    return block;
+  public DataBlockEncoding getDataBlockEncoding() {
+    return encoding;
   }
 
   /**
@@ -184,21 +86,18 @@ public class HFileDataBlockEncoderImpl i
   public void beforeWriteToDisk(ByteBuffer in,
       HFileBlockEncodingContext encodeCtx,
       BlockType blockType) throws IOException {
-    if (onDisk == DataBlockEncoding.NONE) {
+    if (encoding == DataBlockEncoding.NONE) {
       // there is no need to encode the block before writing it to disk
       ((HFileBlockDefaultEncodingContext) encodeCtx).compressAfterEncodingWithBlockType(
           in.array(), blockType);
       return;
     }
-    encodeBufferToHFileBlockBuffer(in, onDisk, encodeCtx);
+    encodeBufferToHFileBlockBuffer(in, encoding, encodeCtx);
   }
 
   @Override
-  public boolean useEncodedScanner(boolean isCompaction) {
-    if (isCompaction && onDisk == DataBlockEncoding.NONE) {
-      return false;
-    }
-    return inCache != DataBlockEncoding.NONE;
+  public boolean useEncodedScanner() {
+    return encoding != DataBlockEncoding.NONE;
   }
 
   /**
@@ -222,66 +121,27 @@ public class HFileDataBlockEncoderImpl i
     }
   }
 
-  private HFileBlock encodeDataBlock(HFileBlock block, DataBlockEncoding algo,
-      HFileBlockEncodingContext encodingCtx) {
-    encodingCtx.setDummyHeader(block.getDummyHeaderForVersion());
-    encodeBufferToHFileBlockBuffer(
-      block.getBufferWithoutHeader(), algo, encodingCtx);
-    byte[] encodedUncompressedBytes =
-      encodingCtx.getUncompressedBytesWithHeader();
-    ByteBuffer bufferWrapper = ByteBuffer.wrap(encodedUncompressedBytes);
-    int sizeWithoutHeader = bufferWrapper.limit() - block.headerSize();
-    HFileBlock encodedBlock = new HFileBlock(BlockType.ENCODED_DATA,
-        block.getOnDiskSizeWithoutHeader(),
-        sizeWithoutHeader, block.getPrevBlockOffset(),
-        bufferWrapper, HFileBlock.FILL_HEADER, block.getOffset(),
-        block.getOnDiskDataSizeWithHeader(), encodingCtx.getHFileContext());
-    return encodedBlock;
-  }
-
-  /**
-   * Returns a new encoding context given the inCache encoding scheme provided in the constructor.
-   * This used to be kept around but HFileBlockDefaultEncodingContext isn't thread-safe.
-   * See HBASE-8732
-   * @return a new in cache encoding context
-   */
-  private HFileBlockEncodingContext createInCacheEncodingContext(HFileContext fileContext) {
-    HFileContext newContext = new HFileContext(fileContext);
-    return (inCache != DataBlockEncoding.NONE) ?
-                this.inCache.getEncoder().newDataBlockEncodingContext(
-                    this.inCache, dummyHeader, newContext)
-                :
-                // create a default encoding context
-                new HFileBlockDefaultEncodingContext(this.inCache, dummyHeader, newContext);
-  }
-
   @Override
   public String toString() {
-    return getClass().getSimpleName() + "(onDisk=" + onDisk + ", inCache=" +
-        inCache + ")";
+    return getClass().getSimpleName() + "(encoding=" + encoding + ")";
   }
 
   @Override
-  public HFileBlockEncodingContext newOnDiskDataBlockEncodingContext(
+  public HFileBlockEncodingContext newDataBlockEncodingContext(
       byte[] dummyHeader, HFileContext fileContext) {
-    if (onDisk != null) {
-      DataBlockEncoder encoder = onDisk.getEncoder();
-      if (encoder != null) {
-        return encoder.newDataBlockEncodingContext(onDisk, dummyHeader, fileContext);
-      }
+    DataBlockEncoder encoder = encoding.getEncoder();
+    if (encoder != null) {
+      return encoder.newDataBlockEncodingContext(encoding, dummyHeader, fileContext);
     }
     return new HFileBlockDefaultEncodingContext(null, dummyHeader, fileContext);
   }
 
   @Override
-  public HFileBlockDecodingContext newOnDiskDataBlockDecodingContext(HFileContext fileContext) {
-    if (onDisk != null) {
-      DataBlockEncoder encoder = onDisk.getEncoder();
-      if (encoder != null) {
-        return encoder.newDataBlockDecodingContext(fileContext);
-      }
+  public HFileBlockDecodingContext newDataBlockDecodingContext(HFileContext fileContext) {
+    DataBlockEncoder encoder = encoding.getEncoder();
+    if (encoder != null) {
+      return encoder.newDataBlockDecodingContext(fileContext);
     }
     return new HFileBlockDefaultDecodingContext(fileContext);
   }
-
 }

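createFromFileInfo now consults only the file's own DATA_BLOCK_ENCODING
entry, returning the shared no-op encoder when nothing is encoded. A sketch
assuming FileInfo's append helper (illustration only):

    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
    import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateFromFileInfoExample {
      public static void main(String[] args) throws Exception {
        HFile.FileInfo fileInfo = new HFile.FileInfo();
        // No DATA_BLOCK_ENCODING entry: the shared no-op encoder is returned.
        HFileDataBlockEncoder plain =
            HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo);
        System.out.println(plain.getDataBlockEncoding()); // NONE

        fileInfo.append(HFileDataBlockEncoder.DATA_BLOCK_ENCODING,
            Bytes.toBytes("FAST_DIFF"), false);
        HFileDataBlockEncoder encoded =
            HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo);
        System.out.println(encoded.getDataBlockEncoding()); // FAST_DIFF
      }
    }
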
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java Wed Nov 13 17:31:02 2013
@@ -109,20 +109,16 @@ public class HFileReaderV2 extends Abstr
    * @param fsdis input stream.
    * @param size Length of the stream.
    * @param cacheConf Cache configuration.
-   * @param preferredEncodingInCache the encoding to use in cache in case we
-   *          have a choice. If the file is already encoded on disk, we will
-   *          still use its on-disk encoding in cache.
    * @param hfs
    */
   public HFileReaderV2(Path path, FixedFileTrailer trailer,
       final FSDataInputStreamWrapper fsdis, final long size, final CacheConfig cacheConf,
-      DataBlockEncoding preferredEncodingInCache, final HFileSystem hfs)
+      final HFileSystem hfs)
       throws IOException {
     super(path, trailer, size, cacheConf, hfs);
     trailer.expectMajorVersion(getMajorVersion());
     validateMinorVersion(path, trailer.getMinorVersion());
     this.hfileContext = createHFileContext(trailer);
-    // Should we set the preferredEncodinginCache here for the context
     HFileBlock.FSReaderV2 fsBlockReaderV2 = new HFileBlock.FSReaderV2(fsdis, fileSize, hfs, path,
         hfileContext);
     this.fsBlockReader = fsBlockReaderV2; // upcast
@@ -168,8 +164,7 @@ public class HFileReaderV2 extends Abstr
     }
 
     // Read data block encoding algorithm name from file info.
-    dataBlockEncoder = HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo,
-        preferredEncodingInCache);
+    dataBlockEncoder = HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo);
     fsBlockReaderV2.setDataBlockEncoder(dataBlockEncoder);
 
     // Store all other load-on-open blocks for further consumption.
@@ -203,8 +198,7 @@ public class HFileReaderV2 extends Abstr
    @Override
    public HFileScanner getScanner(boolean cacheBlocks, final boolean pread,
       final boolean isCompaction) {
-    // check if we want to use data block encoding in memory
-    if (dataBlockEncoder.useEncodedScanner(isCompaction)) {
+    if (dataBlockEncoder.useEncodedScanner()) {
       return new EncodedScannerV2(this, cacheBlocks, pread, isCompaction,
           hfileContext);
     }
@@ -310,7 +304,7 @@ public class HFileReaderV2 extends Abstr
 
     BlockCacheKey cacheKey =
         new BlockCacheKey(name, dataBlockOffset,
-            dataBlockEncoder.getEffectiveEncodingInCache(isCompaction),
+            dataBlockEncoder.getDataBlockEncoding(),
             expectedBlockType);
 
     boolean useLock = false;
@@ -329,19 +323,17 @@ public class HFileReaderV2 extends Abstr
           HFileBlock cachedBlock = (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey,
               cacheBlock, useLock);
           if (cachedBlock != null) {
-            if (cachedBlock.getBlockType() == BlockType.DATA) {
-              HFile.dataBlockReadCnt.incrementAndGet();
-            }
-
             validateBlockType(cachedBlock, expectedBlockType);
+            if (cachedBlock.getBlockType().isData()) {
+              HFile.dataBlockReadCnt.incrementAndGet();
 
-            // Validate encoding type for encoded blocks. We include encoding
-            // type in the cache key, and we expect it to match on a cache hit.
-            if (cachedBlock.getBlockType() == BlockType.ENCODED_DATA
-                && cachedBlock.getDataBlockEncoding() != dataBlockEncoder.getEncodingInCache()) {
-              throw new IOException("Cached block under key " + cacheKey + " "
+              // Validate encoding type for data blocks. We include encoding
+              // type in the cache key, and we expect it to match on a cache hit.
+              if (cachedBlock.getDataBlockEncoding() != dataBlockEncoder.getDataBlockEncoding()) {
+                throw new IOException("Cached block under key " + cacheKey + " "
                   + "has wrong encoding: " + cachedBlock.getDataBlockEncoding() + " (expected: "
-                  + dataBlockEncoder.getEncodingInCache() + ")");
+                  + dataBlockEncoder.getDataBlockEncoding() + ")");
+              }
             }
             return cachedBlock;
           }
@@ -359,7 +351,6 @@ public class HFileReaderV2 extends Abstr
         long startTimeNs = System.nanoTime();
         HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, -1,
             pread);
-        hfileBlock = diskToCacheFormat(hfileBlock, isCompaction);
         validateBlockType(hfileBlock, expectedBlockType);
 
         final long delta = System.nanoTime() - startTimeNs;
@@ -370,7 +361,7 @@ public class HFileReaderV2 extends Abstr
           cacheConf.getBlockCache().cacheBlock(cacheKey, hfileBlock, cacheConf.isInMemory());
         }
 
-        if (hfileBlock.getBlockType() == BlockType.DATA) {
+        if (hfileBlock.getBlockType().isData()) {
           HFile.dataBlockReadCnt.incrementAndGet();
         }
 
@@ -384,10 +375,6 @@ public class HFileReaderV2 extends Abstr
     }
   }
 
-  protected HFileBlock diskToCacheFormat( HFileBlock hfileBlock, final boolean isCompaction) {
-    return dataBlockEncoder.diskToCacheFormat(hfileBlock, isCompaction);
-  }
-
   @Override
   public boolean hasMVCCInfo() {
     return includesMemstoreTS && decodeMemstoreTS;
@@ -616,8 +603,7 @@ public class HFileReaderV2 extends Abstr
             + curBlock.getOnDiskSizeWithHeader(),
             curBlock.getNextBlockOnDiskSizeWithHeader(), cacheBlocks, pread,
             isCompaction, null);
-      } while (!(curBlock.getBlockType().equals(BlockType.DATA) ||
-          curBlock.getBlockType().equals(BlockType.ENCODED_DATA)));
+      } while (!curBlock.getBlockType().isData());
 
       return curBlock;
     }
@@ -981,23 +967,22 @@ public class HFileReaderV2 extends Abstr
    * ScannerV2 that operates on encoded data blocks.
    */
   protected static class EncodedScannerV2 extends AbstractScannerV2 {
-    private DataBlockEncoder.EncodedSeeker seeker = null;
-    protected DataBlockEncoder dataBlockEncoder = null;
+    private final HFileBlockDecodingContext decodingCtx;
+    private final DataBlockEncoder.EncodedSeeker seeker;
+    private final DataBlockEncoder dataBlockEncoder;
     protected final HFileContext meta;
-    protected HFileBlockDecodingContext decodingCtx;
+
     public EncodedScannerV2(HFileReaderV2 reader, boolean cacheBlocks,
         boolean pread, boolean isCompaction, HFileContext meta) {
       super(reader, cacheBlocks, pread, isCompaction);
+      DataBlockEncoding encoding = reader.dataBlockEncoder.getDataBlockEncoding();
+      dataBlockEncoder = encoding.getEncoder();
+      decodingCtx = dataBlockEncoder.newDataBlockDecodingContext(meta);
+      seeker = dataBlockEncoder.createSeeker(
+        reader.getComparator(), decodingCtx);
       this.meta = meta;
     }
 
-    protected void setDataBlockEncoder(DataBlockEncoder dataBlockEncoder) {
-      this.dataBlockEncoder = dataBlockEncoder;
-      decodingCtx = this.dataBlockEncoder.newDataBlockDecodingContext(
-          this.meta);
-      seeker = dataBlockEncoder.createSeeker(reader.getComparator(), decodingCtx);
-    }
-
     @Override
     public boolean isSeeked(){
       return this.block != null;
@@ -1008,8 +993,9 @@ public class HFileReaderV2 extends Abstr
     * the first key/value pair.
      *
      * @param newBlock the block to make current
+     * @throws CorruptHFileException
      */
-    protected void updateCurrentBlock(HFileBlock newBlock) {
+    private void updateCurrentBlock(HFileBlock newBlock) throws CorruptHFileException {
       block = newBlock;
 
       // sanity checks
@@ -1017,8 +1003,14 @@ public class HFileReaderV2 extends Abstr
         throw new IllegalStateException(
             "EncodedScanner works only on encoded data blocks");
       }
+      short dataBlockEncoderId = block.getDataBlockEncodingId();
+      if (!DataBlockEncoding.isCorrectEncoder(dataBlockEncoder, dataBlockEncoderId)) {
+        String encoderCls = dataBlockEncoder.getClass().getName();
+        throw new CorruptHFileException("Encoder " + encoderCls
+          + " doesn't support data block encoding "
+          + DataBlockEncoding.getNameFromId(dataBlockEncoderId));
+      }
 
-      updateDataBlockEncoder(block);
       seeker.setCurrentBuffer(getEncodedBuffer(newBlock));
       blockFetches++;
 
@@ -1026,15 +1018,6 @@ public class HFileReaderV2 extends Abstr
       this.nextIndexedKey = null;
     }
 
-    private void updateDataBlockEncoder(HFileBlock curBlock) {
-      short dataBlockEncoderId = curBlock.getDataBlockEncodingId();
-      if (dataBlockEncoder == null ||
-          !DataBlockEncoding.isCorrectEncoder(dataBlockEncoder, dataBlockEncoderId)) {
-        DataBlockEncoder encoder = DataBlockEncoding.getDataBlockEncoderById(dataBlockEncoderId);
-        setDataBlockEncoder(encoder);
-      }
-    }
-
     private ByteBuffer getEncodedBuffer(HFileBlock newBlock) {
       ByteBuffer origBlock = newBlock.getBufferReadOnly();
       ByteBuffer encodedBlock = ByteBuffer.wrap(origBlock.array(),
@@ -1132,7 +1115,6 @@ public class HFileReaderV2 extends Abstr
 
     @Override
     protected ByteBuffer getFirstKeyInBlock(HFileBlock curBlock) {
-      updateDataBlockEncoder(curBlock);
       return dataBlockEncoder.getFirstKeyInBlock(getEncodedBuffer(curBlock));
     }
 

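Taken together, the HFileReaderV2 hunks delete the disk-to-cache conversion step: a block is read, validated, cached, and counted in the one format it has on disk. A condensed fragment of the resulting miss path, using only names that appear in the hunks above (locking, timing metrics, the cache-hit branch, and the full caching condition are elided):

    // Cache key carries the file's single encoding, compaction or not.
    BlockCacheKey cacheKey = new BlockCacheKey(name, dataBlockOffset,
        dataBlockEncoder.getDataBlockEncoding(), expectedBlockType);
    // Read straight from the filesystem; no diskToCacheFormat() call follows.
    HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset,
        onDiskBlockSize, -1, pread);
    validateBlockType(hfileBlock, expectedBlockType);
    if (cacheBlock) {
      // The block is cached exactly as read, encoded or not.
      cacheConf.getBlockCache().cacheBlock(cacheKey, hfileBlock,
          cacheConf.isInMemory());
    }
    if (hfileBlock.getBlockType().isData()) {  // true for DATA and ENCODED_DATA
      HFile.dataBlockReadCnt.incrementAndGet();
    }
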
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java Wed Nov 13 17:31:02 2013
@@ -24,7 +24,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -51,15 +50,11 @@ public class HFileReaderV3 extends HFile
    *          Length of the stream.
    * @param cacheConf
    *          Cache configuration.
-   * @param preferredEncodingInCache
-   *          the encoding to use in cache in case we have a choice. If the file
-   *          is already encoded on disk, we will still use its on-disk encoding
-   *          in cache.
    */
   public HFileReaderV3(Path path, FixedFileTrailer trailer, final FSDataInputStreamWrapper fsdis,
-      final long size, final CacheConfig cacheConf, DataBlockEncoding preferredEncodingInCache,
+      final long size, final CacheConfig cacheConf,
       final HFileSystem hfs) throws IOException {
-    super(path, trailer, fsdis, size, cacheConf, preferredEncodingInCache, hfs);
+    super(path, trailer, fsdis, size, cacheConf, hfs);
     byte[] tmp = fileInfo.get(FileInfo.MAX_TAGS_LEN);
     // If max tag length is absent from the HFile, tags were never written to this file.
     if (tmp != null) {
@@ -98,8 +93,7 @@ public class HFileReaderV3 extends HFile
   @Override
   public HFileScanner getScanner(boolean cacheBlocks, final boolean pread,
       final boolean isCompaction) {
-    // check if we want to use data block encoding in memory
-    if (dataBlockEncoder.useEncodedScanner(isCompaction)) {
+    if (dataBlockEncoder.useEncodedScanner()) {
       return new EncodedScannerV3(this, cacheBlocks, pread, isCompaction, this.hfileContext);
     }
     return new ScannerV3(this, cacheBlocks, pread, isCompaction);
@@ -277,9 +271,4 @@ public class HFileReaderV3 extends HFile
   public int getMajorVersion() {
     return 3;
   }
-
-  @Override
-  protected HFileBlock diskToCacheFormat(HFileBlock hfileBlock, final boolean isCompaction) {
-    return dataBlockEncoder.diskToCacheFormat(hfileBlock, isCompaction);
-  }
 }

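HFileReaderV3 tracks the same interface change: the constructor loses the DataBlockEncoding argument, and the per-version diskToCacheFormat override disappears because no conversion happens anywhere. A hypothetical call-site sketch; obtaining the trailer and stream wrapper is outside this patch, so all parameters are assumed to be in hand:

    static HFileScanner openV3Scanner(Path path, FixedFileTrailer trailer,
        FSDataInputStreamWrapper fsdis, long size, CacheConfig cacheConf,
        HFileSystem hfs) throws IOException {
      // Only the signature matters here: preferredEncodingInCache is gone.
      HFileReaderV3 reader = new HFileReaderV3(path, trailer, fsdis, size,
          cacheConf, hfs);
      // With encoded files this now returns an encoded scanner; the
      // isCompaction flag no longer influences that choice.
      return reader.getScanner(true /* cacheBlocks */, false /* pread */,
          false /* isCompaction */);
    }
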
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java Wed Nov 13 17:31:02 2013
@@ -194,13 +194,9 @@ public class HFileWriterV2 extends Abstr
    *          the cache key.
    */
   private void doCacheOnWrite(long offset) {
-    // We don't cache-on-write data blocks on compaction, so assume this is not
-    // a compaction.
-    final boolean isCompaction = false;
-    HFileBlock cacheFormatBlock = blockEncoder.diskToCacheFormat(
-        fsBlockWriter.getBlockForCaching(), isCompaction);
+    HFileBlock cacheFormatBlock = fsBlockWriter.getBlockForCaching();
     cacheConf.getBlockCache().cacheBlock(
-        new BlockCacheKey(name, offset, blockEncoder.getEncodingInCache(),
+        new BlockCacheKey(name, offset, blockEncoder.getDataBlockEncoding(),
             cacheFormatBlock.getBlockType()), cacheFormatBlock);
   }
 

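With no conversion step, cache-on-write no longer needs its compaction caveat; the cached block is simply the block the writer just produced. The post-patch method, reproduced with annotations (a fragment of HFileWriterV2, not standalone code):

    private void doCacheOnWrite(long offset) {
      // getBlockForCaching() already yields the block in its final on-disk
      // format, so it can be cached as-is; the old diskToCacheFormat()
      // round-trip and the isCompaction flag are gone.
      HFileBlock cacheFormatBlock = fsBlockWriter.getBlockForCaching();
      cacheConf.getBlockCache().cacheBlock(
          new BlockCacheKey(name, offset, blockEncoder.getDataBlockEncoding(),
              cacheFormatBlock.getBlockType()),
          cacheFormatBlock);
    }
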
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java Wed Nov 13 17:31:02 2013
@@ -179,8 +179,8 @@ public class HFileWriterV3 extends HFile
       // When tags are not being written in this file, MAX_TAGS_LEN is excluded
       // from the FileInfo
       fileInfo.append(FileInfo.MAX_TAGS_LEN, Bytes.toBytes(this.maxTagsLength), false);
-      boolean tagsCompressed = (hFileContext.getEncodingOnDisk() != DataBlockEncoding.NONE)
-          && hFileContext.isCompressTags();
+      boolean tagsCompressed = (hFileContext.getDataBlockEncoding() != DataBlockEncoding.NONE)
+        && hFileContext.isCompressTags();
       fileInfo.append(FileInfo.TAGS_COMPRESSED, Bytes.toBytes(tagsCompressed), false);
     }
   }

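The tag-compression flag only makes sense when blocks are encoded, and the check now reads off the unified accessor. A small illustration; the withCompressTags setter is an assumption about the same builder, not something shown in this patch:

    HFileContext ctx = new HFileContextBuilder()
        .withDataBlockEncoding(DataBlockEncoding.FAST_DIFF)
        .withCompressTags(true)   // assumed builder method
        .build();
    // Mirrors the patched condition: encoding must be enabled AND the
    // tag-compression switch set before TAGS_COMPRESSED is recorded as true.
    boolean tagsCompressed =
        (ctx.getDataBlockEncoding() != DataBlockEncoding.NONE)
            && ctx.isCompressTags();
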
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/NoOpDataBlockEncoder.java Wed Nov 13 17:31:02 2013
@@ -20,13 +20,11 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
-import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
-import org.apache.hadoop.hbase.io.hfile.HFileContext;
 
 /**
  * Does not perform any kind of encoding/decoding.
@@ -42,14 +40,6 @@ public class NoOpDataBlockEncoder implem
   }
 
   @Override
-  public HFileBlock diskToCacheFormat(HFileBlock block, boolean isCompaction) {
-    if (block.getBlockType() == BlockType.ENCODED_DATA) {
-      throw new IllegalStateException("Unexpected encoded block");
-    }
-    return block;
-  }
-
-  @Override
   public void beforeWriteToDisk(ByteBuffer in,
       HFileBlockEncodingContext encodeCtx, BlockType blockType)
       throws IOException {
@@ -65,7 +55,7 @@ public class NoOpDataBlockEncoder implem
   }
 
   @Override
-  public boolean useEncodedScanner(boolean isCompaction) {
+  public boolean useEncodedScanner() {
     return false;
   }
 
@@ -74,17 +64,7 @@ public class NoOpDataBlockEncoder implem
   }
 
   @Override
-  public DataBlockEncoding getEncodingOnDisk() {
-    return DataBlockEncoding.NONE;
-  }
-
-  @Override
-  public DataBlockEncoding getEncodingInCache() {
-    return DataBlockEncoding.NONE;
-  }
-
-  @Override
-  public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction) {
+  public DataBlockEncoding getDataBlockEncoding() {
     return DataBlockEncoding.NONE;
   }
 
@@ -94,14 +74,13 @@ public class NoOpDataBlockEncoder implem
   }
 
   @Override
-  public HFileBlockEncodingContext newOnDiskDataBlockEncodingContext(
+  public HFileBlockEncodingContext newDataBlockEncodingContext(
       byte[] dummyHeader, HFileContext meta) {
     return new HFileBlockDefaultEncodingContext(null, dummyHeader, meta);
   }
 
   @Override
-  public HFileBlockDecodingContext newOnDiskDataBlockDecodingContext(HFileContext meta) {
+  public HFileBlockDecodingContext newDataBlockDecodingContext(HFileContext meta) {
     return new HFileBlockDefaultDecodingContext(meta);
   }
-
 }

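NoOpDataBlockEncoder is the NONE implementation of the slimmed-down interface: one encoding accessor instead of the old on-disk/in-cache/effective trio, default contexts, and never an encoded scanner. A behavioral sketch:

    HFileDataBlockEncoder noOp = NoOpDataBlockEncoder.INSTANCE;
    // The single accessor always reports NONE ...
    assert noOp.getDataBlockEncoding() == DataBlockEncoding.NONE;
    // ... and the answer no longer varies with an isCompaction flag.
    assert !noOp.useEncodedScanner();
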
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java Wed Nov 13 17:31:02 2013
@@ -198,8 +198,7 @@ public class HFileOutputFormat extends F
                                     .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
                                     .withBlockSize(blockSize);
         if(dataBlockEncodingStr !=  null) {
-          contextBuilder.withDataBlockEncodingOnDisk(DataBlockEncoding.valueOf(dataBlockEncodingStr))
-                        .withDataBlockEncodingInCache(DataBlockEncoding.valueOf(dataBlockEncodingStr));
+          contextBuilder.withDataBlockEncoding(DataBlockEncoding.valueOf(dataBlockEncodingStr));
         }
         HFileContext hFileContext = contextBuilder.build();
                                     

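On the write-config side, the duplicate builder calls collapse into one. A sketch of the resulting setup; the encoding string would come from the job configuration as in the hunk, and "PREFIX" plus the block size are only example values:

    String dataBlockEncodingStr = "PREFIX";  // illustrative; read from conf
    HFileContextBuilder contextBuilder = new HFileContextBuilder()
        .withBlockSize(64 * 1024);           // illustrative block size
    if (dataBlockEncodingStr != null) {
      // One call where there used to be withDataBlockEncodingOnDisk plus
      // withDataBlockEncodingInCache with the same value.
      contextBuilder.withDataBlockEncoding(
          DataBlockEncoding.valueOf(dataBlockEncodingStr));
    }
    HFileContext hFileContext = contextBuilder.build();
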
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java Wed Nov 13 17:31:02 2013
@@ -190,6 +190,7 @@ public class LoadIncrementalHFiles exten
    * @param table the table to load into
    * @throws TableNotFoundException if table does not yet exist
    */
+  @SuppressWarnings("deprecation")
   public void doBulkLoad(Path hfofDir, final HTable table)
     throws TableNotFoundException, IOException
   {
@@ -650,8 +651,7 @@ public class LoadIncrementalHFiles exten
     HalfStoreFileReader halfReader = null;
     StoreFile.Writer halfWriter = null;
     try {
-      halfReader = new HalfStoreFileReader(fs, inFile, cacheConf,
-          reference, DataBlockEncoding.NONE);
+      halfReader = new HalfStoreFileReader(fs, inFile, cacheConf, reference);
       Map<byte[], byte[]> fileInfo = halfReader.loadFileInfo();
 
       int blocksize = familyDescriptor.getBlocksize();
@@ -662,8 +662,7 @@ public class LoadIncrementalHFiles exten
                                   .withChecksumType(HStore.getChecksumType(conf))
                                   .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
                                   .withBlockSize(blocksize)
-                                  .withDataBlockEncodingInCache(familyDescriptor.getDataBlockEncoding())
-                                  .withDataBlockEncodingOnDisk(familyDescriptor.getDataBlockEncodingOnDisk())
+                                  .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding())
                                   .build();
       halfWriter = new StoreFile.WriterBuilder(conf, cacheConf,
           fs)

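Bulk-load splitting follows suit: the half reader is opened without a cache-encoding hint, and the writer context takes the family's single encoding. A fragment sketch using the same variable names as the hunk (fs, inFile, cacheConf, reference, and familyDescriptor are assumed in scope):

    // Four-argument constructor; the trailing DataBlockEncoding.NONE is gone
    // because the reader now derives the encoding from the file itself.
    HalfStoreFileReader halfReader =
        new HalfStoreFileReader(fs, inFile, cacheConf, reference);
    HFileContext hFileContext = new HFileContextBuilder()
        .withBlockSize(familyDescriptor.getBlocksize())
        .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding())
        .build();
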
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java Wed Nov 13 17:31:02 2013
@@ -64,7 +64,6 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
-import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
@@ -204,8 +203,7 @@ public class HStore implements Store {
     this.blocksize = family.getBlocksize();
 
     this.dataBlockEncoder =
-        new HFileDataBlockEncoderImpl(family.getDataBlockEncodingOnDisk(),
-            family.getDataBlockEncoding());
+        new HFileDataBlockEncoderImpl(family.getDataBlockEncoding());
 
     this.comparator = info.getComparator();
     // used by ScanQueryMatcher
@@ -473,14 +471,10 @@ public class HStore implements Store {
   }
 
   private StoreFile createStoreFileAndReader(final Path p) throws IOException {
-    return createStoreFileAndReader(p, this.dataBlockEncoder);
-  }
-
-  private StoreFile createStoreFileAndReader(final Path p, final HFileDataBlockEncoder encoder) throws IOException {
     StoreFileInfo info = new StoreFileInfo(conf, this.getFileSystem(), p);
     info.setRegionCoprocessorHost(this.region.getCoprocessorHost());
     StoreFile storeFile = new StoreFile(this.getFileSystem(), info, this.conf, this.cacheConf,
-        this.family.getBloomFilterType(), encoder);
+      this.family.getBloomFilterType());
     storeFile.createReader();
     return storeFile;
   }
@@ -833,8 +827,7 @@ public class HStore implements Store {
                                 .withBytesPerCheckSum(bytesPerChecksum)
                                 .withBlockSize(blocksize)
                                 .withHBaseCheckSum(true)
-                                .withDataBlockEncodingOnDisk(family.getDataBlockEncodingOnDisk())
-                                .withDataBlockEncodingInCache(family.getDataBlockEncoding())
+                                .withDataBlockEncoding(family.getDataBlockEncoding())
                                 .build();
     return hFileContext;
   }
@@ -1386,7 +1379,7 @@ public class HStore implements Store {
       throws IOException {
     StoreFile storeFile = null;
     try {
-      storeFile = createStoreFileAndReader(path, NoOpDataBlockEncoder.INSTANCE);
+      storeFile = createStoreFileAndReader(path);
     } catch (IOException e) {
       LOG.error("Failed to open store file : " + path
           + ", keeping it in tmp location", e);

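HStore now builds its one encoder straight from the column family, and the temporary-file path reuses the regular createStoreFileAndReader since there is no cache-format preference left to override. A construction fragment (family, fs, info, conf, and cacheConf as in the hunks):

    // One encoding from the family descriptor drives disk and cache alike.
    HFileDataBlockEncoder dataBlockEncoder =
        new HFileDataBlockEncoderImpl(family.getDataBlockEncoding());
    // StoreFile no longer takes an encoder parameter at all.
    StoreFile storeFile = new StoreFile(fs, info, conf, cacheConf,
        family.getBloomFilterType());
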
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java Wed Nov 13 17:31:02 2013
@@ -61,7 +61,6 @@ import org.apache.hadoop.hbase.filter.Co
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.Reference;
-import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
@@ -1648,7 +1647,6 @@ public class RegionCoprocessorHost
    * @param in {@link FSDataInputStreamWrapper}
    * @param size Full size of the file
    * @param cacheConf
-   * @param preferredEncodingInCache
    * @param r original reference file. This will be not null only when reading a split file.
    * @return a Reader instance to use instead of the base reader if overriding
    * default behavior, null otherwise
@@ -1656,7 +1654,7 @@ public class RegionCoprocessorHost
    */
   public StoreFile.Reader preStoreFileReaderOpen(final FileSystem fs, final Path p,
       final FSDataInputStreamWrapper in, long size, final CacheConfig cacheConf,
-      final DataBlockEncoding preferredEncodingInCache, final Reference r) throws IOException {
+      final Reference r) throws IOException {
     StoreFile.Reader reader = null;
     ObserverContext<RegionCoprocessorEnvironment> ctx = null;
     for (RegionEnvironment env : coprocessors) {
@@ -1664,7 +1662,7 @@ public class RegionCoprocessorHost
         ctx = ObserverContext.createAndPrepare(env, ctx);
         try {
           reader = ((RegionObserver) env.getInstance()).preStoreFileReaderOpen(ctx, fs, p, in,
-              size, cacheConf, preferredEncodingInCache, r, reader);
+            size, cacheConf, r, reader);
         } catch (Throwable e) {
           handleCoprocessorThrowable(env, e);
         }
@@ -1682,7 +1680,6 @@ public class RegionCoprocessorHost
    * @param in {@link FSDataInputStreamWrapper}
    * @param size Full size of the file
    * @param cacheConf
-   * @param preferredEncodingInCache
    * @param r original reference file. This will be not null only when reading a split file.
    * @param reader the base reader instance
    * @return The reader to use
@@ -1690,15 +1687,14 @@ public class RegionCoprocessorHost
    */
   public StoreFile.Reader postStoreFileReaderOpen(final FileSystem fs, final Path p,
       final FSDataInputStreamWrapper in, long size, final CacheConfig cacheConf,
-      final DataBlockEncoding preferredEncodingInCache, final Reference r, StoreFile.Reader reader)
-      throws IOException {
+      final Reference r, StoreFile.Reader reader) throws IOException {
     ObserverContext<RegionCoprocessorEnvironment> ctx = null;
     for (RegionEnvironment env : coprocessors) {
       if (env.getInstance() instanceof RegionObserver) {
         ctx = ObserverContext.createAndPrepare(env, ctx);
         try {
           reader = ((RegionObserver) env.getInstance()).postStoreFileReaderOpen(ctx, fs, p, in,
-              size, cacheConf, preferredEncodingInCache, r, reader);
+            size, cacheConf, r, reader);
         } catch (Throwable e) {
           handleCoprocessorThrowable(env, e);
         }

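For coprocessor authors, both reader-open hooks drop the preferredEncodingInCache parameter, so custom RegionObservers must be updated to the narrower signatures. A minimal pass-through observer against the patched hooks (argument order as shown in the hunk; the empty bodies just return the reader, as BaseRegionObserver does):

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
    import org.apache.hadoop.hbase.io.Reference;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.regionserver.StoreFile;

    public class PassThroughObserver extends BaseRegionObserver {
      @Override
      public StoreFile.Reader preStoreFileReaderOpen(
          ObserverContext<RegionCoprocessorEnvironment> ctx, FileSystem fs,
          Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
          Reference r, StoreFile.Reader reader) throws IOException {
        // No DataBlockEncoding parameter any more; returning the passed-in
        // reader keeps default behavior.
        return reader;
      }

      @Override
      public StoreFile.Reader postStoreFileReaderOpen(
          ObserverContext<RegionCoprocessorEnvironment> ctx, FileSystem fs,
          Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
          Reference r, StoreFile.Reader reader) throws IOException {
        return reader;
      }
    }
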
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Wed Nov 13 17:31:02 2013
@@ -48,10 +48,8 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
-import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.HFileWriterV2;
-import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.regionserver.compactions.Compactor;
 import org.apache.hadoop.hbase.util.BloomFilter;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
@@ -123,9 +121,6 @@ public class StoreFile {
   // Block cache configuration and reference.
   private final CacheConfig cacheConf;
 
-  // What kind of data block encoding will be used
-  private final HFileDataBlockEncoder dataBlockEncoder;
-
   // Keys for metadata stored in backing HFile.
   // Set when we obtain a Reader.
   private long sequenceid = -1;
@@ -186,13 +181,11 @@ public class StoreFile {
    *          as the Bloom filter type actually present in the HFile, because
    *          column family configuration might change. If this is
    *          {@link BloomType#NONE}, the existing Bloom filter is ignored.
-   * @param dataBlockEncoder data block encoding algorithm.
    * @throws IOException When opening the reader fails.
    */
   public StoreFile(final FileSystem fs, final Path p, final Configuration conf,
-        final CacheConfig cacheConf, final BloomType cfBloomType,
-        final HFileDataBlockEncoder dataBlockEncoder) throws IOException {
-    this(fs, new StoreFileInfo(conf, fs, p), conf, cacheConf, cfBloomType, dataBlockEncoder);
+        final CacheConfig cacheConf, final BloomType cfBloomType) throws IOException {
+    this(fs, new StoreFileInfo(conf, fs, p), conf, cacheConf, cfBloomType);
   }
 
 
@@ -209,18 +202,13 @@ public class StoreFile {
    *          as the Bloom filter type actually present in the HFile, because
    *          column family configuration might change. If this is
    *          {@link BloomType#NONE}, the existing Bloom filter is ignored.
-   * @param dataBlockEncoder data block encoding algorithm.
    * @throws IOException When opening the reader fails.
    */
   public StoreFile(final FileSystem fs, final StoreFileInfo fileInfo, final Configuration conf,
-      final CacheConfig cacheConf,  final BloomType cfBloomType,
-      final HFileDataBlockEncoder dataBlockEncoder) throws IOException {
+      final CacheConfig cacheConf,  final BloomType cfBloomType) throws IOException {
     this.fs = fs;
     this.fileInfo = fileInfo;
     this.cacheConf = cacheConf;
-    this.dataBlockEncoder =
-        dataBlockEncoder == null ? NoOpDataBlockEncoder.INSTANCE
-            : dataBlockEncoder;
 
     if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
       this.cfBloomType = cfBloomType;
@@ -363,7 +351,7 @@ public class StoreFile {
     }
 
     // Open the StoreFile.Reader
-    this.reader = fileInfo.open(this.fs, this.cacheConf, dataBlockEncoder.getEncodingInCache());
+    this.reader = fileInfo.open(this.fs, this.cacheConf);
 
     // Load up indices and fileinfo. This also loads Bloom filter type.
     metadataMap = Collections.unmodifiableMap(this.reader.loadFileInfo());
@@ -659,6 +647,7 @@ public class StoreFile {
    * @param comparator Comparator used to compare KVs.
    * @return The split point row, or null if splitting is not possible, or reader is null.
    */
+  @SuppressWarnings("deprecation")
   byte[] getFileSplitPoint(KVComparator comparator) throws IOException {
     if (this.reader == null) {
       LOG.warn("Storefile " + this + " Reader is null; cannot get split point");
@@ -1023,17 +1012,14 @@ public class StoreFile {
     private byte[] lastBloomKey;
     private long deleteFamilyCnt = -1;
 
-    public Reader(FileSystem fs, Path path, CacheConfig cacheConf,
-        DataBlockEncoding preferredEncodingInCache) throws IOException {
-      reader = HFile.createReaderWithEncoding(fs, path, cacheConf,
-          preferredEncodingInCache);
+    public Reader(FileSystem fs, Path path, CacheConfig cacheConf) throws IOException {
+      reader = HFile.createReader(fs, path, cacheConf);
       bloomFilterType = BloomType.NONE;
     }
 
     public Reader(FileSystem fs, Path path, FSDataInputStreamWrapper in, long size,
-        CacheConfig cacheConf, DataBlockEncoding preferredEncodingInCache) throws IOException {
-      reader = HFile.createReaderWithEncoding(
-          fs, path, in, size, cacheConf, preferredEncodingInCache);
+        CacheConfig cacheConf) throws IOException {
+      reader = HFile.createReader(fs, path, in, size, cacheConf);
       bloomFilterType = BloomType.NONE;
     }