Posted to commits@hbase.apache.org by te...@apache.org on 2011/12/25 03:49:48 UTC

svn commit: r1223042 [2/3] - in /hbase/trunk/src: main/java/org/apache/hadoop/hbase/ main/java/org/apache/hadoop/hbase/io/ main/java/org/apache/hadoop/hbase/io/encoding/ main/java/org/apache/hadoop/hbase/io/hfile/ main/java/org/apache/hadoop/hbase/mapr...

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java?rev=1223042&r1=1223041&r2=1223042&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java Sun Dec 25 02:49:46 2011
@@ -30,8 +30,6 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
-import org.apache.hadoop.hbase.io.encoding.DataBlockEncodings;
 import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -82,20 +80,17 @@ public class HFileReaderV2 extends Abstr
    * @param size Length of the stream.
    * @param closeIStream Whether to close the stream.
    * @param cacheConf Cache configuration.
-   * @param blockEncoder what kind of data block encoding will be used
    * @throws IOException
    */
   public HFileReaderV2(Path path, FixedFileTrailer trailer,
       final FSDataInputStream fsdis, final long size,
-      final boolean closeIStream, final CacheConfig cacheConf,
-      final HFileDataBlockEncoder blockEncoder)
+      final boolean closeIStream, final CacheConfig cacheConf)
   throws IOException {
-    super(path, trailer, fsdis, size, closeIStream, cacheConf,
-        blockEncoder);
+    super(path, trailer, fsdis, size, closeIStream, cacheConf);
+
     trailer.expectVersion(2);
-    HFileBlock.FSReaderV2 fsBlockReader = new HFileBlock.FSReaderV2(fsdis,
-        compressAlgo, fileSize, blockEncoder);
-    this.fsBlockReader = fsBlockReader;
+    fsBlockReader = new HFileBlock.FSReaderV2(fsdis, compressAlgo,
+        fileSize);
 
     // Comparator class name is stored in the trailer in version 2.
     comparator = trailer.createComparator();
@@ -128,10 +123,8 @@ public class HFileReaderV2 extends Abstr
     avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN));
     avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN));
     byte [] keyValueFormatVersion = fileInfo.get(HFileWriterV2.KEY_VALUE_VERSION);
-    includesMemstoreTS = keyValueFormatVersion != null &&
-        Bytes.toInt(keyValueFormatVersion) ==
-            HFileWriterV2.KEY_VALUE_VER_WITH_MEMSTORE;
-    fsBlockReader.setIncludesMemstoreTS(includesMemstoreTS);
+    includesMemstoreTS = (keyValueFormatVersion != null &&
+        Bytes.toInt(keyValueFormatVersion) == HFileWriterV2.KEY_VALUE_VER_WITH_MEMSTORE);
 
     // Store all other load-on-open blocks for further consumption.
     HFileBlock b;
@@ -152,15 +145,9 @@ public class HFileReaderV2 extends Abstr
    * @param isCompaction is scanner being used for a compaction?
    * @return Scanner on this file.
    */
-   @Override
-   public HFileScanner getScanner(boolean cacheBlocks, final boolean pread,
+  @Override
+  public HFileScanner getScanner(boolean cacheBlocks, final boolean pread,
       final boolean isCompaction) {
-    // check if we want to use data block encoding in memory
-    if (blockEncoder.useEncodedScanner(isCompaction)) {
-      return new EncodedScannerV2(this, cacheBlocks, pread, isCompaction,
-          includesMemstoreTS);
-    }
-
     return new ScannerV2(this, cacheBlocks, pread, isCompaction);
   }
 
@@ -271,8 +258,6 @@ public class HFileReaderV2 extends Abstr
       if (cacheConf.isBlockCacheEnabled()) {
         HFileBlock cachedBlock =
           (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey, cacheBlock);
-        cachedBlock = blockEncoder.afterBlockCache(cachedBlock,
-            isCompaction, shouldIncludeMemstoreTS());
         if (cachedBlock != null) {
           BlockCategory blockCategory =
               cachedBlock.getBlockType().getCategory();
@@ -280,9 +265,8 @@ public class HFileReaderV2 extends Abstr
 
           getSchemaMetrics().updateOnCacheHit(blockCategory, isCompaction);
 
-          if (cachedBlock.getBlockType() == BlockType.DATA) {
+          if (cachedBlock.getBlockType() == BlockType.DATA)
             HFile.dataBlockReadCnt.incrementAndGet();
-          }
           return cachedBlock;
         }
         // Carry on, please load.
@@ -308,8 +292,6 @@ public class HFileReaderV2 extends Abstr
       // Cache the block
       if (cacheBlock && cacheConf.shouldCacheBlockOnRead(
               hfileBlock.getBlockType().getCategory())) {
-        hfileBlock = blockEncoder.beforeBlockCache(
-            hfileBlock, includesMemstoreTS);
         cacheConf.getBlockCache().cacheBlock(cacheKey, hfileBlock,
             cacheConf.isInMemory());
       }
@@ -318,8 +300,7 @@ public class HFileReaderV2 extends Abstr
         HFile.dataBlockReadCnt.incrementAndGet();
       }
 
-      return blockEncoder.afterReadFromDiskAndPuttingInCache(hfileBlock,
-          isCompaction, includesMemstoreTS);
+      return hfileBlock;
     } finally {
       offsetLock.releaseLockEntry(lockEntry);
     }
@@ -364,154 +345,25 @@ public class HFileReaderV2 extends Abstr
     }
   }
 
-  protected abstract static class AbstractScannerV2
-      extends AbstractHFileReader.Scanner {
-    protected HFileBlock block;
-
-    public AbstractScannerV2(HFileReaderV2 r, boolean cacheBlocks,
-        final boolean pread, final boolean isCompaction) {
-      super(r, cacheBlocks, pread, isCompaction);
-    }
-
-    /**
-     * An internal API function. Seek to the given key, optionally rewinding to
-     * the first key of the block before doing the seek.
-     *
-     * @param key key byte array
-     * @param offset key offset in the key byte array
-     * @param length key length
-     * @param rewind whether to rewind to the first key of the block before
-     *        doing the seek. If this is false, we are assuming we never go
-     *        back, otherwise the result is undefined.
-     * @return -1 if the key is earlier than the first key of the file,
-     *         0 if we are at the given key, and 1 if we are past the given key
-     * @throws IOException
-     */
-    protected int seekTo(byte[] key, int offset, int length, boolean rewind)
-        throws IOException {
-      HFileBlockIndex.BlockIndexReader indexReader =
-          reader.getDataBlockIndexReader();
-      HFileBlock seekToBlock = indexReader.seekToDataBlock(key, offset, length,
-          block, cacheBlocks, pread, isCompaction);
-      if (seekToBlock == null) {
-        // This happens if the key e.g. falls before the beginning of the file.
-        return -1;
-      }
-      return loadBlockAndSeekToKey(seekToBlock, rewind, key, offset, length,
-          false);
-    }
-
-    protected abstract ByteBuffer getFirstKeyInBlock(HFileBlock curBlock);
-
-    protected abstract int loadBlockAndSeekToKey(HFileBlock seekToBlock,
-        boolean rewind, byte[] key, int offset, int length, boolean seekBefore)
-        throws IOException;
-
-    @Override
-    public int seekTo(byte[] key, int offset, int length) throws IOException {
-      // Always rewind to the first key of the block, because the given key
-      // might be before or after the current key.
-      return seekTo(key, offset, length, true);
-    }
-
-    @Override
-    public int reseekTo(byte[] key, int offset, int length) throws IOException {
-      if (isSeeked()) {
-        ByteBuffer bb = getKey();
-        int compared = reader.getComparator().compare(key, offset,
-            length, bb.array(), bb.arrayOffset(), bb.limit());
-        if (compared < 1) {
-          // If the required key is less than or equal to current key, then
-          // don't do anything.
-          return compared;
-        }
-      }
-
-      // Don't rewind on a reseek operation, because reseek implies that we are
-      // always going forward in the file.
-      return seekTo(key, offset, length, false);
-    }
-
-    @Override
-    public boolean seekBefore(byte[] key, int offset, int length)
-        throws IOException {
-      HFileBlock seekToBlock =
-          reader.getDataBlockIndexReader().seekToDataBlock(key, offset, length,
-              block, cacheBlocks, pread, isCompaction);
-      if (seekToBlock == null) {
-        return false;
-      }
-      ByteBuffer firstKey = getFirstKeyInBlock(seekToBlock);
-      if (reader.getComparator().compare(firstKey.array(),
-          firstKey.arrayOffset(), firstKey.limit(), key, offset, length) == 0)
-      {
-        long previousBlockOffset = seekToBlock.getPrevBlockOffset();
-        // The key we are interested in
-        if (previousBlockOffset == -1) {
-          // we have a 'problem', the key we want is the first of the file.
-          return false;
-        }
-
-        // It is important that we compute and pass onDiskSize to the block
-        // reader so that it does not have to read the header separately to
-        // figure out the size.
-        seekToBlock = reader.readBlock(previousBlockOffset,
-            seekToBlock.getOffset() - previousBlockOffset, cacheBlocks,
-            pread, isCompaction);
-
-        // TODO shortcut: seek forward in this block to the last key of the
-        // block.
-      }
-      loadBlockAndSeekToKey(seekToBlock, true, key, offset, length, true);
-      return true;
-    }
-
-
-    /**
-     * Scans blocks in the "scanned" section of the {@link HFile} until the next
-     * data block is found.
-     *
-     * @return the next block, or null if there are no more data blocks
-     * @throws IOException
-     */
-    protected HFileBlock readNextDataBlock() throws IOException {
-      long lastDataBlockOffset = reader.getTrailer().getLastDataBlockOffset();
-      if (block == null)
-        return null;
-
-      HFileBlock curBlock = block;
-
-      do {
-        if (curBlock.getOffset() >= lastDataBlockOffset)
-          return null;
-
-        if (curBlock.getOffset() < 0) {
-          throw new IOException("Invalid block file offset: " + block);
-        }
-        curBlock = reader.readBlock(curBlock.getOffset()
-            + curBlock.getOnDiskSizeWithHeader(),
-            curBlock.getNextBlockOnDiskSizeWithHeader(), cacheBlocks, pread,
-            isCompaction);
-      } while (!(curBlock.getBlockType().equals(BlockType.DATA) ||
-          curBlock.getBlockType().equals(BlockType.ENCODED_DATA)));
-
-      return curBlock;
-    }
-  }
-
   /**
    * Implementation of {@link HFileScanner} interface.
    */
-  protected static class ScannerV2 extends AbstractScannerV2 {
+  protected static class ScannerV2 extends AbstractHFileReader.Scanner {
+    private HFileBlock block;
     private HFileReaderV2 reader;
 
     public ScannerV2(HFileReaderV2 r, boolean cacheBlocks,
         final boolean pread, final boolean isCompaction) {
-      super(r, cacheBlocks, pread, isCompaction);
+      super(cacheBlocks, pread, isCompaction);
       this.reader = r;
     }
 
     @Override
+    public HFileReaderV2 getReader() {
+      return reader;
+    }
+
+    @Override
     public KeyValue getKeyValue() {
       if (!isSeeked())
         return null;
@@ -600,6 +452,36 @@ public class HFileReaderV2 extends Abstr
     }
 
     /**
+     * Scans blocks in the "scanned" section of the {@link HFile} until the next
+     * data block is found.
+     *
+     * @return the next block, or null if there are no more data blocks
+     * @throws IOException
+     */
+    private HFileBlock readNextDataBlock() throws IOException {
+      long lastDataBlockOffset = reader.getTrailer().getLastDataBlockOffset();
+      if (block == null)
+        return null;
+
+      HFileBlock curBlock = block;
+
+      do {
+        if (curBlock.getOffset() >= lastDataBlockOffset)
+          return null;
+
+        if (curBlock.getOffset() < 0) {
+          throw new IOException("Invalid block file offset: " + block);
+        }
+        curBlock = reader.readBlock(curBlock.getOffset()
+            + curBlock.getOnDiskSizeWithHeader(),
+            curBlock.getNextBlockOnDiskSizeWithHeader(), cacheBlocks, pread,
+            isCompaction);
+      } while (!curBlock.getBlockType().equals(BlockType.DATA));
+
+      return curBlock;
+    }
+
+    /**
      * Positions this scanner at the start of the file.
      *
      * @return false if empty file; i.e. a call to next would return false and
@@ -635,7 +517,70 @@ public class HFileReaderV2 extends Abstr
     }
 
     @Override
-    protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, boolean rewind,
+    public int seekTo(byte[] key) throws IOException {
+      return seekTo(key, 0, key.length);
+    }
+
+    /**
+     * An internal API function. Seek to the given key, optionally rewinding to
+     * the first key of the block before doing the seek.
+     *
+     * @param key key byte array
+     * @param offset key offset in the key byte array
+     * @param length key length
+     * @param rewind whether to rewind to the first key of the block before
+     *        doing the seek. If this is false, we are assuming we never go
+     *        back, otherwise the result is undefined.
+     * @return -1 if the key is earlier than the first key of the file,
+     *         0 if we are at the given key, and 1 if we are past the given key
+     * @throws IOException
+     */
+    private int seekTo(byte[] key, int offset, int length, boolean rewind)
+        throws IOException {
+      HFileBlockIndex.BlockIndexReader indexReader =
+          reader.getDataBlockIndexReader();
+      HFileBlock seekToBlock = indexReader.seekToDataBlock(key, offset, length,
+          block, cacheBlocks, pread, isCompaction);
+
+      if (seekToBlock == null) {
+        // This happens if the key e.g. falls before the beginning of the file.
+        return -1;
+      }
+      return loadBlockAndSeekToKey(seekToBlock, rewind, key, offset, length,
+          false);
+    }
+
+    @Override
+    public int seekTo(byte[] key, int offset, int length) throws IOException {
+      // Always rewind to the first key of the block, because the given key
+      // might be before or after the current key.
+      return seekTo(key, offset, length, true);
+    }
+
+    @Override
+    public int reseekTo(byte[] key) throws IOException {
+      return reseekTo(key, 0, key.length);
+    }
+
+    @Override
+    public int reseekTo(byte[] key, int offset, int length) throws IOException {
+      if (isSeeked()) {
+        ByteBuffer bb = getKey();
+        int compared = reader.getComparator().compare(key, offset,
+            length, bb.array(), bb.arrayOffset(), bb.limit());
+        if (compared < 1) {
+          // If the required key is less than or equal to current key, then
+          // don't do anything.
+          return compared;
+        }
+      }
+
+      // Don't rewind on a reseek operation, because reseek implies that we are
+      // always going forward in the file.
+      return seekTo(key, offset, length, false);
+    }
+
+    private int loadBlockAndSeekToKey(HFileBlock seekToBlock, boolean rewind,
         byte[] key, int offset, int length, boolean seekBefore)
         throws IOException {
       if (block == null || block.getOffset() != seekToBlock.getOffset()) {
@@ -654,13 +599,6 @@ public class HFileReaderV2 extends Abstr
      */
     private void updateCurrBlock(HFileBlock newBlock) {
       block = newBlock;
-
-      // sanity check
-      if (block.getBlockType() != BlockType.DATA) {
-        throw new IllegalStateException(
-            "ScannerV2 works only on data blocks");
-      }
-
       blockBuffer = block.getBufferWithoutHeader();
       readKeyValueLen();
       blockFetches++;
@@ -775,7 +713,11 @@ public class HFileReaderV2 extends Abstr
     }
 
     @Override
-    protected ByteBuffer getFirstKeyInBlock(HFileBlock curBlock) {
+    public boolean seekBefore(byte[] key) throws IOException {
+      return seekBefore(key, 0, key.length);
+    }
+
+    private ByteBuffer getFirstKeyInBlock(HFileBlock curBlock) {
       ByteBuffer buffer = curBlock.getBufferWithoutHeader();
       // It is safe to manipulate this buffer because we own the buffer object.
       buffer.rewind();
@@ -788,174 +730,53 @@ public class HFileReaderV2 extends Abstr
     }
 
     @Override
-    public String getKeyString() {
-      return Bytes.toStringBinary(blockBuffer.array(),
-          blockBuffer.arrayOffset() + blockBuffer.position()
-              + KEY_VALUE_LEN_SIZE, currKeyLen);
-    }
-
-    @Override
-    public String getValueString() {
-      return Bytes.toString(blockBuffer.array(), blockBuffer.arrayOffset()
-          + blockBuffer.position() + KEY_VALUE_LEN_SIZE + currKeyLen,
-          currValueLen);
-    }
-  }
-
-  /**
-   * ScannerV2 which operate on encoded data blocks.
-   */
-  protected static class EncodedScannerV2 extends AbstractScannerV2 {
-    private DataBlockEncoder.EncodedSeeker seeker = null;
-    private DataBlockEncoder dataBlockEncoder = null;
-    private final boolean includesMemstoreTS;
-
-    public EncodedScannerV2(HFileReaderV2 reader, boolean cacheBlocks,
-        boolean pread, boolean isCompaction, boolean includesMemstoreTS) {
-      super(reader, cacheBlocks, pread, isCompaction);
-      this.includesMemstoreTS = includesMemstoreTS;
-    }
-
-    private void setDataBlockEncoder(DataBlockEncoder dataBlockEncoder) {
-      this.dataBlockEncoder = dataBlockEncoder;
-      seeker = dataBlockEncoder.createSeeker(reader.getComparator(),
-          includesMemstoreTS);
-    }
-
-    /**
-     * Updates the current block to be the given {@link HFileBlock}. Seeks to
-     * the the first key/value pair.
-     *
-     * @param newBlock the block to make current
-     */
-    private void updateCurrentBlock(HFileBlock newBlock) {
-      block = newBlock;
-
-      // sanity checks
-      if (block.getBlockType() != BlockType.ENCODED_DATA) {
-        throw new IllegalStateException(
-            "EncodedScannerV2 works only on encoded data blocks");
-      }
-
-      short dataBlockEncoderId = block.getDataBlockEncodingId();
-      if (dataBlockEncoder == null
-          || !DataBlockEncodings.isCorrectEncoder(dataBlockEncoder,
-              dataBlockEncoderId)) {
-        DataBlockEncoder encoder =
-            DataBlockEncodings.getDataBlockEncoderFromId(dataBlockEncoderId);
-        setDataBlockEncoder(encoder);
-      }
-
-      seeker.setCurrentBuffer(getEncodedBuffer(newBlock));
-      blockFetches++;
-    }
-
-    private ByteBuffer getEncodedBuffer(HFileBlock newBlock) {
-      ByteBuffer origBlock = newBlock.getBufferReadOnly();
-      ByteBuffer encodedBlock = ByteBuffer.wrap(origBlock.array(),
-          origBlock.arrayOffset() + HFileBlock.HEADER_SIZE +
-          DataBlockEncodings.ID_SIZE,
-          origBlock.limit() - HFileBlock.HEADER_SIZE -
-          DataBlockEncodings.ID_SIZE).slice();
-      return encodedBlock;
-    }
-
-    @Override
-    public boolean seekTo() throws IOException {
-      if (reader == null) {
-        return false;
-      }
-
-      if (reader.getTrailer().getEntryCount() == 0) {
-        // No data blocks.
+    public boolean seekBefore(byte[] key, int offset, int length)
+        throws IOException {
+      HFileBlock seekToBlock =
+          reader.getDataBlockIndexReader().seekToDataBlock(key, offset,
+              length, block, cacheBlocks, pread, isCompaction);
+      if (seekToBlock == null) {
         return false;
       }
-
-      long firstDataBlockOffset =
-          reader.getTrailer().getFirstDataBlockOffset();
-      if (block != null && block.getOffset() == firstDataBlockOffset) {
-        seeker.rewind();
-        return true;
-      }
-
-      block = reader.readBlock(firstDataBlockOffset, -1, cacheBlocks, pread,
-          isCompaction);
-      if (block.getOffset() < 0) {
-        throw new IOException("Invalid block offset: " + block.getOffset());
-      }
-      updateCurrentBlock(block);
-      return true;
-    }
-
-    @Override
-    public boolean next() throws IOException {
-      boolean isValid = seeker.next();
-      if (!isValid) {
-        block = readNextDataBlock();
-        isValid = block != null;
-        if (isValid) {
-          updateCurrentBlock(block);
+      ByteBuffer firstKey = getFirstKeyInBlock(seekToBlock);
+      if (reader.getComparator().compare(firstKey.array(),
+          firstKey.arrayOffset(), firstKey.limit(), key, offset, length) == 0)
+      {
+        long previousBlockOffset = seekToBlock.getPrevBlockOffset();
+        // The key we are interested in
+        if (previousBlockOffset == -1) {
+          // we have a 'problem', the key we want is the first of the file.
+          return false;
         }
-      }
-      return isValid;
-    }
 
-    @Override
-    public ByteBuffer getKey() {
-      assertValidSeek();
-      return seeker.getKey();
-    }
-
-    @Override
-    public ByteBuffer getValue() {
-      assertValidSeek();
-      return seeker.getValue();
-    }
+        // It is important that we compute and pass onDiskSize to the block
+        // reader so that it does not have to read the header separately to
+        // figure out the size.
+        seekToBlock = reader.readBlock(previousBlockOffset,
+            seekToBlock.getOffset() - previousBlockOffset, cacheBlocks,
+            pread, isCompaction);
 
-    @Override
-    public KeyValue getKeyValue() {
-      if (block == null) {
-        return null;
+        // TODO shortcut: seek forward in this block to the last key of the
+        // block.
       }
-      return seeker.getKeyValueObject();
+      loadBlockAndSeekToKey(seekToBlock, true, key, offset, length, true);
+      return true;
     }
 
     @Override
     public String getKeyString() {
-      ByteBuffer keyBuffer = getKey();
-      return Bytes.toStringBinary(keyBuffer.array(),
-          keyBuffer.arrayOffset(), keyBuffer.limit());
+      return Bytes.toStringBinary(blockBuffer.array(),
+          blockBuffer.arrayOffset() + blockBuffer.position()
+              + KEY_VALUE_LEN_SIZE, currKeyLen);
     }
 
     @Override
     public String getValueString() {
-      ByteBuffer valueBuffer = getValue();
-      return Bytes.toStringBinary(valueBuffer.array(),
-          valueBuffer.arrayOffset(), valueBuffer.limit());
-    }
-
-    private void assertValidSeek() {
-      if (block == null) {
-        throw new NotSeekedException();
-      }
-    }
-
-    @Override
-    protected ByteBuffer getFirstKeyInBlock(HFileBlock curBlock) {
-      return dataBlockEncoder.getFirstKeyInBlock(getEncodedBuffer(curBlock));
+      return Bytes.toString(blockBuffer.array(), blockBuffer.arrayOffset()
+          + blockBuffer.position() + KEY_VALUE_LEN_SIZE + currKeyLen,
+          currValueLen);
     }
 
-    @Override
-    protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, boolean rewind,
-        byte[] key, int offset, int length, boolean seekBefore)
-        throws IOException  {
-      if (block == null || block.getOffset() != seekToBlock.getOffset()) {
-        updateCurrentBlock(seekToBlock);
-      } else if (rewind) {
-        seeker.rewind();
-      }
-      return seeker.blockSeekTo(key, offset, length, seekBefore);
-    }
   }
 
   /**
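
For reference, the seekTo/reseekTo contract documented in the javadoc above (seekTo may rewind to the start of a block; reseekTo is forward-only and never rewinds) is exercised roughly as in the sketch below. This is only an illustration of the post-revert reader API; the file path, key bytes and configuration are assumed, not taken from this patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.io.hfile.HFileScanner;
    import org.apache.hadoop.hbase.util.Bytes;

    public class HFileSeekSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/tmp/example.hfile");          // assumed path
        HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf));
        reader.loadFileInfo();
        // cacheBlocks=true, pread=false, isCompaction=false -> plain ScannerV2 after this change
        HFileScanner scanner = reader.getScanner(true, false, false);
        byte[] key = Bytes.toBytes("row-0042");              // assumed key
        // Per the javadoc above: -1 = before the first key of the file,
        // 0 = exact match, 1 = positioned past the given key.
        if (scanner.seekTo(key) != -1) {
          System.out.println(scanner.getKeyString() + " -> " + scanner.getValueString());
        }
        // reseekTo never rewinds, so it must only be used to move forward in the file.
        scanner.reseekTo(Bytes.toBytes("row-0100"));
        reader.close();
      }
    }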

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java?rev=1223042&r1=1223041&r2=1223042&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java Sun Dec 25 02:49:46 2011
@@ -35,10 +35,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KeyComparator;
-import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
 import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
-import org.apache.hadoop.hbase.regionserver.MemStore;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
 import org.apache.hadoop.hbase.util.BloomFilterWriter;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -46,7 +44,7 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.compress.Compressor;
 
 /**
- * Writes version 1 HFiles. Mainly used for testing backwards-compatibility.
+ * Writes version 1 HFiles. Mainly used for testing backwards-compatibility.
  */
 public class HFileWriterV1 extends AbstractHFileWriter {
 
@@ -93,17 +91,16 @@ public class HFileWriterV1 extends Abstr
 
     @Override
     public Writer createWriter(FileSystem fs, Path path, int blockSize,
-        Algorithm compressAlgo, HFileDataBlockEncoder dataBlockEncoder,
-        KeyComparator comparator)
+        Compression.Algorithm compressAlgo, final KeyComparator comparator)
         throws IOException {
       return new HFileWriterV1(conf, cacheConf, fs, path, blockSize,
-          compressAlgo, dataBlockEncoder, comparator);
+          compressAlgo, comparator);
     }
 
     @Override
     public Writer createWriter(FileSystem fs, Path path, int blockSize,
-        String compressAlgoName, KeyComparator comparator)
-        throws IOException {
+        String compressAlgoName,
+        final KeyComparator comparator) throws IOException {
       return new HFileWriterV1(conf, cacheConf, fs, path, blockSize,
           compressAlgoName, comparator);
     }
@@ -120,8 +117,7 @@ public class HFileWriterV1 extends Abstr
     public Writer createWriter(final FSDataOutputStream ostream,
         final int blockSize, final Compression.Algorithm compress,
         final KeyComparator c) throws IOException {
-      return new HFileWriterV1(cacheConf, ostream, blockSize, compress,
-          new NoOpDataBlockEncoder(), c);
+      return new HFileWriterV1(cacheConf, ostream, blockSize, compress, c);
     }
   }
 
@@ -131,7 +127,7 @@ public class HFileWriterV1 extends Abstr
       throws IOException {
     this(conf, cacheConf, fs, path, HFile.DEFAULT_BLOCKSIZE,
         HFile.DEFAULT_COMPRESSION_ALGORITHM,
-        new NoOpDataBlockEncoder(), null);
+        null);
   }
 
   /**
@@ -142,18 +138,15 @@ public class HFileWriterV1 extends Abstr
       Path path, int blockSize, String compressAlgoName,
       final KeyComparator comparator) throws IOException {
     this(conf, cacheConf, fs, path, blockSize,
-        compressionByName(compressAlgoName), new NoOpDataBlockEncoder(),
-        comparator);
+        compressionByName(compressAlgoName), comparator);
   }
 
   /** Constructor that takes a path, creates and closes the output stream. */
-  public HFileWriterV1(Configuration conf, CacheConfig cacheConf,
-      FileSystem fs, Path path,
-      int blockSize, Compression.Algorithm compress,
-      HFileDataBlockEncoder blockEncoder,
+  public HFileWriterV1(Configuration conf, CacheConfig cacheConf, FileSystem fs,
+      Path path, int blockSize, Compression.Algorithm compress,
       final KeyComparator comparator) throws IOException {
     super(cacheConf, createOutputStream(conf, fs, path), path,
-        blockSize, compress, blockEncoder, comparator);
+        blockSize, compress, comparator);
     SchemaMetrics.configureGlobally(conf);
   }
 
@@ -164,17 +157,15 @@ public class HFileWriterV1 extends Abstr
       throws IOException {
     this(cacheConf, outputStream, blockSize,
         Compression.getCompressionAlgorithmByName(compressAlgoName),
-        new NoOpDataBlockEncoder(), comparator);
+        comparator);
   }
 
   /** Constructor that takes a stream. */
   public HFileWriterV1(CacheConfig cacheConf,
       final FSDataOutputStream outputStream, final int blockSize,
-      final Compression.Algorithm compress,
-      HFileDataBlockEncoder blockEncoder, final KeyComparator comparator)
+      final Compression.Algorithm compress, final KeyComparator comparator)
       throws IOException {
-    super(cacheConf, outputStream, null, blockSize, compress,
-        blockEncoder, comparator);
+    super(cacheConf, outputStream, null, blockSize, compress, comparator);
   }
 
   /**
@@ -211,17 +202,13 @@ public class HFileWriterV1 extends Abstr
 
     if (cacheConf.shouldCacheDataOnWrite()) {
       baosDos.flush();
-      // we do not do dataBlockEncoding on disk HFile V2.
       byte[] bytes = baos.toByteArray();
       HFileBlock cBlock = new HFileBlock(BlockType.DATA,
           (int) (outputStream.getPos() - blockBegin), bytes.length, -1,
-          ByteBuffer.wrap(bytes, 0, bytes.length), HFileBlock.FILL_HEADER,
-          blockBegin, MemStore.NO_PERSISTENT_TS);
-      HFileBlock codedBlock = blockEncoder.beforeBlockCache(cBlock,
-          false);
-      passSchemaMetricsTo(codedBlock);
+          ByteBuffer.wrap(bytes, 0, bytes.length), true, blockBegin);
+      passSchemaMetricsTo(cBlock);
       cacheConf.getBlockCache().cacheBlock(
-          HFile.getBlockCacheKey(name, blockBegin), codedBlock);
+          HFile.getBlockCacheKey(name, blockBegin), cBlock);
       baosDos.close();
     }
     blockNumber++;

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java?rev=1223042&r1=1223041&r2=1223042&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java Sun Dec 25 02:49:46 2011
@@ -49,13 +49,9 @@ public class HFileWriterV2 extends Abstr
   static final Log LOG = LogFactory.getLog(HFileWriterV2.class);
 
   /** Max memstore (mvcc) timestamp in FileInfo */
-  public static final byte [] MAX_MEMSTORE_TS_KEY =
-      Bytes.toBytes("MAX_MEMSTORE_TS_KEY");
-
+  public static final byte [] MAX_MEMSTORE_TS_KEY = Bytes.toBytes("MAX_MEMSTORE_TS_KEY");
   /** KeyValue version in FileInfo */
-  public static final byte [] KEY_VALUE_VERSION =
-      Bytes.toBytes("KEY_VALUE_VERSION");
-
+  public static final byte [] KEY_VALUE_VERSION = Bytes.toBytes("KEY_VALUE_VERSION");
   /** Version for KeyValue which includes memstore timestamp */
   public static final int KEY_VALUE_VER_WITH_MEMSTORE = 1;
 
@@ -96,10 +92,10 @@ public class HFileWriterV2 extends Abstr
 
     @Override
     public Writer createWriter(FileSystem fs, Path path, int blockSize,
-        Compression.Algorithm compress, HFileDataBlockEncoder blockEncoder,
+        Compression.Algorithm compress,
         final KeyComparator comparator) throws IOException {
       return new HFileWriterV2(conf, cacheConf, fs, path, blockSize,
-          compress, blockEncoder, comparator);
+          compress, comparator);
     }
 
     @Override
@@ -132,7 +128,7 @@ public class HFileWriterV2 extends Abstr
       FileSystem fs, Path path)
       throws IOException {
     this(conf, cacheConf, fs, path, HFile.DEFAULT_BLOCKSIZE,
-        HFile.DEFAULT_COMPRESSION_ALGORITHM, null, null);
+        HFile.DEFAULT_COMPRESSION_ALGORITHM, null);
   }
 
   /**
@@ -143,16 +139,15 @@ public class HFileWriterV2 extends Abstr
       Path path, int blockSize, String compressAlgoName,
       final KeyComparator comparator) throws IOException {
     this(conf, cacheConf, fs, path, blockSize,
-        compressionByName(compressAlgoName), null, comparator);
+        compressionByName(compressAlgoName), comparator);
   }
 
   /** Constructor that takes a path, creates and closes the output stream. */
   public HFileWriterV2(Configuration conf, CacheConfig cacheConf, FileSystem fs,
       Path path, int blockSize, Compression.Algorithm compressAlgo,
-      HFileDataBlockEncoder blockEncoder,
       final KeyComparator comparator) throws IOException {
     super(cacheConf, createOutputStream(conf, fs, path), path,
-        blockSize, compressAlgo, blockEncoder, comparator);
+        blockSize, compressAlgo, comparator);
     SchemaMetrics.configureGlobally(conf);
     finishInit(conf);
   }
@@ -172,8 +167,7 @@ public class HFileWriterV2 extends Abstr
       final FSDataOutputStream outputStream, final int blockSize,
       final Compression.Algorithm compress, final KeyComparator comparator)
       throws IOException {
-    super(cacheConf, outputStream, null, blockSize, compress, null,
-        comparator);
+    super(cacheConf, outputStream, null, blockSize, compress, comparator);
     finishInit(conf);
   }
 
@@ -183,8 +177,7 @@ public class HFileWriterV2 extends Abstr
       throw new IllegalStateException("finishInit called twice");
 
     // HFile filesystem-level (non-caching) block writer
-    fsBlockWriter = new HFileBlock.Writer(compressAlgo, blockEncoder,
-        includeMemstoreTS);
+    fsBlockWriter = new HFileBlock.Writer(compressAlgo);
 
     // Data block index writer
     boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite();
@@ -232,9 +225,8 @@ public class HFileWriterV2 extends Abstr
     long startTimeNs = System.nanoTime();
 
     // Update the first data block offset for scanning.
-    if (firstDataBlockOffset == -1) {
+    if (firstDataBlockOffset == -1)
       firstDataBlockOffset = outputStream.getPos();
-    }
 
     // Update the last data block offset
     lastDataBlockOffset = outputStream.getPos();
@@ -250,12 +242,10 @@ public class HFileWriterV2 extends Abstr
     HFile.writeOps.incrementAndGet();
 
     if (cacheConf.shouldCacheDataOnWrite()) {
-      HFileBlock cBlock = fsBlockWriter.getBlockForCaching();
-      HFileBlock codedBlock = blockEncoder.beforeBlockCache(cBlock,
-          includeMemstoreTS);
-      passSchemaMetricsTo(codedBlock);
+      HFileBlock blockForCaching = fsBlockWriter.getBlockForCaching();
+      passSchemaMetricsTo(blockForCaching);
       cacheConf.getBlockCache().cacheBlock(
-          HFile.getBlockCacheKey(name, lastDataBlockOffset), codedBlock);
+          HFile.getBlockCacheKey(name, lastDataBlockOffset), blockForCaching);
     }
   }
 
@@ -266,7 +256,7 @@ public class HFileWriterV2 extends Abstr
         long offset = outputStream.getPos();
         boolean cacheThisBlock = ibw.cacheOnWrite();
         ibw.writeInlineBlock(fsBlockWriter.startWriting(
-            ibw.getInlineBlockType()));
+            ibw.getInlineBlockType(), cacheThisBlock));
         fsBlockWriter.writeHeaderAndData(outputStream);
         ibw.blockWritten(offset, fsBlockWriter.getOnDiskSizeWithHeader(),
             fsBlockWriter.getUncompressedSizeWithoutHeader());
@@ -275,11 +265,9 @@ public class HFileWriterV2 extends Abstr
         if (cacheThisBlock) {
           // Cache this block on write.
           HFileBlock cBlock = fsBlockWriter.getBlockForCaching();
-          HFileBlock codedBlock = blockEncoder.beforeBlockCache(cBlock,
-              includeMemstoreTS);
-          passSchemaMetricsTo(codedBlock);
+          passSchemaMetricsTo(cBlock);
           cacheConf.getBlockCache().cacheBlock(
-              HFile.getBlockCacheKey(name, offset), codedBlock);
+              HFile.getBlockCacheKey(name, offset), cBlock);
         }
       }
     }
@@ -292,7 +280,8 @@ public class HFileWriterV2 extends Abstr
    */
   private void newBlock() throws IOException {
     // This is where the next block begins.
-    fsBlockWriter.startWriting(BlockType.DATA);
+    fsBlockWriter.startWriting(BlockType.DATA,
+        cacheConf.shouldCacheDataOnWrite());
     firstKeyInBlock = null;
   }
 
@@ -424,7 +413,8 @@ public class HFileWriterV2 extends Abstr
         // store the beginning offset
         long offset = outputStream.getPos();
         // write the metadata content
-        DataOutputStream dos = fsBlockWriter.startWriting(BlockType.META);
+        DataOutputStream dos = fsBlockWriter.startWriting(BlockType.META,
+            cacheConf.shouldCacheDataOnWrite());
         metaData.get(i).write(dos);
 
         fsBlockWriter.writeHeaderAndData(outputStream);
@@ -450,7 +440,7 @@ public class HFileWriterV2 extends Abstr
 
     // Meta block index.
     metaBlockIndexWriter.writeSingleLevelIndex(fsBlockWriter.startWriting(
-        BlockType.ROOT_INDEX), "meta");
+        BlockType.ROOT_INDEX, false), "meta");
     fsBlockWriter.writeHeaderAndData(outputStream);
     totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();
 
@@ -460,7 +450,8 @@ public class HFileWriterV2 extends Abstr
     }
 
     // File info
-    writeFileInfo(trailer, fsBlockWriter.startWriting(BlockType.FILE_INFO));
+    writeFileInfo(trailer, fsBlockWriter.startWriting(BlockType.FILE_INFO,
+        false));
     fsBlockWriter.writeHeaderAndData(outputStream);
     totalUncompressedBytes += fsBlockWriter.getUncompressedSizeWithHeader();
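
With the encoder parameter removed again, a version-2 writer is obtained through the factory and driven roughly as below. This is a minimal sketch under assumed values (path, compression choice, cell contents); it only mirrors the five-argument createWriter signature shown in this diff.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.Compression;
    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.util.Bytes;

    public class HFileWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        CacheConfig cacheConf = new CacheConfig(conf);
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/tmp/example.hfile");          // assumed path
        // No HFileDataBlockEncoder argument any more: fs, path, blockSize, compression, comparator.
        HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf).createWriter(
            fs, path, HFile.DEFAULT_BLOCKSIZE,
            Compression.Algorithm.NONE, KeyValue.KEY_COMPARATOR);
        writer.append(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
            Bytes.toBytes("q"), Bytes.toBytes("v")));
        writer.close();
      }
    }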
 

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java?rev=1223042&r1=1223041&r2=1223042&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java Sun Dec 25 02:49:46 2011
@@ -722,10 +722,4 @@ public class LruBlockCache implements Bl
   public void shutdown() {
     this.scheduleThreadPool.shutdown();
   }
-
-  /** Clears the cache. Used in tests. */
-  public void clearCache() {
-    map.clear();
-  }
-
 }

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java?rev=1223042&r1=1223041&r2=1223042&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java Sun Dec 25 02:49:46 2011
@@ -65,8 +65,6 @@ import org.apache.hadoop.hbase.io.Refere
 import org.apache.hadoop.hbase.io.Reference.Range;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
-import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
-import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
@@ -530,13 +528,9 @@ public class LoadIncrementalHFiles exten
     CacheConfig cacheConf = new CacheConfig(conf);
     HalfStoreFileReader halfReader = null;
     StoreFile.Writer halfWriter = null;
-    HFileDataBlockEncoder dataBlockEncoder = new HFileDataBlockEncoderImpl(
-        familyDescriptor.getDataBlockEncodingOnDisk(),
-        familyDescriptor.getDataBlockEncodingInCache(),
-        familyDescriptor.useEncodedDataBlockSeek());
     try {
       halfReader = new HalfStoreFileReader(fs, inFile, cacheConf,
-          reference, dataBlockEncoder);
+          reference);
       Map<byte[], byte[]> fileInfo = halfReader.loadFileInfo();
 
       int blocksize = familyDescriptor.getBlocksize();
@@ -544,8 +538,7 @@ public class LoadIncrementalHFiles exten
       BloomType bloomFilterType = familyDescriptor.getBloomFilterType();
 
       halfWriter = new StoreFile.Writer(
-          fs, outFile, blocksize, compression, dataBlockEncoder,
-          conf, cacheConf,
+          fs, outFile, blocksize, compression, conf, cacheConf,
           KeyValue.COMPARATOR, bloomFilterType, 0);
       HFileScanner scanner = halfReader.getScanner(false, false, false);
       scanner.seekTo();
@@ -645,6 +638,7 @@ public class LoadIncrementalHFiles exten
       Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir));
       for (Path hfile : hfiles) {
         if (hfile.getName().startsWith("_")) continue;
+        
         HFile.Reader reader = HFile.createReader(fs, hfile,
             new CacheConfig(getConf()));
         final byte[] first, last;
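
The loop above decides where each bulk-load HFile belongs by inspecting its key range; a stand-alone sketch of that inspection, with an assumed file path, looks roughly like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.util.Bytes;

    public class HFileRangeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path hfile = new Path("/bulkload/f/hfile-0001");     // assumed path
        HFile.Reader reader = HFile.createReader(fs, hfile, new CacheConfig(conf));
        try {
          reader.loadFileInfo();                             // load file info before reading first/last keys
          byte[] first = reader.getFirstRowKey();
          byte[] last = reader.getLastRowKey();
          System.out.println("range: [" + Bytes.toStringBinary(first)
              + ", " + Bytes.toStringBinary(last) + "]");
        } finally {
          reader.close();
        }
      }
    }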

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java?rev=1223042&r1=1223041&r2=1223042&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java Sun Dec 25 02:49:46 2011
@@ -870,10 +870,6 @@ public class MemStore implements HeapSiz
       ClassSize.COPYONWRITE_ARRAYSET + ClassSize.COPYONWRITE_ARRAYLIST +
       (2 * ClassSize.CONCURRENT_SKIPLISTMAP));
 
-  // Constants for whether to serialize memstore timestamp.
-  public static final boolean NO_PERSISTENT_TS = false;
-  public static final boolean PERSISTENT_TS = true;
-
   /*
    * Calculate how the MemStore size has changed.  Includes overhead of the
    * backing Map.

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=1223042&r1=1223041&r2=1223042&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java Sun Dec 25 02:49:46 2011
@@ -48,8 +48,6 @@ import org.apache.hadoop.hbase.client.Sc
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.Compression;
-import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
-import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
@@ -146,7 +144,6 @@ public class Store extends SchemaConfigu
   private final Compression.Algorithm compression;
   /** Compression algorithm for major compaction */
   private final Compression.Algorithm compactionCompression;
-  private HFileDataBlockEncoder dataBlockEncoder;
 
   // Comparing KeyValues
   final KeyValue.KVComparator comparator;
@@ -184,12 +181,6 @@ public class Store extends SchemaConfigu
     this.compactionCompression =
       (family.getCompactionCompression() != Compression.Algorithm.NONE) ?
         family.getCompactionCompression() : this.compression;
-
-    this.dataBlockEncoder =
-        new HFileDataBlockEncoderImpl(family.getDataBlockEncodingOnDisk(),
-            family.getDataBlockEncodingInCache(),
-            family.useEncodedDataBlockSeek());
-
     this.comparator = info.getComparator();
     // getTimeToLive returns ttl in seconds.  Convert to milliseconds.
     this.ttl = family.getTimeToLive();
@@ -279,21 +270,6 @@ public class Store extends SchemaConfigu
   public Path getHomedir() {
     return homedir;
   }
-  
-  /**
-   * @return the data block encoder
-   */
-  public HFileDataBlockEncoder getDataBlockEncoder() {
-    return dataBlockEncoder;
-  }
-
-  /**
-   * Should be used only in tests.
-   * @param blockEncoder the block delta encoder to use
-   */
-  public void setDataBlockEncoderInTest(HFileDataBlockEncoder blockEncoder) {
-    this.dataBlockEncoder = blockEncoder;
-  }
 
   /*
    * Creates an unsorted list of StoreFile loaded from the given directory.
@@ -316,9 +292,8 @@ public class Store extends SchemaConfigu
         continue;
       }
       StoreFile curfile = new StoreFile(fs, p, this.conf, this.cacheConf,
-          this.family.getBloomFilterType(), this.dataBlockEncoder);
+          this.family.getBloomFilterType());
       passSchemaMetricsTo(curfile);
-
       curfile.createReader();
       long length = curfile.getReader().length();
       this.storeSize += length;
@@ -472,9 +447,8 @@ public class Store extends SchemaConfigu
     StoreFile.rename(fs, srcPath, dstPath);
 
     StoreFile sf = new StoreFile(fs, dstPath, this.conf, this.cacheConf,
-        this.family.getBloomFilterType(), this.dataBlockEncoder);
+        this.family.getBloomFilterType());
     passSchemaMetricsTo(sf);
-
     sf.createReader();
 
     LOG.info("Moved hfile " + srcPath + " into store directory " +
@@ -581,6 +555,7 @@ public class Store extends SchemaConfigu
       MonitoredTask status)
       throws IOException {
     StoreFile.Writer writer;
+    String fileName;
     // Find the smallest read point across all the Scanners.
     long smallestReadPoint = region.getSmallestReadPoint();
     long flushed = 0;
@@ -676,9 +651,8 @@ public class Store extends SchemaConfigu
 
     status.setStatus("Flushing " + this + ": reopening flushed file");
     StoreFile sf = new StoreFile(this.fs, dstPath, this.conf, this.cacheConf,
-        this.family.getBloomFilterType(), this.dataBlockEncoder);
+        this.family.getBloomFilterType());
     passSchemaMetricsTo(sf);
-
     StoreFile.Reader r = sf.createReader();
     this.storeSize += r.length();
     this.totalUncompressedBytes += r.getTotalUncompressedBytes();
@@ -716,7 +690,7 @@ public class Store extends SchemaConfigu
     Compression.Algorithm compression)
   throws IOException {
     StoreFile.Writer w = StoreFile.createWriter(fs, region.getTmpDir(),
-        blocksize, compression, dataBlockEncoder, comparator, conf, cacheConf,
+        blocksize, compression, comparator, conf, cacheConf,
         family.getBloomFilterType(), maxKeyCount);
     // The store file writer's path does not include the CF name, so we need
     // to configure the HFile writer directly.
@@ -1442,7 +1416,7 @@ public class Store extends SchemaConfigu
     StoreFile storeFile = null;
     try {
       storeFile = new StoreFile(this.fs, path, this.conf,
-          this.cacheConf, this.family.getBloomFilterType(), null);
+          this.cacheConf, this.family.getBloomFilterType());
       passSchemaMetricsTo(storeFile);
       storeFile.createReader();
     } catch (IOException e) {
@@ -1494,7 +1468,7 @@ public class Store extends SchemaConfigu
             " to " + destPath);
       }
       result = new StoreFile(this.fs, destPath, this.conf, this.cacheConf,
-          this.family.getBloomFilterType(), this.dataBlockEncoder);
+          this.family.getBloomFilterType());
       passSchemaMetricsTo(result);
       result.createReader();
     }
@@ -2082,8 +2056,8 @@ public class Store extends SchemaConfigu
   }
 
   public static final long FIXED_OVERHEAD = 
-      ClassSize.align(SchemaConfigured.SCHEMA_CONFIGURED_UNALIGNED_HEAP_SIZE +
-          + (19 * ClassSize.REFERENCE) + (7 * Bytes.SIZEOF_LONG)
+      ClassSize.align(new SchemaConfigured().heapSize()
+          + (18 * ClassSize.REFERENCE) + (7 * Bytes.SIZEOF_LONG)
           + (5 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_BOOLEAN);
 
   public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=1223042&r1=1223041&r2=1223042&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Sun Dec 25 02:49:46 2011
@@ -56,8 +56,6 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.io.hfile.HFileWriterV2;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
-import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
-import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.util.BloomFilter;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.apache.hadoop.hbase.util.BloomFilterWriter;
@@ -132,10 +130,6 @@ public class StoreFile extends SchemaCon
   /** Key for timestamp of earliest-put in metadata*/
   public static final byte[] EARLIEST_PUT_TS = Bytes.toBytes("EARLIEST_PUT_TS");
 
-  /** Type of encoding used for data blocks in HFile. Stored in file info. */
-  public static final byte[] DATA_BLOCK_ENCODING =
-      Bytes.toBytes("DATA_BLOCK_ENCODING");
-
   // Make default block size for StoreFiles 8k while testing.  TODO: FIX!
   // Need to make it 8k for testing.
   public static final int DEFAULT_BLOCKSIZE_SMALL = 8 * 1024;
@@ -154,10 +148,7 @@ public class StoreFile extends SchemaCon
   // Block cache configuration and reference.
   private final CacheConfig cacheConf;
 
-  // What kind of data block encoding will be used
-  private HFileDataBlockEncoder dataBlockEncoder;
-
-  // HDFS blocks distribution information
+  // HDFS blocks distribution information
   private HDFSBlocksDistribution hdfsBlocksDistribution;
 
   // Keys for metadata stored in backing HFile.
@@ -216,23 +207,6 @@ public class StoreFile extends SchemaCon
   private long modificationTimeStamp = 0L;
 
   /**
-   * Ignore bloom filters, don't use option inMemory
-   * and dataBlockEncoding in memory.
-   * @param fs The current file system to use
-   * @param p The path of the file.
-   * @param conf The current configuration.
-   * @throws IOException When opening the reader fails.
-   */
-  StoreFile(final FileSystem fs,
-            final Path p,
-            final Configuration conf,
-            final CacheConfig cacheConf)
-      throws IOException {
-    this(fs, p, conf, cacheConf, BloomType.NONE,
-        new NoOpDataBlockEncoder());
-  }
-
-  /**
    * Constructor, loads a reader and it's indices, etc. May allocate a
    * substantial amount of ram depending on the underlying files (10-20MB?).
    *
@@ -246,20 +220,17 @@ public class StoreFile extends SchemaCon
    *          as the Bloom filter type actually present in the HFile, because
    *          column family configuration might change. If this is
    *          {@link BloomType#NONE}, the existing Bloom filter is ignored.
-   * @param dataBlockEncoder data block encoding algorithm.
    * @throws IOException When opening the reader fails.
    */
   StoreFile(final FileSystem fs,
             final Path p,
             final Configuration conf,
             final CacheConfig cacheConf,
-            final BloomType cfBloomType,
-            final HFileDataBlockEncoder dataBlockEncoder)
+            final BloomType cfBloomType)
       throws IOException {
     this.fs = fs;
     this.path = p;
     this.cacheConf = cacheConf;
-    this.dataBlockEncoder = dataBlockEncoder;
     if (isReference(p)) {
       this.reference = Reference.read(fs, p);
       this.referencePath = getReferredToFile(this.path);
@@ -522,10 +493,9 @@ public class StoreFile extends SchemaCon
     }
     if (isReference()) {
       this.reader = new HalfStoreFileReader(this.fs, this.referencePath,
-          this.cacheConf, this.reference, this.dataBlockEncoder);
+          this.cacheConf, this.reference);
     } else {
-      this.reader = new Reader(this.fs, this.path, this.cacheConf,
-          this.dataBlockEncoder);
+      this.reader = new Reader(this.fs, this.path, this.cacheConf);
     }
 
     if (isSchemaConfigured()) {
@@ -707,8 +677,8 @@ public class StoreFile extends SchemaCon
   public static Writer createWriter(final FileSystem fs, final Path dir,
       final int blocksize, Configuration conf, CacheConfig cacheConf)
   throws IOException {
-    return createWriter(fs, dir, blocksize, null, new NoOpDataBlockEncoder(),
-        null, conf, cacheConf, BloomType.NONE, 0);
+    return createWriter(fs, dir, blocksize, null, null, conf, cacheConf,
+        BloomType.NONE, 0);
   }
 
   /**
@@ -719,7 +689,6 @@ public class StoreFile extends SchemaCon
    * Creates a file with a unique name in this directory.
    * @param blocksize
    * @param algorithm Pass null to get default.
-   * @param dataBlockEncoder Pass null to disable data block encoding.
    * @param c Pass null to get default.
    * @param conf HBase system configuration. used with bloom filters
    * @param cacheConf Cache configuration and reference.
@@ -732,7 +701,6 @@ public class StoreFile extends SchemaCon
                                               final Path dir,
                                               final int blocksize,
                                               final Compression.Algorithm algorithm,
-                                              final HFileDataBlockEncoder dataBlockEncoder,
                                               final KeyValue.KVComparator c,
                                               final Configuration conf,
                                               final CacheConfig cacheConf,
@@ -750,7 +718,7 @@ public class StoreFile extends SchemaCon
 
     return new Writer(fs, path, blocksize,
         algorithm == null? HFile.DEFAULT_COMPRESSION_ALGORITHM: algorithm,
-        dataBlockEncoder, conf, cacheConf, c == null ? KeyValue.COMPARATOR : c, bloomType,
+        conf, cacheConf, c == null ? KeyValue.COMPARATOR: c, bloomType,
         maxKeyCount);
   }
 
@@ -846,8 +814,6 @@ public class StoreFile extends SchemaCon
     private KeyValue lastDeleteFamilyKV = null;
     private long deleteFamilyCnt = 0;
 
-    protected HFileDataBlockEncoder dataBlockEncoder;
-
     TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
     /* isTimeRangeTrackerSet keeps track if the timeRange has already been set
      * When flushing a memstore, we set TimeRange and use this variable to
@@ -872,16 +838,13 @@ public class StoreFile extends SchemaCon
      * @throws IOException problem writing to FS
      */
     public Writer(FileSystem fs, Path path, int blocksize,
-        Compression.Algorithm compress,
-        HFileDataBlockEncoder dataBlockEncoder, final Configuration conf,
+        Compression.Algorithm compress, final Configuration conf,
         CacheConfig cacheConf,
         final KVComparator comparator, BloomType bloomType, long maxKeys)
         throws IOException {
-      this.dataBlockEncoder = dataBlockEncoder != null ?
-          dataBlockEncoder : new NoOpDataBlockEncoder();
       writer = HFile.getWriterFactory(conf, cacheConf).createWriter(
           fs, path, blocksize,
-          compress, this.dataBlockEncoder, comparator.getRawComparator());
+          compress, comparator.getRawComparator());
 
       this.kvComparator = comparator;
 
@@ -1118,10 +1081,6 @@ public class StoreFile extends SchemaCon
     }
 
     public void close() throws IOException {
-      // (optional) Add data block encoding used to save this file
-      // It is mostly for statistics and debugging purpose.
-      dataBlockEncoder.saveMetadata(this);
-
       boolean hasGeneralBloom = this.closeGeneralBloomFilter();
       boolean hasDeleteFamilyBloom = this.closeDeleteFamilyBloomFilter();
 
@@ -1160,12 +1119,10 @@ public class StoreFile extends SchemaCon
     private byte[] lastBloomKey;
     private long deleteFamilyCnt = -1;
 
-    public Reader(FileSystem fs, Path path, CacheConfig cacheConf,
-        HFileDataBlockEncoder dataBlockEncoder)
+    public Reader(FileSystem fs, Path path, CacheConfig cacheConf)
         throws IOException {
       super(path);
-      reader = HFile.createReader(fs, path, cacheConf,
-          dataBlockEncoder);
+      reader = HFile.createReader(fs, path, cacheConf);
       bloomFilterType = BloomType.NONE;
     }
 
@@ -1305,7 +1262,7 @@ public class StoreFile extends SchemaCon
 
         default:
           return true;
-      }
+      }      
     }
 
     public boolean passesDeleteFamilyBloomFilter(byte[] row, int rowOffset,
@@ -1355,7 +1312,7 @@ public class StoreFile extends SchemaCon
         return true;
 
       byte[] key;
-      switch (bloomFilterType) {
+      switch (bloomFilterType) { 
         case ROW:
           if (col != null) {
             throw new RuntimeException("Row-only Bloom filter called with " +

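Taken together, the StoreFile hunks above remove the HFileDataBlockEncoder from both the writer and the reader paths. Below is a minimal sketch of how a store file would be written and read back after this change; the local filesystem, directory, block size, and key/value bytes are illustrative assumptions, and only the createWriter and Reader signatures mirror the diff.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.Compression;
    import org.apache.hadoop.hbase.regionserver.StoreFile;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);        // illustrative: local FS
        CacheConfig cacheConf = new CacheConfig(conf);
        Path dir = new Path("/tmp/storefile_sketch");     // illustrative path

        // Post-change createWriter: no HFileDataBlockEncoder argument.
        StoreFile.Writer writer = StoreFile.createWriter(fs, dir, 2048,
            Compression.Algorithm.NONE, KeyValue.COMPARATOR, conf, cacheConf,
            StoreFile.BloomType.NONE, 1);
        writer.append(new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"),
            Bytes.toBytes("q"), Bytes.toBytes("value")));
        writer.close();

        // Post-change Reader: (fs, path, cacheConf), again with no encoder.
        StoreFile.Reader reader =
            new StoreFile.Reader(fs, writer.getPath(), cacheConf);
        reader.loadFileInfo();
      }
    }
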
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java?rev=1223042&r1=1223041&r2=1223042&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java Sun Dec 25 02:49:46 2011
@@ -177,7 +177,7 @@ class StoreFileScanner implements KeyVal
         realSeekDone = true;
       }
     } catch (IOException ioe) {
-      throw new IOException("Could not seek " + this + " " + key, ioe);
+      throw new IOException("Could not seek " + this, ioe);
     }
   }
 

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaConfigured.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaConfigured.java?rev=1223042&r1=1223041&r2=1223042&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaConfigured.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaConfigured.java Sun Dec 25 02:49:46 2011
@@ -83,15 +83,6 @@ public class SchemaConfigured implements
   }
 
   /**
-   * Creates an instance corresponding to an unknown table and column family.
-   * Used in unit tests. 
-   */
-  public static SchemaConfigured createUnknown() {
-    return new SchemaConfigured(null, SchemaMetrics.UNKNOWN,
-        SchemaMetrics.UNKNOWN);
-  }
-
-  /**
    * Default constructor. Only use when column/family name are not known at
    * construction (i.e. for HFile blocks).
    */

Modified: hbase/trunk/src/main/ruby/hbase/admin.rb
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/ruby/hbase/admin.rb?rev=1223042&r1=1223041&r2=1223042&view=diff
==============================================================================
--- hbase/trunk/src/main/ruby/hbase/admin.rb (original)
+++ hbase/trunk/src/main/ruby/hbase/admin.rb Sun Dec 25 02:49:46 2011
@@ -532,9 +532,6 @@ module Hbase
       family.setInMemory(JBoolean.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY)
       family.setTimeToLive(JInteger.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::TTL])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL)
       family.setCompressionType(org.apache.hadoop.hbase.io.hfile.Compression::Algorithm.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION)
-      family.setDataBlockEncodingOnDisk(org.apache.hadoop.hbase.io.encoding.DataBlockEncodingAlgorithms::Algorithm.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING_ON_DISK])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING_ON_DISK)
-      family.setDataBlockEncodingInCache(org.apache.hadoop.hbase.io.encoding.DataBlockEncodingAlgorithms::Algorithm.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING_IN_CACHE])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING_IN_CACHE)
-      family.setEncodedDataBlockSeek(JBoolean.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::ENCODED_DATA_BLOCK_SEEK])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::ENCODED_DATA_BLOCK_SEEK)
       family.setBlocksize(JInteger.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE)
       family.setMaxVersions(JInteger.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS)
       family.setMinVersions(JInteger.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::MIN_VERSIONS])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::MIN_VERSIONS)

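Each of the remaining Ruby arguments above maps onto an HColumnDescriptor setter on the Java side. A minimal Java sketch of the setters that stay wired up once the data-block-encoding options are removed; the family name and values are illustrative, the setter names mirror the Ruby calls above.

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.io.hfile.Compression;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilySetterSketch {
      public static void main(String[] args) {
        HColumnDescriptor family = new HColumnDescriptor(Bytes.toBytes("cf"));
        family.setCompressionType(Compression.Algorithm.valueOf("GZ"));  // COMPRESSION
        family.setTimeToLive(86400);                                     // TTL
        family.setInMemory(true);                                        // IN_MEMORY
        family.setBlocksize(65536);                                      // BLOCKSIZE
        family.setMaxVersions(3);                                        // VERSIONS
        family.setMinVersions(0);                                        // MIN_VERSIONS
      }
    }
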
Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java?rev=1223042&r1=1223041&r2=1223042&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java Sun Dec 25 02:49:46 2011
@@ -221,33 +221,18 @@ public abstract class HBaseTestCase exte
       final int minVersions, final int versions, final int ttl, boolean keepDeleted) {
     HTableDescriptor htd = new HTableDescriptor(name);
     htd.addFamily(new HColumnDescriptor(fam1, minVersions, versions,
-      keepDeleted,
-      HColumnDescriptor.DEFAULT_COMPRESSION, 
-      HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING_ON_DISK,
-      HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING_IN_CACHE,
-      HColumnDescriptor.DEFAULT_ENCODED_DATA_BLOCK_SEEK,
-      false, false,
-      HColumnDescriptor.DEFAULT_BLOCKSIZE, ttl,
-      HColumnDescriptor.DEFAULT_BLOOMFILTER,
-      HConstants.REPLICATION_SCOPE_LOCAL));
+        keepDeleted, HColumnDescriptor.DEFAULT_COMPRESSION, false, false,
+        HColumnDescriptor.DEFAULT_BLOCKSIZE, ttl,
+        HColumnDescriptor.DEFAULT_BLOOMFILTER,
+        HConstants.REPLICATION_SCOPE_LOCAL));
     htd.addFamily(new HColumnDescriptor(fam2, minVersions, versions,
-        keepDeleted,
-        HColumnDescriptor.DEFAULT_COMPRESSION,
-        HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING_ON_DISK,
-        HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING_IN_CACHE,
-        HColumnDescriptor.DEFAULT_ENCODED_DATA_BLOCK_SEEK,
-        false, false,
+        keepDeleted, HColumnDescriptor.DEFAULT_COMPRESSION, false, false,
         HColumnDescriptor.DEFAULT_BLOCKSIZE, ttl,
         HColumnDescriptor.DEFAULT_BLOOMFILTER,
         HConstants.REPLICATION_SCOPE_LOCAL));
     htd.addFamily(new HColumnDescriptor(fam3, minVersions, versions,
-        keepDeleted,
-        HColumnDescriptor.DEFAULT_COMPRESSION,
-        HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING_ON_DISK,
-        HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING_IN_CACHE,
-        HColumnDescriptor.DEFAULT_ENCODED_DATA_BLOCK_SEEK,
-        false, false,
-        HColumnDescriptor.DEFAULT_BLOCKSIZE,  ttl,
+        keepDeleted, HColumnDescriptor.DEFAULT_COMPRESSION, false, false,
+        HColumnDescriptor.DEFAULT_BLOCKSIZE, ttl,
         HColumnDescriptor.DEFAULT_BLOOMFILTER,
         HConstants.REPLICATION_SCOPE_LOCAL));
     return htd;

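The HBaseTestCase change above falls back to the HColumnDescriptor constructor without the three data-block-encoding parameters. A minimal sketch of that constructor as used post-change; the family name, version counts, and TTL are illustrative, and the argument order simply mirrors the new calls above.

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ColumnFamilySketch {
      public static void main(String[] args) {
        HTableDescriptor htd = new HTableDescriptor("sketchTable");  // illustrative name
        // Order as in the post-change calls: family, minVersions, maxVersions,
        // keepDeletedCells, compression, two booleans (passed as false above),
        // blocksize, ttl, bloom filter, replication scope.
        htd.addFamily(new HColumnDescriptor(Bytes.toBytes("cf"), 0, 3,
            false, HColumnDescriptor.DEFAULT_COMPRESSION, false, false,
            HColumnDescriptor.DEFAULT_BLOCKSIZE, HConstants.FOREVER,
            HColumnDescriptor.DEFAULT_BLOOMFILTER,
            HConstants.REPLICATION_SCOPE_LOCAL));
      }
    }
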
Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java?rev=1223042&r1=1223041&r2=1223042&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java Sun Dec 25 02:49:46 2011
@@ -191,8 +191,7 @@ public class HFilePerformanceEvaluation 
     void setUp() throws Exception {
       writer =
         HFile.getWriterFactoryNoCache(conf).createWriter(this.fs,
-            this.mf, RFILE_BLOCKSIZE, (Compression.Algorithm) null, null,
-            null);
+            this.mf, RFILE_BLOCKSIZE, (Compression.Algorithm) null, null);
     }
 
     @Override
@@ -366,4 +365,4 @@ public class HFilePerformanceEvaluation 
   public static void main(String[] args) throws Exception {
     new HFilePerformanceEvaluation().runBenchmarks();
   }
-}
+}
\ No newline at end of file

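At the HFile layer the encoder parameters disappear as well: the writer factory call above now takes only (fs, path, blockSize, compression, comparator), and HFile.createReader, as seen in the StoreFile.Reader hunk, takes (fs, path, cacheConf). A minimal round-trip sketch under those assumptions; the path, block size, and key/value bytes are illustrative, and the null compression/comparator simply match the benchmark's setUp() above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.Compression;
    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.io.hfile.HFileScanner;
    import org.apache.hadoop.hbase.util.Bytes;

    public class HFileRoundTripSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);     // illustrative: local FS
        Path path = new Path("/tmp/hfile_sketch");     // illustrative path

        // Post-change writer factory: no data block encoder argument.
        HFile.Writer writer = HFile.getWriterFactoryNoCache(conf).createWriter(
            fs, path, 64 * 1024, (Compression.Algorithm) null, null);
        writer.append(Bytes.toBytes("key1"), Bytes.toBytes("value1"));
        writer.close();

        // Post-change reader: (fs, path, cacheConf) only.
        HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf));
        reader.loadFileInfo();
        HFileScanner scanner = reader.getScanner(false, false);
        scanner.seekTo();
        reader.close();
      }
    }
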
Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java?rev=1223042&r1=1223041&r2=1223042&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java Sun Dec 25 02:49:46 2011
@@ -143,9 +143,6 @@ public class TestFromClientSide {
          HColumnDescriptor.DEFAULT_VERSIONS,
          true,
          HColumnDescriptor.DEFAULT_COMPRESSION,
-         HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING_ON_DISK,
-         HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING_IN_CACHE,
-         HColumnDescriptor.DEFAULT_ENCODED_DATA_BLOCK_SEEK,
          HColumnDescriptor.DEFAULT_IN_MEMORY,
          HColumnDescriptor.DEFAULT_BLOCKCACHE,
          HColumnDescriptor.DEFAULT_BLOCKSIZE,

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java?rev=1223042&r1=1223041&r2=1223042&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java Sun Dec 25 02:49:46 2011
@@ -31,7 +31,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.encoding.DataBlockEncodings;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -99,7 +98,7 @@ public class TestHalfStoreFileReader {
       CacheConfig cacheConf)
       throws IOException {
     final HalfStoreFileReader halfreader =
-        new HalfStoreFileReader(fs, p, cacheConf, bottom, null);
+        new HalfStoreFileReader(fs, p, cacheConf, bottom);
     halfreader.loadFileInfo();
     final HFileScanner scanner = halfreader.getScanner(false, false);
 

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java?rev=1223042&r1=1223041&r2=1223042&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java Sun Dec 25 02:49:46 2011
@@ -295,14 +295,7 @@ public class TestHeapSize extends TestCa
       assertEquals(expected, actual);
     }
 
-    // SchemaConfigured
-    LOG.debug("Heap size for: " + SchemaConfigured.class.getName());
-    SchemaConfigured sc = new SchemaConfigured(null, "myTable", "myCF");
-    assertEquals(ClassSize.estimateBase(SchemaConfigured.class, true),
-        sc.heapSize());
-
     // Store Overhead
-    LOG.debug("Heap size for: " + Store.class.getName());
     cl = Store.class;
     actual = Store.FIXED_OVERHEAD;
     expected = ClassSize.estimateBase(cl, false);
@@ -326,6 +319,10 @@ public class TestHeapSize extends TestCa
     // accounted for.  But we have satisfied our two core requirements.
     // Sizing is quite accurate now, and our tests will throw errors if
     // any of these classes are modified without updating overhead sizes.
+
+    SchemaConfigured sc = new SchemaConfigured(null, "myTable", "myCF");
+    assertEquals(ClassSize.estimateBase(SchemaConfigured.class, true),
+        sc.heapSize());
   }
 
   @org.junit.Rule

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java?rev=1223042&r1=1223041&r2=1223042&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java Sun Dec 25 02:49:46 2011
@@ -41,15 +41,9 @@ import org.apache.hadoop.hbase.regionser
 
 public class CacheTestUtils {
 
-  private static final boolean includesMemstoreTS = true;
+  /*Just checks if heapsize grows when something is cached, and gets smaller when the same object is evicted*/
 
-  /**
-   * Just checks if heapsize grows when something is cached, and gets smaller
-   * when the same object is evicted
-   */
-
-  public static void testHeapSizeChanges(final BlockCache toBeTested,
-      final int blockSize) {
+  public static void testHeapSizeChanges(final BlockCache toBeTested, final int blockSize){
     HFileBlockPair[] blocks = generateHFileBlocks(blockSize, 1);
     long heapSize = ((HeapSize) toBeTested).heapSize();
     toBeTested.cacheBlock(blocks[0].blockName, blocks[0].block);
@@ -322,8 +316,7 @@ public class CacheTestUtils {
 
       HFileBlock generated = new HFileBlock(BlockType.DATA,
           onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader,
-          prevBlockOffset, cachedBuffer, HFileBlock.DONT_FILL_HEADER,
-          blockSize, includesMemstoreTS);
+          prevBlockOffset, cachedBuffer, false, blockSize);
 
       String strKey;
       /* No conflicting keys */

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java?rev=1223042&r1=1223041&r2=1223042&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java Sun Dec 25 02:49:46 2011
@@ -33,7 +33,6 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.io.encoding.DataBlockEncodings;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.junit.After;
@@ -43,7 +42,6 @@ import org.junit.experimental.categories
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
-
 import static org.junit.Assert.*;
 
 /**
@@ -63,12 +61,10 @@ public class TestCacheOnWrite {
   private FileSystem fs;
   private Random rand = new Random(12983177L);
   private Path storeFilePath;
+  private Compression.Algorithm compress;
+  private CacheOnWriteType cowType;
   private BlockCache blockCache;
-  private String testDescription;
-
-  private final CacheOnWriteType cowType;
-  private final Compression.Algorithm compress;
-  private final BlockEncoderTestType encoderType;
+  private String testName;
 
   private static final int DATA_BLOCK_SIZE = 2048;
   private static final int NUM_KV = 25000;
@@ -80,90 +76,49 @@ public class TestCacheOnWrite {
       KeyValue.Type.values().length - 2;
 
   private static enum CacheOnWriteType {
-    DATA_BLOCKS(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,
-        BlockType.DATA, BlockType.ENCODED_DATA),
-    BLOOM_BLOCKS(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
-        BlockType.BLOOM_CHUNK),
-    INDEX_BLOCKS(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
-        BlockType.LEAF_INDEX, BlockType.INTERMEDIATE_INDEX);
+    DATA_BLOCKS(BlockType.DATA, CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY),
+    BLOOM_BLOCKS(BlockType.BLOOM_CHUNK,
+        CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY),
+    INDEX_BLOCKS(BlockType.LEAF_INDEX,
+        CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY);
 
     private final String confKey;
-    private final BlockType blockType1;
-    private final BlockType blockType2;
-
-    private CacheOnWriteType(String confKey, BlockType blockType) {
-      this(confKey, blockType, blockType);
-    }
+    private final BlockType inlineBlockType;
 
-    private CacheOnWriteType(String confKey, BlockType blockType1,
-        BlockType blockType2) {
-      this.blockType1 = blockType1;
-      this.blockType2 = blockType2;
+    private CacheOnWriteType(BlockType inlineBlockType, String confKey) {
+      this.inlineBlockType = inlineBlockType;
       this.confKey = confKey;
     }
 
     public boolean shouldBeCached(BlockType blockType) {
-      return blockType == blockType1 || blockType == blockType2;
+      return blockType == inlineBlockType
+          || blockType == BlockType.INTERMEDIATE_INDEX
+          && inlineBlockType == BlockType.LEAF_INDEX;
     }
 
     public void modifyConf(Configuration conf) {
-      for (CacheOnWriteType cowType : CacheOnWriteType.values()) {
+      for (CacheOnWriteType cowType : CacheOnWriteType.values())
         conf.setBoolean(cowType.confKey, cowType == this);
-      }
     }
 
   }
 
-  private static final DataBlockEncodings.Algorithm ENCODING_ALGO =
-      DataBlockEncodings.Algorithm.PREFIX;
-
-  /** Provides fancy names for four combinations of two booleans */
-  private static enum BlockEncoderTestType {
-    NO_BLOCK_ENCODING(false, false),
-    BLOCK_ENCODING_IN_CACHE_ONLY(false, true),
-    BLOCK_ENCODING_ON_DISK_ONLY(true, false),
-    BLOCK_ENCODING_EVERYWHERE(true, true);
-
-    private final boolean encodeOnDisk;
-    private final boolean encodeInCache;
-
-    BlockEncoderTestType(boolean encodeOnDisk, boolean encodeInCache) {
-      this.encodeOnDisk = encodeOnDisk;
-      this.encodeInCache = encodeInCache;
-    }
-
-    public HFileDataBlockEncoder getEncoder() {
-      // We always use an encoded seeker. It should not have effect if there
-      // is no encoding in cache.
-      return new HFileDataBlockEncoderImpl(
-          encodeOnDisk ? ENCODING_ALGO : DataBlockEncodings.Algorithm.NONE,
-          encodeInCache ? ENCODING_ALGO : DataBlockEncodings.Algorithm.NONE,
-          true);
-    }
-  }
-
   public TestCacheOnWrite(CacheOnWriteType cowType,
-      Compression.Algorithm compress, BlockEncoderTestType encoderType) {
+      Compression.Algorithm compress) {
     this.cowType = cowType;
     this.compress = compress;
-    this.encoderType = encoderType;
-    testDescription = "[cacheOnWrite=" + cowType + ", compress=" + compress + 
-        ", encoderType=" + encoderType + "]";
-    System.out.println(testDescription);
+    testName = "[cacheOnWrite=" + cowType + ", compress=" + compress + "]";
+    System.out.println(testName);
   }
 
   @Parameters
   public static Collection<Object[]> getParameters() {
     List<Object[]> cowTypes = new ArrayList<Object[]>();
-    for (CacheOnWriteType cowType : CacheOnWriteType.values()) {
+    for (CacheOnWriteType cowType : CacheOnWriteType.values())
       for (Compression.Algorithm compress :
            HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
-        for (BlockEncoderTestType encoderType :
-             BlockEncoderTestType.values()) {
-          cowTypes.add(new Object[] { cowType, compress, encoderType });
-        }
+        cowTypes.add(new Object[] { cowType, compress });
       }
-    }
     return cowTypes;
   }
 
@@ -201,10 +156,10 @@ public class TestCacheOnWrite {
 
   private void readStoreFile() throws IOException {
     HFileReaderV2 reader = (HFileReaderV2) HFile.createReader(fs,
-        storeFilePath, cacheConf, encoderType.getEncoder());
+        storeFilePath, cacheConf);
     LOG.info("HFile information: " + reader);
     HFileScanner scanner = reader.getScanner(false, false);
-    assertTrue(testDescription, scanner.seekTo());
+    assertTrue(testName, scanner.seekTo());
 
     long offset = 0;
     HFileBlock prevBlock = null;
@@ -219,11 +174,10 @@ public class TestCacheOnWrite {
       // Flags: don't cache the block, use pread, this is not a compaction.
       HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
           false);
-      BlockCacheKey blockCacheKey = HFile.getBlockCacheKey(reader.getName(),
-          offset);
+      BlockCacheKey blockCacheKey = HFile.getBlockCacheKey(reader.getName(), offset);
       boolean isCached = blockCache.getBlock(blockCacheKey, true) != null;
       boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
-      assertEquals(testDescription + " " + block, shouldBeCached, isCached);
+      assertEquals(testName + " " + block, shouldBeCached, isCached);
       prevBlock = block;
       offset += block.getOnDiskSizeWithHeader();
       BlockType bt = block.getBlockType();
@@ -233,10 +187,8 @@ public class TestCacheOnWrite {
 
     LOG.info("Block count by type: " + blockCountByType);
     String countByType = blockCountByType.toString();
-    BlockType cachedDataBlockType =
-        encoderType.encodeInCache ? BlockType.ENCODED_DATA : BlockType.DATA;
-    assertEquals("{" + cachedDataBlockType
-        + "=1379, LEAF_INDEX=173, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=24}",
+    assertEquals(
+        "{DATA=1379, LEAF_INDEX=173, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=24}",
         countByType);
 
     reader.close();
@@ -262,9 +214,8 @@ public class TestCacheOnWrite {
     Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(),
         "test_cache_on_write");
     StoreFile.Writer sfw = StoreFile.createWriter(fs, storeFileParentDir,
-        DATA_BLOCK_SIZE, compress, encoderType.getEncoder(),
-        KeyValue.COMPARATOR, conf, cacheConf, StoreFile.BloomType.ROWCOL,
-        NUM_KV);
+        DATA_BLOCK_SIZE, compress, KeyValue.COMPARATOR, conf,
+        cacheConf, StoreFile.BloomType.ROWCOL, NUM_KV);
 
     final int rowLen = 32;
     for (int i = 0; i < NUM_KV; ++i) {
@@ -285,6 +236,7 @@ public class TestCacheOnWrite {
     storeFilePath = sfw.getPath();
   }
 
+
   @org.junit.Rule
   public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
     new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();