Posted to commits@hbase.apache.org by jx...@apache.org on 2013/11/13 18:31:04 UTC

svn commit: r1541629 [2/2] - in /hbase/trunk: hbase-client/src/main/java/org/apache/hadoop/hbase/ hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/ hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/ hbase-server/src/main/java/or...

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java Wed Nov 13 17:31:02 2013
@@ -168,11 +168,10 @@ public class StoreFileInfo {
    * Open a Reader for the StoreFile
    * @param fs The current file system to use.
    * @param cacheConf The cache configuration and block cache reference.
-   * @param dataBlockEncoding data block encoding algorithm.
    * @return The StoreFile.Reader for the file
    */
-  public StoreFile.Reader open(final FileSystem fs, final CacheConfig cacheConf,
-      final DataBlockEncoding dataBlockEncoding) throws IOException {
+  public StoreFile.Reader open(final FileSystem fs,
+      final CacheConfig cacheConf) throws IOException {
     FSDataInputStreamWrapper in;
     FileStatus status;
 
@@ -198,19 +197,18 @@ public class StoreFileInfo {
     StoreFile.Reader reader = null;
     if (this.coprocessorHost != null) {
       reader = this.coprocessorHost.preStoreFileReaderOpen(fs, this.getPath(), in, length,
-          cacheConf, dataBlockEncoding, reference);
+        cacheConf, reference);
     }
     if (reader == null) {
       if (this.reference != null) {
-        reader = new HalfStoreFileReader(fs, this.getPath(), in, length, cacheConf, reference,
-            dataBlockEncoding);
+        reader = new HalfStoreFileReader(fs, this.getPath(), in, length, cacheConf, reference);
       } else {
-        reader = new StoreFile.Reader(fs, this.getPath(), in, length, cacheConf, dataBlockEncoding);
+        reader = new StoreFile.Reader(fs, this.getPath(), in, length, cacheConf);
       }
     }
     if (this.coprocessorHost != null) {
       reader = this.coprocessorHost.postStoreFileReaderOpen(fs, this.getPath(), in, length,
-          cacheConf, dataBlockEncoding, reference, reader);
+        cacheConf, reference, reader);
     }
     return reader;
   }
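
The net effect for callers: the DataBlockEncoding argument drops out, since the reader now derives the encoding from the HFile itself. A minimal caller sketch against the new signature (the info/fs/cacheConf setup is illustrative, not part of this patch):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.regionserver.StoreFile;
    import org.apache.hadoop.hbase.regionserver.StoreFileInfo;

    public class OpenReaderSketch {
      // Pre-r1541629 callers also passed a DataBlockEncoding hint; now the
      // file's own encoding is authoritative.
      static StoreFile.Reader openReader(StoreFileInfo info, FileSystem fs,
          CacheConfig cacheConf) throws IOException {
        return info.open(fs, cacheConf);
      }
    }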

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java Wed Nov 13 17:31:02 2013
@@ -27,7 +27,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
@@ -38,7 +37,6 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.io.hfile.HFileWriterV2;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
@@ -148,7 +146,7 @@ public abstract class Compactor {
           ", keycount=" + keyCount +
           ", bloomtype=" + r.getBloomFilterType().toString() +
           ", size=" + StringUtils.humanReadableInt(r.length()) +
-          ", encoding=" + r.getHFileReader().getEncodingOnDisk() +
+          ", encoding=" + r.getHFileReader().getDataBlockEncoding() +
           ", seqNum=" + seqNum +
           (calculatePutTs ? ", earliestPutTs=" + earliestPutTs: ""));
       }
@@ -199,7 +197,6 @@ public abstract class Compactor {
     return store.getCoprocessorHost().preCompact(store, scanner, scanType, request);
   }
 
-  @SuppressWarnings("deprecation")
   /**
    * Performs the compaction.
    * @param scanner Where to read from.

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java Wed Nov 13 17:31:02 2013
@@ -31,7 +31,6 @@ import java.util.NavigableSet;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import com.google.common.collect.ImmutableList;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
@@ -41,17 +40,16 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.Reference;
-import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
@@ -68,6 +66,8 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 
+import com.google.common.collect.ImmutableList;
+
 /**
  * A sample region observer that tests the RegionObserver interface.
  * It works with TestRegionObserverInterface to provide the test case.
@@ -561,7 +561,7 @@ public class SimpleRegionObserver extend
   @Override
   public Reader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
       FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
-      DataBlockEncoding preferredEncodingInCache, Reference r, Reader reader) throws IOException {
+      Reference r, Reader reader) throws IOException {
     ctPreStoreFileReaderOpen.incrementAndGet();
     return null;
   }
@@ -569,7 +569,7 @@ public class SimpleRegionObserver extend
   @Override
   public Reader postStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
       FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
-      DataBlockEncoding preferredEncodingInCache, Reference r, Reader reader) throws IOException {
+      Reference r, Reader reader) throws IOException {
     ctPostStoreFileReaderOpen.incrementAndGet();
     return reader;
   }
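
For coprocessor authors, the store-file reader hooks lose their DataBlockEncoding parameter. A minimal observer against the new signatures, assuming the era's BaseRegionObserver base class:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
    import org.apache.hadoop.hbase.io.Reference;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;

    public class ReaderOpenObserver extends BaseRegionObserver {
      @Override
      public Reader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
          FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
          Reference r, Reader reader) throws IOException {
        return null;   // null means: let HBase build the default reader
      }

      @Override
      public Reader postStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
          FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
          Reference r, Reader reader) throws IOException {
        return reader; // pass the constructed reader through unchanged
      }
    }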

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java Wed Nov 13 17:31:02 2013
@@ -117,7 +117,7 @@ public class TestHalfStoreFileReader {
       CacheConfig cacheConf)
       throws IOException {
     final HalfStoreFileReader halfreader = new HalfStoreFileReader(fs, p,
-        cacheConf, bottom, DataBlockEncoding.NONE);
+      cacheConf, bottom);
     halfreader.loadFileInfo();
     final HFileScanner scanner = halfreader.getScanner(false, false);
 
@@ -218,7 +218,7 @@ public class TestHalfStoreFileReader {
                                         CacheConfig cacheConfig)
             throws IOException {
       final HalfStoreFileReader halfreader = new HalfStoreFileReader(fs, p,
-              cacheConfig, bottom, DataBlockEncoding.NONE);
+              cacheConfig, bottom);
       halfreader.loadFileInfo();
       final HFileScanner scanner = halfreader.getScanner(false, false);
       scanner.seekBefore(seekBefore.getKey());
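
HalfStoreFileReader likewise loses its encoding argument. A minimal sketch of the four-argument constructor the test now uses (the fs/p/bottom setup is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.HalfStoreFileReader;
    import org.apache.hadoop.hbase.io.Reference;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.HFileScanner;

    public class HalfReaderSketch {
      // "bottom" is a Reference selecting which half of the parent file to expose.
      static HFileScanner openHalf(FileSystem fs, Path p, CacheConfig cacheConf,
          Reference bottom) throws IOException {
        HalfStoreFileReader halfreader = new HalfStoreFileReader(fs, p, cacheConf, bottom);
        halfreader.loadFileInfo();
        return halfreader.getScanner(false, false);
      }
    }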

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java Wed Nov 13 17:31:02 2013
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.client.Re
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.zookeeper.ZKAssign;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -101,6 +102,7 @@ public class TestChangingEncoding {
     conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024);
     // ((Log4JLogger)RpcServerImplementation.LOG).getLogger().setLevel(Level.TRACE);
     // ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.TRACE);
+    conf.setBoolean("hbase.online.schema.update.enable", true);
     TEST_UTIL.startMiniCluster();
   }
 
@@ -175,23 +177,30 @@ public class TestChangingEncoding {
   }
 
   private void setEncodingConf(DataBlockEncoding encoding,
-      boolean encodeOnDisk) throws IOException {
+      boolean onlineChange) throws Exception {
     LOG.debug("Setting CF encoding to " + encoding + " (ordinal="
-        + encoding.ordinal() + "), encodeOnDisk=" + encodeOnDisk);
-    admin.disableTable(tableName);
+      + encoding.ordinal() + "), onlineChange=" + onlineChange);
     hcd.setDataBlockEncoding(encoding);
-    hcd.setEncodeOnDisk(encodeOnDisk);
+    if (!onlineChange) {
+      admin.disableTable(tableName);
+    }
     admin.modifyColumn(tableName, hcd);
-    admin.enableTable(tableName);
+    if (!onlineChange) {
+      admin.enableTable(tableName);
+    }
+    // This is a unit test, not integration test. So let's
+    // wait for regions out of transition. Otherwise, for online
+    // encoding change, verification phase may be flaky because
+    // regions could be still in transition.
+    ZKAssign.blockUntilNoRIT(TEST_UTIL.getZooKeeperWatcher());
   }
 
   @Test(timeout=TIMEOUT_MS)
   public void testChangingEncoding() throws Exception {
     prepareTest("ChangingEncoding");
-    for (boolean encodeOnDisk : new boolean[]{false, true}) {
+    for (boolean onlineChange : new boolean[]{false, true}) {
       for (DataBlockEncoding encoding : ENCODINGS_TO_ITERATE) {
-        LOG.info("encoding=" + encoding + ", encodeOnDisk=" + encodeOnDisk);
-        setEncodingConf(encoding, encodeOnDisk);
+        setEncodingConf(encoding, onlineChange);
         writeSomeNewData();
         verifyAllData();
       }
@@ -201,35 +210,9 @@ public class TestChangingEncoding {
   @Test(timeout=TIMEOUT_MS)
   public void testChangingEncodingWithCompaction() throws Exception {
     prepareTest("ChangingEncodingWithCompaction");
-    for (boolean encodeOnDisk : new boolean[]{false, true}) {
+    for (boolean onlineChange : new boolean[]{false, true}) {
       for (DataBlockEncoding encoding : ENCODINGS_TO_ITERATE) {
-        setEncodingConf(encoding, encodeOnDisk);
-        writeSomeNewData();
-        verifyAllData();
-        compactAndWait();
-        verifyAllData();
-      }
-    }
-  }
-
-  @Test(timeout=TIMEOUT_MS)
-  public void testFlippingEncodeOnDisk() throws Exception {
-    prepareTest("FlippingEncodeOnDisk");
-    // The focus of this test case is to flip the "encoding on disk" flag,
-    // so we only try a couple of encodings.
-    DataBlockEncoding[] encodings = new DataBlockEncoding[] {
-        DataBlockEncoding.NONE, DataBlockEncoding.FAST_DIFF };
-    for (DataBlockEncoding encoding : encodings) {
-      boolean[] flagValues;
-      if (encoding == DataBlockEncoding.NONE) {
-        // encodeOnDisk does not matter when not using encoding.
-        flagValues =
-            new boolean[] { HColumnDescriptor.DEFAULT_ENCODE_ON_DISK };
-      } else {
-        flagValues = new boolean[] { false, true, false, true };
-      }
-      for (boolean encodeOnDisk : flagValues) {
-        setEncodingConf(encoding, encodeOnDisk);
+        setEncodingConf(encoding, onlineChange);
         writeSomeNewData();
         verifyAllData();
         compactAndWait();
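
Both tests now cover the online schema change path enabled above through hbase.online.schema.update.enable. A condensed sketch of that path (the TableName and ZooKeeperWatcher parameter types are assumptions about the surrounding API, and FAST_DIFF is just an example encoding):

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.HBaseAdmin;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.zookeeper.ZKAssign;
    import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

    public class OnlineEncodingChangeSketch {
      static void changeEncodingOnline(HBaseAdmin admin, TableName tableName,
          HColumnDescriptor hcd, ZooKeeperWatcher zkw) throws Exception {
        hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
        admin.modifyColumn(tableName, hcd);  // table stays enabled throughout
        ZKAssign.blockUntilNoRIT(zkw);       // wait out regions in transition
      }
    }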

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java Wed Nov 13 17:31:02 2013
@@ -112,7 +112,6 @@ public class TestEncodedSeekers {
     // Need to disable default row bloom filter for this test to pass.
     HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS).
         setDataBlockEncoding(encoding).
-        setEncodeOnDisk(encodeOnDisk).
         setBlocksize(BLOCK_SIZE).
         setBloomFilterType(BloomType.NONE).
         setCompressTags(compressTags);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java Wed Nov 13 17:31:02 2013
@@ -78,7 +78,6 @@ public class TestLoadAndSwitchEncodeOnDi
     assertAllOnLine(t);
 
     admin.disableTable(TABLE);
-    hcd.setEncodeOnDisk(false);
     admin.modifyColumn(TABLE, hcd);
 
     System.err.println("\nRe-enabling table\n");

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java Wed Nov 13 17:31:02 2013
@@ -89,7 +89,6 @@ public class TestCacheOnWrite {
   private static final int INDEX_BLOCK_SIZE = 512;
   private static final int BLOOM_BLOCK_SIZE = 4096;
   private static final BloomType BLOOM_TYPE = BloomType.ROWCOL;
-  private static final ChecksumType CKTYPE = ChecksumType.CRC32;
   private static final int CKBYTES = 512;
 
   /** The number of valid key types possible in a store file */
@@ -136,22 +135,21 @@ public class TestCacheOnWrite {
 
   /** Provides fancy names for three combinations of two booleans */
   private static enum BlockEncoderTestType {
+    NO_BLOCK_ENCODING_NOOP(true, false),
     NO_BLOCK_ENCODING(false, false),
-    BLOCK_ENCODING_IN_CACHE_ONLY(false, true),
-    BLOCK_ENCODING_EVERYWHERE(true, true);
+    BLOCK_ENCODING_EVERYWHERE(false, true);
 
-    private final boolean encodeOnDisk;
-    private final boolean encodeInCache;
+    private final boolean noop;
+    private final boolean encode;
 
-    BlockEncoderTestType(boolean encodeOnDisk, boolean encodeInCache) {
-      this.encodeOnDisk = encodeOnDisk;
-      this.encodeInCache = encodeInCache;
+    BlockEncoderTestType(boolean noop, boolean encode) {
+      this.encode = encode;
+      this.noop = noop;
     }
 
     public HFileDataBlockEncoder getEncoder() {
-      return new HFileDataBlockEncoderImpl(
-          encodeOnDisk ? ENCODING_ALGO : DataBlockEncoding.NONE,
-          encodeInCache ? ENCODING_ALGO : DataBlockEncoding.NONE);
+      return noop ? NoOpDataBlockEncoder.INSTANCE : new HFileDataBlockEncoderImpl(
+        encode ? ENCODING_ALGO : DataBlockEncoding.NONE);
     }
   }
 
@@ -221,11 +219,9 @@ public class TestCacheOnWrite {
   private void readStoreFile(boolean useTags) throws IOException {
     AbstractHFileReader reader;
     if (useTags) {
-      reader = (HFileReaderV3) HFile.createReaderWithEncoding(fs, storeFilePath, cacheConf,
-          encoder.getEncodingInCache());
+      reader = (HFileReaderV3) HFile.createReader(fs, storeFilePath, cacheConf);
     } else {
-      reader = (HFileReaderV2) HFile.createReaderWithEncoding(fs, storeFilePath, cacheConf,
-          encoder.getEncodingInCache());
+      reader = (HFileReaderV2) HFile.createReader(fs, storeFilePath, cacheConf);
     }
     LOG.info("HFile information: " + reader);
     final boolean cacheBlocks = false;
@@ -239,7 +235,7 @@ public class TestCacheOnWrite {
         new EnumMap<BlockType, Integer>(BlockType.class);
 
     DataBlockEncoding encodingInCache =
-        encoderType.getEncoder().getEncodingInCache();
+        encoderType.getEncoder().getDataBlockEncoding();
     while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
       long onDiskSize = -1;
       if (prevBlock != null) {
@@ -272,7 +268,7 @@ public class TestCacheOnWrite {
     LOG.info("Block count by type: " + blockCountByType);
     String countByType = blockCountByType.toString();
     BlockType cachedDataBlockType =
-        encoderType.encodeInCache ? BlockType.ENCODED_DATA : BlockType.DATA;
+        encoderType.encode ? BlockType.ENCODED_DATA : BlockType.DATA;
     if (useTags) {
       assertEquals("{" + cachedDataBlockType
           + "=1550, LEAF_INDEX=173, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=20}", countByType);
@@ -309,8 +305,7 @@ public class TestCacheOnWrite {
         "test_cache_on_write");
     HFileContext meta = new HFileContextBuilder().withCompression(compress)
         .withBytesPerCheckSum(CKBYTES).withChecksumType(ChecksumType.NULL)
-        .withBlockSize(DATA_BLOCK_SIZE).withDataBlockEncodingInCache(encoder.getEncodingInCache())
-        .withDataBlockEncodingOnDisk(encoder.getEncodingOnDisk())
+        .withBlockSize(DATA_BLOCK_SIZE).withDataBlockEncoding(encoder.getDataBlockEncoding())
         .withIncludesTags(useTags).build();
     StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf, cacheConf, fs)
         .withOutputDir(storeFileParentDir).withComparator(KeyValue.COMPARATOR)
@@ -376,9 +371,7 @@ public class TestCacheOnWrite {
             .setCompressionType(compress)
             .setBloomFilterType(BLOOM_TYPE)
             .setMaxVersions(maxVersions)
-            .setDataBlockEncoding(encoder.getEncodingInCache())
-            .setEncodeOnDisk(encoder.getEncodingOnDisk() !=
-                DataBlockEncoding.NONE)
+            .setDataBlockEncoding(encoder.getDataBlockEncoding())
     );
     int rowIdx = 0;
     long ts = EnvironmentEdgeManager.currentTimeMillis();
@@ -416,6 +409,7 @@ public class TestCacheOnWrite {
     Map<BlockType, Integer> blockTypesInCache =
         blockCache.getBlockTypeCountsForTest();
     LOG.debug("Block types in cache: " + blockTypesInCache);
+    assertNull(blockTypesInCache.get(BlockType.ENCODED_DATA));
     assertNull(blockTypesInCache.get(BlockType.DATA));
     region.close();
     blockCache.shutdown();
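
With the on-disk/in-cache split gone, HFileContextBuilder takes a single encoding. A small sketch of the new builder usage (the block size is illustrative):

    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.io.hfile.HFileContext;
    import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

    public class ContextSketch {
      static HFileContext encodedContext(DataBlockEncoding encoding) {
        return new HFileContextBuilder()
            .withBlockSize(64 * 1024)          // illustrative block size
            .withDataBlockEncoding(encoding)   // one knob replaces the old pair
            .build();
      }
    }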

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java Wed Nov 13 17:31:02 2013
@@ -260,8 +260,7 @@ public class TestHFileBlockCompatibility
               + algo + "_" + encoding.toString());
           FSDataOutputStream os = fs.create(path);
           HFileDataBlockEncoder dataBlockEncoder =
-              new HFileDataBlockEncoderImpl(encoding, encoding,
-                  TestHFileBlockCompatibility.Writer.DUMMY_HEADER);
+              new HFileDataBlockEncoderImpl(encoding);
           TestHFileBlockCompatibility.Writer hbw =
               new TestHFileBlockCompatibility.Writer(algo,
                   dataBlockEncoder, includesMemstoreTS, includesTag);
@@ -429,7 +428,7 @@ public class TestHFileBlockCompatibility
               .build();
       defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null, DUMMY_HEADER, meta);
       dataBlockEncodingCtx =
-          this.dataBlockEncoder.newOnDiskDataBlockEncodingContext(
+          this.dataBlockEncoder.newDataBlockEncodingContext(
               DUMMY_HEADER, meta);
       baosInMemory = new ByteArrayOutputStream();
 

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java Wed Nov 13 17:31:02 2013
@@ -25,8 +25,6 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.io.HeapSize;
@@ -36,7 +34,6 @@ import org.apache.hadoop.hbase.io.encodi
 import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.test.RedundantKVGenerator;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
@@ -46,9 +43,6 @@ import org.junit.runners.Parameterized.P
 @RunWith(Parameterized.class)
 @Category(SmallTests.class)
 public class TestHFileDataBlockEncoder {
-  private Configuration conf;
-  private final HBaseTestingUtility TEST_UTIL =
-      new HBaseTestingUtility();
   private HFileDataBlockEncoderImpl blockEncoder;
   private RedundantKVGenerator generator = new RedundantKVGenerator();
   private boolean includesMemstoreTS;
@@ -61,34 +55,26 @@ public class TestHFileDataBlockEncoder {
       boolean includesMemstoreTS) {
     this.blockEncoder = blockEncoder;
     this.includesMemstoreTS = includesMemstoreTS;
-    System.err.println("On-disk encoding: " + blockEncoder.getEncodingOnDisk()
-        + ", in-cache encoding: " + blockEncoder.getEncodingInCache()
+    System.err.println("Encoding: " + blockEncoder.getDataBlockEncoding()
         + ", includesMemstoreTS: " + includesMemstoreTS);
   }
 
   /**
-   * Preparation before JUnit test.
-   */
-  @Before
-  public void setUp() {
-    conf = TEST_UTIL.getConfiguration();
-  }
-
-  /**
    * Test putting and taking out blocks into cache with different
    * encoding options.
    */
   @Test
-  public void testEncodingWithCache() {
+  public void testEncodingWithCache() throws IOException {
     testEncodingWithCacheInternals(false);
     testEncodingWithCacheInternals(true);
   }
 
-  private void testEncodingWithCacheInternals(boolean useTag) {
+  private void testEncodingWithCacheInternals(boolean useTag) throws IOException {
     HFileBlock block = getSampleHFileBlock(useTag);
+    HFileBlock cacheBlock = createBlockOnDisk(block, useTag);
+
     LruBlockCache blockCache =
         new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
-    HFileBlock cacheBlock = blockEncoder.diskToCacheFormat(block, false);
     BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
     blockCache.cacheBlock(cacheKey, cacheBlock);
 
@@ -97,7 +83,7 @@ public class TestHFileDataBlockEncoder {
 
     HFileBlock returnedBlock = (HFileBlock) heapSize;;
 
-    if (blockEncoder.getEncodingInCache() ==
+    if (blockEncoder.getDataBlockEncoding() ==
         DataBlockEncoding.NONE) {
       assertEquals(block.getBufferWithHeader(),
           returnedBlock.getBufferWithHeader());
@@ -135,15 +121,14 @@ public class TestHFileDataBlockEncoder {
     HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, buf,
         HFileBlock.FILL_HEADER, 0,
         0, hfileContext);
-    HFileBlock cacheBlock = blockEncoder
-        .diskToCacheFormat(createBlockOnDisk(block, useTags), false);
+    HFileBlock cacheBlock = createBlockOnDisk(block, useTags);
     assertEquals(headerSize, cacheBlock.getDummyHeaderForVersion().length);
   }
 
   private HFileBlock createBlockOnDisk(HFileBlock block, boolean useTags) throws IOException {
     int size;
     HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
-        blockEncoder.getEncodingOnDisk(),
+        blockEncoder.getDataBlockEncoding(),
         HConstants.HFILEBLOCK_DUMMY_HEADER, block.getHFileContext());
     context.setDummyHeader(block.getDummyHeaderForVersion());
     blockEncoder.beforeWriteToDisk(block.getBufferWithoutHeader(), context, block.getBlockType());
@@ -155,44 +140,30 @@ public class TestHFileDataBlockEncoder {
   }
 
   /**
-   * Test writing to disk.
+   * Test encoding.
    * @throws IOException
    */
   @Test
-  public void testEncodingWritePath() throws IOException {
-    testEncodingWritePathInternals(false);
-    testEncodingWritePathInternals(true);
+  public void testEncoding() throws IOException {
+    testEncodingInternals(false);
+    testEncodingInternals(true);
   }
 
-  private void testEncodingWritePathInternals(boolean useTag) throws IOException {
+  private void testEncodingInternals(boolean useTag) throws IOException {
     // usually we have just block without headers, but don't complicate that
     HFileBlock block = getSampleHFileBlock(useTag);
     HFileBlock blockOnDisk = createBlockOnDisk(block, useTag);
 
-    if (blockEncoder.getEncodingOnDisk() !=
+    if (blockEncoder.getDataBlockEncoding() !=
         DataBlockEncoding.NONE) {
       assertEquals(BlockType.ENCODED_DATA, blockOnDisk.getBlockType());
-      assertEquals(blockEncoder.getEncodingOnDisk().getId(),
+      assertEquals(blockEncoder.getDataBlockEncoding().getId(),
           blockOnDisk.getDataBlockEncodingId());
     } else {
       assertEquals(BlockType.DATA, blockOnDisk.getBlockType());
     }
   }
 
-  /**
-   * Test converting blocks from disk to cache format.
-   */
-  @Test
-  public void testEncodingReadPath() {
-    testEncodingReadPathInternals(false);
-    testEncodingReadPathInternals(true);
-  }
-
-  private void testEncodingReadPathInternals(boolean useTag) {
-    HFileBlock origBlock = getSampleHFileBlock(useTag);
-    blockEncoder.diskToCacheFormat(origBlock, false);
-  }
-
   private HFileBlock getSampleHFileBlock(boolean useTag) {
     ByteBuffer keyValues = RedundantKVGenerator.convertKvToByteBuffer(
         generator.generateTestKeyValues(60, useTag), includesMemstoreTS);
@@ -224,17 +195,10 @@ public class TestHFileDataBlockEncoder {
         new ArrayList<Object[]>();
 
     for (DataBlockEncoding diskAlgo : DataBlockEncoding.values()) {
-      for (DataBlockEncoding cacheAlgo : DataBlockEncoding.values()) {
-        if (diskAlgo != cacheAlgo && diskAlgo != DataBlockEncoding.NONE) {
-          // We allow (1) the same encoding on disk and in cache, and
-          // (2) some encoding in cache but no encoding on disk (for testing).
-          continue;
-        }
-        for (boolean includesMemstoreTS : new boolean[] {false, true}) {
-          configurations.add(new Object[] {
-              new HFileDataBlockEncoderImpl(diskAlgo, cacheAlgo),
-              new Boolean(includesMemstoreTS)});
-        }
+      for (boolean includesMemstoreTS : new boolean[] {false, true}) {
+        configurations.add(new Object[] {
+            new HFileDataBlockEncoderImpl(diskAlgo),
+            new Boolean(includesMemstoreTS)});
       }
     }
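
The single-argument HFileDataBlockEncoderImpl constructor pairs with NoOpDataBlockEncoder for the unencoded case, as in the TestCacheOnWrite change above. A sketch of that selection:

    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
    import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
    import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;

    public class EncoderSelectionSketch {
      static HFileDataBlockEncoder encoderFor(DataBlockEncoding algo) {
        return algo == DataBlockEncoding.NONE
            ? NoOpDataBlockEncoder.INSTANCE
            : new HFileDataBlockEncoderImpl(algo);
      }
    }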
 

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java Wed Nov 13 17:31:02 2013
@@ -50,7 +50,6 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileReaderV2;
-import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.compress.CompressionOutputStream;
 import org.apache.hadoop.io.compress.Compressor;
@@ -594,7 +593,7 @@ public class DataBlockEncodingTool {
     CacheConfig cacheConf = new CacheConfig(conf);
     FileSystem fs = FileSystem.get(conf);
     StoreFile hsf = new StoreFile(fs, path, conf, cacheConf,
-        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+      BloomType.NONE);
 
     StoreFile.Reader reader = hsf.createReader();
     reader.loadFileInfo();

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java Wed Nov 13 17:31:02 2013
@@ -27,11 +27,8 @@ import org.apache.hadoop.hbase.HBaseTest
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
-import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 
 /**
  * Test seek performance for encoded data blocks. Read an HFile and do several
@@ -61,8 +58,7 @@ public class EncodedSeekPerformanceTest 
 
     // read all of the key values
     StoreFile storeFile = new StoreFile(testingUtility.getTestFileSystem(),
-        path, configuration, cacheConf, BloomType.NONE,
-        NoOpDataBlockEncoder.INSTANCE);
+        path, configuration, cacheConf, BloomType.NONE);
 
     StoreFile.Reader reader = storeFile.createReader();
     StoreFileScanner scanner = reader.getStoreFileScanner(true, false);
@@ -88,11 +84,11 @@ public class EncodedSeekPerformanceTest 
     return seeks;
   }
 
-  private void runTest(Path path, HFileDataBlockEncoder blockEncoder,
+  private void runTest(Path path, DataBlockEncoding blockEncoding,
       List<KeyValue> seeks) throws IOException {
     // read all of the key values
     StoreFile storeFile = new StoreFile(testingUtility.getTestFileSystem(),
-        path, configuration, cacheConf, BloomType.NONE, blockEncoder);
+      path, configuration, cacheConf, BloomType.NONE);
 
     long totalSize = 0;
 
@@ -137,7 +133,7 @@ public class EncodedSeekPerformanceTest 
     storeFile.closeReader(cacheConf.shouldEvictOnClose());
     clearBlockCache();
 
-    System.out.println(blockEncoder);
+    System.out.println(blockEncoding);
     System.out.printf("  Read speed:       %8.2f (MB/s)\n", readInMbPerSec);
     System.out.printf("  Seeks per second: %8.2f (#/s)\n", seeksPerSec);
     System.out.printf("  Total KV size:    %d\n", totalSize);
@@ -148,12 +144,12 @@ public class EncodedSeekPerformanceTest 
    * @param encoders List of encoders which will be used for tests.
    * @throws IOException if there is a bug while reading from disk
    */
-  public void runTests(Path path, List<HFileDataBlockEncoder> encoders)
+  public void runTests(Path path, DataBlockEncoding[] encodings)
       throws IOException {
     List<KeyValue> seeks = prepareListOfTestSeeks(path);
 
-    for (HFileDataBlockEncoder blockEncoder : encoders) {
-      runTest(path, blockEncoder, seeks);
+    for (DataBlockEncoding blockEncoding : encodings) {
+      runTest(path, blockEncoding, seeks);
     }
   }
 
@@ -169,16 +165,10 @@ public class EncodedSeekPerformanceTest 
     }
 
     Path path = new Path(args[0]);
-    List<HFileDataBlockEncoder> encoders =
-        new ArrayList<HFileDataBlockEncoder>();
-
-    for (DataBlockEncoding encodingAlgo : DataBlockEncoding.values()) {
-      encoders.add(new HFileDataBlockEncoderImpl(DataBlockEncoding.NONE,
-          encodingAlgo));
-    }
 
+    // TODO, this test doesn't work as expected any more. Need to fix.
     EncodedSeekPerformanceTest utility = new EncodedSeekPerformanceTest();
-    utility.runTests(path, encoders);
+    utility.runTests(path, DataBlockEncoding.values());
 
     System.exit(0);
   }
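
StoreFile construction likewise drops its encoder argument throughout these tests. A minimal sketch of the five-argument form (setup names are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.regionserver.StoreFile;

    public class StoreFileOpenSketch {
      static StoreFile.Reader openStoreFile(FileSystem fs, Path path,
          Configuration conf, CacheConfig cacheConf) throws IOException {
        StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.NONE);
        return sf.createReader();
      }
    }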

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java Wed Nov 13 17:31:02 2013
@@ -61,10 +61,7 @@ import org.apache.hadoop.hbase.io.encodi
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
-import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
 import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter;
-import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.LoadTestTool;
 import org.apache.hadoop.hbase.util.MD5Hash;
@@ -146,9 +143,6 @@ public class HFileReadWriteTest {
   private int numReadThreads;
   private int durationSec;
   private DataBlockEncoding dataBlockEncoding;
-  private boolean encodeInCacheOnly;
-  private HFileDataBlockEncoder dataBlockEncoder =
-      NoOpDataBlockEncoder.INSTANCE;
 
   private BloomType bloomType = BloomType.NONE;
   private int blockSize;
@@ -194,8 +188,6 @@ public class HFileReadWriteTest {
         "reader threads" + Workload.RANDOM_READS.onlyUsedFor());
     options.addOption(LoadTestTool.OPT_DATA_BLOCK_ENCODING, true,
         LoadTestTool.OPT_DATA_BLOCK_ENCODING_USAGE);
-    options.addOption(LoadTestTool.OPT_ENCODE_IN_CACHE_ONLY, false,
-        LoadTestTool.OPT_ENCODE_IN_CACHE_ONLY_USAGE);
     options.addOptionGroup(Workload.getOptionGroup());
 
     if (args.length == 0) {
@@ -247,23 +239,9 @@ public class HFileReadWriteTest {
           BLOOM_FILTER_OPTION));
     }
 
-    encodeInCacheOnly =
-        cmdLine.hasOption(LoadTestTool.OPT_ENCODE_IN_CACHE_ONLY);
-
     if (cmdLine.hasOption(LoadTestTool.OPT_DATA_BLOCK_ENCODING)) {
       dataBlockEncoding = DataBlockEncoding.valueOf(
           cmdLine.getOptionValue(LoadTestTool.OPT_DATA_BLOCK_ENCODING));
-      // Optionally encode on disk, always encode in cache.
-      dataBlockEncoder = new HFileDataBlockEncoderImpl(
-          encodeInCacheOnly ? DataBlockEncoding.NONE : dataBlockEncoding,
-          dataBlockEncoding);
-    } else {
-      if (encodeInCacheOnly) {
-        LOG.error("The -" + LoadTestTool.OPT_ENCODE_IN_CACHE_ONLY +
-            " option does not make sense without -" +
-            LoadTestTool.OPT_DATA_BLOCK_ENCODING);
-        return false;
-      }
     }
 
     blockSize = conf.getInt("hfile.min.blocksize.size", 65536);
@@ -463,7 +441,7 @@ public class HFileReadWriteTest {
     // We are passing the ROWCOL Bloom filter type, but StoreFile will still
     // use the Bloom filter type specified in the HFile.
     return new StoreFile(fs, filePath, conf, cacheConf,
-        BloomType.ROWCOL, dataBlockEncoder);
+      BloomType.ROWCOL);
   }
 
   public static int charToHex(int c) {

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java Wed Nov 13 17:31:02 2013
@@ -26,7 +26,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /** A mock used so our tests don't deal with actual StoreFiles */
@@ -41,8 +40,7 @@ public class MockStoreFile extends Store
   MockStoreFile(HBaseTestingUtility testUtil, Path testPath,
       long length, long ageInDisk, boolean isRef, long sequenceid) throws IOException {
     super(testUtil.getTestFileSystem(), testPath, testUtil.getConfiguration(),
-          new CacheConfig(testUtil.getConfiguration()), BloomType.NONE,
-          NoOpDataBlockEncoder.INSTANCE);
+      new CacheConfig(testUtil.getConfiguration()), BloomType.NONE);
     this.length = length;
     this.isRef = isRef;
     this.ageInDisk = ageInDisk;

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java Wed Nov 13 17:31:02 2013
@@ -216,7 +216,7 @@ public class TestCacheOnWriteInSchema {
     CacheConfig cacheConf = store.getCacheConfig();
     BlockCache cache = cacheConf.getBlockCache();
     StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
-        BloomType.ROWCOL, null);
+      BloomType.ROWCOL);
     HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
     try {
       // Open a scanner with (on read) caching disabled

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java Wed Nov 13 17:31:02 2013
@@ -72,7 +72,6 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.regionserver.compactions.Compactor;
 import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
 import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
@@ -212,8 +211,7 @@ public class TestCompaction {
       final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
       final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE :
           inCache;
-      store.setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(
-          onDisk, inCache));
+      store.setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(onDisk));
     }
 
     majorCompaction();

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java Wed Nov 13 17:31:02 2013
@@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.apache.hadoop.hbase.util.ByteBloomFilter;
@@ -196,8 +195,7 @@ public class TestCompoundBloomFilter {
 
   private void readStoreFile(int t, BloomType bt, List<KeyValue> kvs,
       Path sfPath) throws IOException {
-    StoreFile sf = new StoreFile(fs, sfPath, conf, cacheConf, bt,
-        NoOpDataBlockEncoder.INSTANCE);
+    StoreFile sf = new StoreFile(fs, sfPath, conf, cacheConf, bt);
     StoreFile.Reader r = sf.createReader();
     final boolean pread = true; // does not really matter
     StoreFileScanner scanner = r.getStoreFileScanner(true, pread);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java Wed Nov 13 17:31:02 2013
@@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
-import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Assume;
 import org.junit.Test;
@@ -90,8 +89,7 @@ public class TestFSErrorsExposed {
         writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
 
     StoreFile sf = new StoreFile(fs, writer.getPath(),
-        util.getConfiguration(), cacheConf, BloomType.NONE,
-        NoOpDataBlockEncoder.INSTANCE);
+      util.getConfiguration(), cacheConf, BloomType.NONE);
 
     StoreFile.Reader reader = sf.createReader();
     HFileScanner scanner = reader.getScanner(false, true);
@@ -141,7 +139,7 @@ public class TestFSErrorsExposed {
         writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
 
     StoreFile sf = new StoreFile(fs, writer.getPath(), util.getConfiguration(),
-        cacheConf, BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+      cacheConf, BloomType.NONE);
 
     List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(
         Collections.singletonList(sf), false, true, false,

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java Wed Nov 13 17:31:02 2013
@@ -148,6 +148,7 @@ public class TestStore extends TestCase 
     init(methodName, conf, htd, hcd);
   }
 
+  @SuppressWarnings("deprecation")
   private void init(String methodName, Configuration conf, HTableDescriptor htd,
       HColumnDescriptor hcd) throws IOException {
     //Setting up a Store
@@ -193,7 +194,7 @@ public class TestStore extends TestCase 
     // Verify that compression and encoding settings are respected
     HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf));
     assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm());
-    assertEquals(hcd.getDataBlockEncoding(), reader.getEncodingOnDisk());
+    assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
     reader.close();
   }
 
@@ -559,7 +560,7 @@ public class TestStore extends TestCase 
 
     long computedSize=0;
     for (KeyValue kv : this.store.memstore.kvset) {
-      long kvsize = this.store.memstore.heapSizeChange(kv, true);
+      long kvsize = MemStore.heapSizeChange(kv, true);
       //System.out.println(kv + " size= " + kvsize + " kvsize= " + kv.heapSize());
       computedSize += kvsize;
     }
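
The reader-side rename is mechanical: getEncodingOnDisk() becomes getDataBlockEncoding(). A small sketch of inspecting a file's encoding (an illustrative helper, not from the patch):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.HFile;

    public class EncodingProbeSketch {
      static DataBlockEncoding encodingOf(FileSystem fs, Path path, Configuration conf)
          throws IOException {
        HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf));
        try {
          return reader.getDataBlockEncoding();  // formerly getEncodingOnDisk()
        } finally {
          reader.close();
        }
      }
    }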

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java Wed Nov 13 17:31:02 2013
@@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
 import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
-import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
@@ -106,7 +105,7 @@ public class TestStoreFile extends HBase
 
     Path sfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
     StoreFile sf = new StoreFile(this.fs, sfPath, conf, cacheConf,
-        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+      BloomType.NONE);
     checkHalfHFile(regionFs, sf);
   }
 
@@ -158,7 +157,7 @@ public class TestStoreFile extends HBase
 
     Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
     StoreFile hsf = new StoreFile(this.fs, hsfPath, conf, cacheConf,
-        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+      BloomType.NONE);
     StoreFile.Reader reader = hsf.createReader();
     // Split on a row, not in middle of row.  Midkey returned by reader
     // may be in middle of row.  Create new one with empty column and
@@ -171,7 +170,7 @@ public class TestStoreFile extends HBase
     HRegionInfo splitHri = new HRegionInfo(hri.getTable(), null, midRow);
     Path refPath = splitStoreFile(regionFs, splitHri, TEST_FAMILY, hsf, midRow, true);
     StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf,
-        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+      BloomType.NONE);
     // Now confirm that I can read from the reference and that it only gets
     // keys from top half of the file.
     HFileScanner s = refHsf.createReader().getScanner(false, false);
@@ -211,7 +210,7 @@ public class TestStoreFile extends HBase
     // Try to open store file from link
     StoreFileInfo storeFileInfo = new StoreFileInfo(testConf, this.fs, linkFilePath);
     StoreFile hsf = new StoreFile(this.fs, storeFileInfo, testConf, cacheConf,
-        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+      BloomType.NONE);
     assertTrue(storeFileInfo.isLink());
 
     // Now confirm that I can read from the link
@@ -262,8 +261,7 @@ public class TestStoreFile extends HBase
     // <root>/clone/splitB/<cf>/<reftohfilelink>
     HRegionInfo splitHriA = new HRegionInfo(hri.getTable(), null, SPLITKEY);
     HRegionInfo splitHriB = new HRegionInfo(hri.getTable(), SPLITKEY, null);
-    StoreFile f = new StoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE,
-        NoOpDataBlockEncoder.INSTANCE);
+    StoreFile f = new StoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE);
     Path pathA = splitStoreFile(cloneRegionFs, splitHriA, TEST_FAMILY, f, SPLITKEY, true); // top
     Path pathB = splitStoreFile(cloneRegionFs, splitHriB, TEST_FAMILY, f, SPLITKEY, false);// bottom
 
@@ -275,7 +273,7 @@ public class TestStoreFile extends HBase
 
     // Try to open store file from link
     StoreFile hsfA = new StoreFile(this.fs, pathA, testConf, cacheConf,
-        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+      BloomType.NONE);
 
     // Now confirm that I can read from the ref to link
     int count = 1;
@@ -288,7 +286,7 @@ public class TestStoreFile extends HBase
 
     // Try to open store file from link
     StoreFile hsfB = new StoreFile(this.fs, pathB, testConf, cacheConf,
-        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+      BloomType.NONE);
 
     // Now confirm that I can read from the ref to link
     HFileScanner sB = hsfB.createReader().getScanner(false, false);
@@ -318,10 +316,10 @@ public class TestStoreFile extends HBase
         midRow, null);
     Path bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, midRow, false);
     // Make readers on top and bottom.
-    StoreFile.Reader top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE,
-        NoOpDataBlockEncoder.INSTANCE).createReader();
-    StoreFile.Reader bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE,
-        NoOpDataBlockEncoder.INSTANCE).createReader();
+    StoreFile.Reader top = new StoreFile(
+      this.fs, topPath, conf, cacheConf, BloomType.NONE).createReader();
+    StoreFile.Reader bottom = new StoreFile(
+      this.fs, bottomPath, conf, cacheConf, BloomType.NONE).createReader();
     ByteBuffer previous = null;
     LOG.info("Midkey: " + midKV.toString());
     ByteBuffer bbMidkeyBytes = ByteBuffer.wrap(midkey);
@@ -379,8 +377,7 @@ public class TestStoreFile extends HBase
       
       assertNull(bottomPath);
       
-      top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE,
-          NoOpDataBlockEncoder.INSTANCE).createReader();
+      top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE).createReader();
       // Now read from the top.
       first = true;
       topScanner = top.getScanner(false, false);
@@ -414,8 +411,8 @@ public class TestStoreFile extends HBase
       topPath = splitStoreFile(regionFs,topHri, TEST_FAMILY, f, badmidkey, true);
       bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);
       assertNull(topPath);
-      bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE,
-          NoOpDataBlockEncoder.INSTANCE).createReader();
+      bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf,
+        BloomType.NONE).createReader();
       first = true;
       bottomScanner = bottom.getScanner(false, false);
       while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
@@ -461,8 +458,7 @@ public class TestStoreFile extends HBase
     }
     writer.close();
 
-    StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf,
-        DataBlockEncoding.NONE);
+    StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf);
     reader.loadFileInfo();
     reader.loadBloomfilter();
     StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
@@ -543,7 +539,7 @@ public class TestStoreFile extends HBase
     }
     writer.close();
 
-    StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, DataBlockEncoding.NONE);
+    StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf);
     reader.loadFileInfo();
     reader.loadBloomfilter();
 
@@ -588,7 +584,7 @@ public class TestStoreFile extends HBase
     writeStoreFile(writer);
     writer.close();
 
-    StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, DataBlockEncoding.NONE);
+    StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf);
 
     // Now do reseek with empty KV to position to the beginning of the file
 
@@ -647,8 +643,7 @@ public class TestStoreFile extends HBase
       }
       writer.close();
 
-      StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf,
-          DataBlockEncoding.NONE);
+      StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf);
       reader.loadFileInfo();
       reader.loadBloomfilter();
       StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
@@ -793,7 +788,7 @@ public class TestStoreFile extends HBase
     writer.close();
 
     StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
-        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+      BloomType.NONE);
     StoreFile.Reader reader = hsf.createReader();
     StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
     TreeSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
@@ -836,7 +831,7 @@ public class TestStoreFile extends HBase
     Path pathCowOff = new Path(baseDir, "123456789");
     StoreFile.Writer writer = writeStoreFile(conf, cacheConf, pathCowOff, 3);
     StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
-        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+      BloomType.NONE);
     LOG.debug(hsf.getPath().toString());
 
     // Read this file, we should see 3 misses
@@ -858,7 +853,7 @@ public class TestStoreFile extends HBase
     Path pathCowOn = new Path(baseDir, "123456788");
     writer = writeStoreFile(conf, cacheConf, pathCowOn, 3);
     hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
-        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+      BloomType.NONE);
 
     // Read this file, we should see 3 hits
     reader = hsf.createReader();
@@ -874,13 +869,13 @@ public class TestStoreFile extends HBase
 
     // Let's read back the two files to ensure the blocks exactly match
     hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf,
-        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+      BloomType.NONE);
     StoreFile.Reader readerOne = hsf.createReader();
     readerOne.loadFileInfo();
     StoreFileScanner scannerOne = readerOne.getStoreFileScanner(true, true);
     scannerOne.seek(KeyValue.LOWESTKEY);
     hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf,
-        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+      BloomType.NONE);
     StoreFile.Reader readerTwo = hsf.createReader();
     readerTwo.loadFileInfo();
     StoreFileScanner scannerTwo = readerTwo.getStoreFileScanner(true, true);
@@ -911,7 +906,7 @@ public class TestStoreFile extends HBase
     conf.setBoolean("hbase.rs.evictblocksonclose", true);
     cacheConf = new CacheConfig(conf);
     hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf,
-        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+      BloomType.NONE);
     reader = hsf.createReader();
     reader.close(cacheConf.shouldEvictOnClose());
 
@@ -925,7 +920,7 @@ public class TestStoreFile extends HBase
     conf.setBoolean("hbase.rs.evictblocksonclose", false);
     cacheConf = new CacheConfig(conf);
     hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf,
-        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+      BloomType.NONE);
     reader = hsf.createReader();
     reader.close(cacheConf.shouldEvictOnClose());
 
@@ -995,14 +990,12 @@ public class TestStoreFile extends HBase
         DataBlockEncoding.FAST_DIFF;
     HFileDataBlockEncoder dataBlockEncoder =
         new HFileDataBlockEncoderImpl(
-            dataBlockEncoderAlgo,
             dataBlockEncoderAlgo);
     cacheConf = new CacheConfig(conf);
     HFileContext meta = new HFileContextBuilder().withBlockSize(StoreFile.DEFAULT_BLOCKSIZE_SMALL)
         .withChecksumType(CKTYPE)
         .withBytesPerCheckSum(CKBYTES)
-        .withDataBlockEncodingInCache(dataBlockEncoderAlgo)
-        .withDataBlockEncodingOnDisk(dataBlockEncoderAlgo)
+        .withDataBlockEncoding(dataBlockEncoderAlgo)
         .build();
     // Make a store file and write data to it.
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
@@ -1013,7 +1006,7 @@ public class TestStoreFile extends HBase
     writer.close();
 
     StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf,
-        cacheConf, BloomType.NONE, dataBlockEncoder);
+      cacheConf, BloomType.NONE);
     StoreFile.Reader reader = storeFile.createReader();
 
     Map<byte[], byte[]> fileInfo = reader.loadFileInfo();

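A note on the API shape these test diffs leave behind. They all follow the same pattern: StoreFile, StoreFile.Reader and HalfStoreFileReader lose their DataBlockEncoding/HFileDataBlockEncoder arguments, and HFileContextBuilder collapses withDataBlockEncodingInCache/withDataBlockEncodingOnDisk into a single withDataBlockEncoding. A minimal sketch of the resulting write/read round trip, assuming conf, cacheConf, fs and a Path f are set up as elsewhere in TestStoreFile; the withFilePath/withFileContext builder calls are assumed from the surrounding test setup rather than shown in the hunks above:

    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.io.hfile.HFileContext;
    import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.regionserver.StoreFile;

    // One encoding setting now covers both disk and the block cache.
    HFileContext meta = new HFileContextBuilder()
        .withBlockSize(StoreFile.DEFAULT_BLOCKSIZE_SMALL)
        .withDataBlockEncoding(DataBlockEncoding.FAST_DIFF)
        .build();
    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs)
        .withFilePath(f)
        .withFileContext(meta)
        .build();
    // ... append KeyValues here ...
    writer.close();

    // Readers take no encoding argument any more; the encoding is
    // recovered from the HFile itself when the file is opened.
    StoreFile.Reader reader =
        new StoreFile(fs, f, conf, cacheConf, BloomType.NONE).createReader();
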
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java Wed Nov 13 17:31:02 2013
@@ -96,11 +96,6 @@ public class LoadTestTool extends Abstra
   private static final String OPT_COMPRESSION = "compression";
   public static final String OPT_DATA_BLOCK_ENCODING =
       HColumnDescriptor.DATA_BLOCK_ENCODING.toLowerCase();
-  public static final String OPT_ENCODE_IN_CACHE_ONLY =
-      "encode_in_cache_only";
-  public static final String OPT_ENCODE_IN_CACHE_ONLY_USAGE =
-      "If this is specified, data blocks will only be encoded in block " +
-      "cache but not on disk";
 
   public static final String OPT_INMEMORY = "in_memory";
   public static final String OPT_USAGE_IN_MEMORY = "Tries to keep the HFiles of the CF " +
@@ -143,7 +138,6 @@ public class LoadTestTool extends Abstra
 
   // Column family options
   protected DataBlockEncoding dataBlockEncodingAlgo;
-  protected boolean encodeInCacheOnly;
   protected Compression.Algorithm compressAlgo;
   protected BloomType bloomType;
   private boolean inMemoryCF;
@@ -215,7 +209,6 @@ public class LoadTestTool extends Abstra
       }
       if (dataBlockEncodingAlgo != null) {
         columnDesc.setDataBlockEncoding(dataBlockEncodingAlgo);
-        columnDesc.setEncodeOnDisk(!encodeInCacheOnly);
       }
       if (inMemoryCF) {
         columnDesc.setInMemory(inMemoryCF);
@@ -253,7 +246,6 @@ public class LoadTestTool extends Abstra
         "separate puts for every column in a row");
     addOptNoArg(OPT_BATCHUPDATE, "Whether to use batch as opposed to " +
         "separate updates for every column in a row");
-    addOptNoArg(OPT_ENCODE_IN_CACHE_ONLY, OPT_ENCODE_IN_CACHE_ONLY_USAGE);
     addOptNoArg(OPT_INMEMORY, OPT_USAGE_IN_MEMORY);
     addOptNoArg(OPT_USETAGS, OPT_USAGE_USETAG);
     addOptWithArg(OPT_NUM_TAGS,  OPT_USAGE_NUM_TAGS + " The default is 1:1");
@@ -307,7 +299,6 @@ public class LoadTestTool extends Abstra
       System.out.println("Key range: [" + startKey + ".." + (endKey - 1) + "]");
     }
 
-    encodeInCacheOnly = cmd.hasOption(OPT_ENCODE_IN_CACHE_ONLY);
     parseColumnFamilyOptions(cmd);
 
     if (isWrite) {
@@ -381,10 +372,6 @@ public class LoadTestTool extends Abstra
     String dataBlockEncodingStr = cmd.getOptionValue(OPT_DATA_BLOCK_ENCODING);
     dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null :
         DataBlockEncoding.valueOf(dataBlockEncodingStr);
-    if (dataBlockEncodingAlgo == DataBlockEncoding.NONE && encodeInCacheOnly) {
-      throw new IllegalArgumentException("-" + OPT_ENCODE_IN_CACHE_ONLY + " " +
-          "does not make sense when data block encoding is not used");
-    }
 
     String compressStr = cmd.getOptionValue(OPT_COMPRESSION);
     compressAlgo = compressStr == null ? Compression.Algorithm.NONE :

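The LoadTestTool change is the user-facing side of the same cleanup: with encoding now always applied on disk as well as in cache, the -encode_in_cache_only flag and the matching HColumnDescriptor.setEncodeOnDisk(...) call have nothing left to control, so both are removed. Column family setup reduces to the single setter; a short sketch, where the "test_cf" family name and FAST_DIFF choice are illustrative only:

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

    HColumnDescriptor columnDesc = new HColumnDescriptor("test_cf");
    // Setting an encoding is now sufficient by itself; there is no
    // separate on-disk/in-cache switch to keep consistent with it.
    columnDesc.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
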
Modified: hbase/trunk/hbase-shell/src/main/ruby/hbase/admin.rb
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-shell/src/main/ruby/hbase/admin.rb?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
--- hbase/trunk/hbase-shell/src/main/ruby/hbase/admin.rb (original)
+++ hbase/trunk/hbase-shell/src/main/ruby/hbase/admin.rb Wed Nov 13 17:31:02 2013
@@ -614,7 +614,6 @@ module Hbase
       family.setInMemory(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY)
       family.setTimeToLive(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::TTL))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL)
       family.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING)
-      family.setEncodeOnDisk(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::ENCODE_ON_DISK))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::ENCODE_ON_DISK)
       family.setBlocksize(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE)
       family.setMaxVersions(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS)
       family.setMinVersions(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::MIN_VERSIONS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::MIN_VERSIONS)

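On the shell side, the ENCODE_ON_DISK attribute is simply no longer consulted when admin.rb builds a column descriptor, leaving DATA_BLOCK_ENCODING as the only encoding attribute that takes effect. A typical alter after this change, with illustrative table and family names:

    hbase> alter 't1', {NAME => 'f1', DATA_BLOCK_ENCODING => 'FAST_DIFF'}
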
Modified: hbase/trunk/src/main/docbkx/shell.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/docbkx/shell.xml?rev=1541629&r1=1541628&r2=1541629&view=diff
==============================================================================
Binary files - no diff available.