Posted to commits@hbase.apache.org by te...@apache.org on 2014/04/11 17:55:03 UTC

svn commit: r1586704 - in /hbase/branches/0.96/hbase-server/src: main/java/org/apache/hadoop/hbase/mapreduce/ test/java/org/apache/hadoop/hbase/mapreduce/

Author: tedyu
Date: Fri Apr 11 15:55:03 2014
New Revision: 1586704

URL: http://svn.apache.org/r1586704
Log:
HBASE-10921 Port HBASE-10323 'Auto detect data block encoding in HFileOutputFormat' to 0.96 (Kashif)

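In practice, configureIncrementalLoad now serializes each column family's data
block encoding from the table descriptor alongside compression, bloom type and
block size, so the bulk-load writers honor it without extra configuration. A
minimal setup sketch ("myTable" and the job name are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.mapreduce.Job;

public class BulkLoadSetupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "myTable");
    Job job = new Job(conf, "bulk-load");
    // With this patch, configureIncrementalLoad also serializes each family's
    // DATA_BLOCK_ENCODING from the table descriptor into the job conf, so the
    // reducer-side HFile writers pick it up automatically.
    HFileOutputFormat2.configureIncrementalLoad(job, table);
  }
}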

Modified:
    hbase/branches/0.96/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
    hbase/branches/0.96/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
    hbase/branches/0.96/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
    hbase/branches/0.96/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java

Modified: hbase/branches/0.96/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.96/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java?rev=1586704&r1=1586703&r2=1586704&view=diff
==============================================================================
--- hbase/branches/0.96/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java (original)
+++ hbase/branches/0.96/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java Fri Apr 11 15:55:03 2014
@@ -30,15 +30,20 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
+import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Writes HFiles. Passed KeyValues must arrive in order.
  * Writes current time as the sequence id for the file. Sets the major compacted
- * attribute on created hfiles. Calling write(null,null) will forceably roll
+ * attribute on created hfiles. Calling write(null,null) will forcibly roll
  * all HFiles being written.
  * <p>
  * Using this class as part of a MapReduce job is best done
@@ -52,6 +57,13 @@ import org.apache.hadoop.mapreduce.lib.o
 public class HFileOutputFormat extends FileOutputFormat<ImmutableBytesWritable, KeyValue> {
   static Log LOG = LogFactory.getLog(HFileOutputFormat.class);
 
+  // This constant is public since the client can set it in their conf
+  // object and thus needs to be able to refer to this symbol.
+  // It is present for backwards compatibility reasons. Use it only to
+  // override the auto-detection of datablock encoding.
+  public static final String DATABLOCK_ENCODING_OVERRIDE_CONF_KEY =
+    HFileOutputFormat2.DATABLOCK_ENCODING_OVERRIDE_CONF_KEY;
+
   public RecordWriter<ImmutableBytesWritable, KeyValue> getRecordWriter(final TaskAttemptContext context)
   throws IOException, InterruptedException {
     return HFileOutputFormat2.createRecordWriter(context);
@@ -77,20 +89,57 @@ public class HFileOutputFormat extends F
   }
 
   /**
-   * Run inside the task to deserialize column family to compression algorithm
-   * map from the
-   * configuration.
-   *
-   * Package-private for unit tests only.
+   * Runs inside the task to deserialize column family to compression algorithm
+   * map from the configuration.
    *
-   * @return a map from column family to the name of the configured compression
-   *         algorithm
+   * @param conf to read the serialized values from
+   * @return a map from column family to the configured compression algorithm
    */
-  static Map<byte[], String> createFamilyCompressionMap(Configuration conf) {
+  @VisibleForTesting
+  static Map<byte[], Algorithm> createFamilyCompressionMap(Configuration
+      conf) {
     return HFileOutputFormat2.createFamilyCompressionMap(conf);
   }
 
   /**
+   * Runs inside the task to deserialize column family to bloom filter type
+   * map from the configuration.
+   *
+   * @param conf to read the serialized values from
+   * @return a map from column family to the configured bloom filter type
+   */
+  @VisibleForTesting
+  static Map<byte[], BloomType> createFamilyBloomTypeMap(Configuration conf) {
+    return HFileOutputFormat2.createFamilyBloomTypeMap(conf);
+  }
+
+  /**
+   * Runs inside the task to deserialize column family to block size
+   * map from the configuration.
+   *
+   * @param conf to read the serialized values from
+   * @return a map from column family to the configured block size
+   */
+  @VisibleForTesting
+  static Map<byte[], Integer> createFamilyBlockSizeMap(Configuration conf) {
+    return HFileOutputFormat2.createFamilyBlockSizeMap(conf);
+  }
+
+  /**
+   * Runs inside the task to deserialize column family to data block encoding
+   * type map from the configuration.
+   *
+   * @param conf to read the serialized values from
+   * @return a map from column family to HFileDataBlockEncoder for the
+   *         configured data block type for the family
+   */
+  @VisibleForTesting
+  static Map<byte[], HFileDataBlockEncoder> createFamilyDataBlockEncodingMap(
+      Configuration conf) {
+    return HFileOutputFormat2.createFamilyDataBlockEncodingMap(conf);
+  }
+
+  /**
    * Configure <code>job</code> with a TotalOrderPartitioner, partitioning against
   * <code>splitPoints</code>. Cleans up the partitions file after job exits.
    */
@@ -103,25 +152,58 @@ public class HFileOutputFormat extends F
    * Serialize column family to compression algorithm map to configuration.
    * Invoked while configuring the MR job for incremental load.
    *
-   * Package-private for unit tests only.
-   *
+   * @param table to read the properties from
+   * @param conf to persist serialized values into
    * @throws IOException
    *           on failure to read column family descriptors
    */
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(
       value="RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE")
+  @VisibleForTesting
   static void configureCompression(HTable table, Configuration conf) throws IOException {
     HFileOutputFormat2.configureCompression(table, conf);
   }
 
   /**
+   * Serialize column family to block size map to configuration.
+   * Invoked while configuring the MR job for incremental load.
+   *
+   * @param table to read the properties from
+   * @param conf to persist serialized values into
+   * @throws IOException
+   *           on failure to read column family descriptors
+   */
+  @VisibleForTesting
+  static void configureBlockSize(HTable table, Configuration conf) throws IOException {
+    HFileOutputFormat2.configureBlockSize(table, conf);
+  }
+
+  /**
    * Serialize column family to bloom type map to configuration.
    * Invoked while configuring the MR job for incremental load.
    *
+   * @param table to read the properties from
+   * @param conf to persist serialized values into
    * @throws IOException
    *           on failure to read column family descriptors
    */
+  @VisibleForTesting
   static void configureBloomType(HTable table, Configuration conf) throws IOException {
     HFileOutputFormat2.configureBloomType(table, conf);
   }
+
+  /**
+   * Serialize column family to data block encoding map to configuration.
+   * Invoked while configuring the MR job for incremental load.
+   *
+   * @param table to read the properties from
+   * @param conf to persist serialized values into
+   * @throws IOException
+   *           on failure to read column family descriptors
+   */
+  @VisibleForTesting
+  static void configureDataBlockEncoding(HTable table,
+      Configuration conf) throws IOException {
+    HFileOutputFormat2.configureDataBlockEncoding(table, conf);
+  }
 }

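Both classes now expose DATABLOCK_ENCODING_OVERRIDE_CONF_KEY publicly; it carries
the same string as the old private DATABLOCK_ENCODING_CONF_KEY, so existing jobs
keep working. Setting it forces one encoding for all families instead of the
per-family auto-detection. A minimal sketch (FAST_DIFF is an illustrative choice;
the value must name a DataBlockEncoding enum constant):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat;

public class EncodingOverrideSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Bypass auto-detection: every family's HFiles get FAST_DIFF encoding.
    conf.set(HFileOutputFormat.DATABLOCK_ENCODING_OVERRIDE_CONF_KEY, "FAST_DIFF");
  }
}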
Modified: hbase/branches/0.96/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.96/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java?rev=1586704&r1=1586703&r2=1586704&view=diff
==============================================================================
--- hbase/branches/0.96/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java (original)
+++ hbase/branches/0.96/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java Fri Apr 11 15:55:03 2014
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.client.HT
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.compress.Compression;
+import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@@ -68,10 +69,12 @@ import org.apache.hadoop.mapreduce.lib.o
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Writes HFiles. Passed Cells must arrive in order.
  * Writes current time as the sequence id for the file. Sets the major compacted
- * attribute on created hfiles. Calling write(null,null) will forceably roll
+ * attribute on created hfiles. Calling write(null,null) will forcibly roll
  * all HFiles being written.
  * <p>
  * Using this class as part of a MapReduce job is best done
@@ -82,11 +85,26 @@ import org.apache.hadoop.mapreduce.lib.p
 @InterfaceStability.Stable
 public class HFileOutputFormat2 extends FileOutputFormat<ImmutableBytesWritable, Cell> {
   static Log LOG = LogFactory.getLog(HFileOutputFormat2.class);
-  static final String COMPRESSION_CONF_KEY = "hbase.hfileoutputformat.families.compression";
-  private static final String BLOOM_TYPE_CONF_KEY = "hbase.hfileoutputformat.families.bloomtype";
-  private static final String DATABLOCK_ENCODING_CONF_KEY =
-     "hbase.mapreduce.hfileoutputformat.datablock.encoding";
-  private static final String BLOCK_SIZE_CONF_KEY = "hbase.mapreduce.hfileoutputformat.blocksize";
+
+  // The following constants are private since these are used by
+  // HFileOutputFormat2 to internally transfer data between job setup and
+  // reducer run using conf.
+  // These should not be changed by the client.
+  private static final String COMPRESSION_FAMILIES_CONF_KEY =
+      "hbase.hfileoutputformat.families.compression";
+  private static final String BLOOM_TYPE_FAMILIES_CONF_KEY =
+      "hbase.hfileoutputformat.families.bloomtype";
+  private static final String BLOCK_SIZE_FAMILIES_CONF_KEY =
+      "hbase.mapreduce.hfileoutputformat.blocksize";
+  private static final String DATABLOCK_ENCODING_FAMILIES_CONF_KEY =
+      "hbase.mapreduce.hfileoutputformat.families.datablock.encoding";
+
+  // This constant is public since the client can set it in their conf
+  // object and thus needs to be able to refer to this symbol.
+  // It is present for backwards compatibility reasons. Use it only to
+  // override the auto-detection of datablock encoding.
+  public static final String DATABLOCK_ENCODING_OVERRIDE_CONF_KEY =
+      "hbase.mapreduce.hfileoutputformat.datablock.encoding";
 
   public RecordWriter<ImmutableBytesWritable, Cell> getRecordWriter(final TaskAttemptContext context)
   throws IOException, InterruptedException {
@@ -105,29 +123,26 @@ public class HFileOutputFormat2 extends 
     final long maxsize = conf.getLong(HConstants.HREGION_MAX_FILESIZE,
         HConstants.DEFAULT_MAX_FILE_SIZE);
     // Invented config.  Add to hbase-*.xml if other than default compression.
-    final String defaultCompression = conf.get("hfile.compression",
+    final String defaultCompressionStr = conf.get("hfile.compression",
         Compression.Algorithm.NONE.getName());
+    final Algorithm defaultCompression = AbstractHFileWriter
+        .compressionByName(defaultCompressionStr);
     final boolean compactionExclude = conf.getBoolean(
         "hbase.mapreduce.hfileoutputformat.compaction.exclude", false);
 
     // create a map from column family to the compression algorithm
-    final Map<byte[], String> compressionMap = createFamilyCompressionMap(conf);
-    final Map<byte[], String> bloomTypeMap = createFamilyBloomMap(conf);
-    final Map<byte[], String> blockSizeMap = createFamilyBlockSizeMap(conf);
-
-    String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_CONF_KEY);
-    final HFileDataBlockEncoder encoder;
-    if (dataBlockEncodingStr == null) {
-      encoder = NoOpDataBlockEncoder.INSTANCE;
+    final Map<byte[], Algorithm> compressionMap = createFamilyCompressionMap(conf);
+    final Map<byte[], BloomType> bloomTypeMap = createFamilyBloomTypeMap(conf);
+    final Map<byte[], Integer> blockSizeMap = createFamilyBlockSizeMap(conf);
+
+    String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY);
+    final Map<byte[], HFileDataBlockEncoder> datablockEncodingMap =
+        createFamilyDataBlockEncodingMap(conf);
+    final HFileDataBlockEncoder overriddenEncoder;
+    if (dataBlockEncodingStr != null) {
+      overriddenEncoder = getDataBlockEncoderFromString(dataBlockEncodingStr);
     } else {
-      try {
-        encoder = new HFileDataBlockEncoderImpl(DataBlockEncoding
-            .valueOf(dataBlockEncodingStr));
-      } catch (IllegalArgumentException ex) {
-        throw new RuntimeException(
-            "Invalid data block encoding type configured for the param "
-                + DATABLOCK_ENCODING_CONF_KEY + " : " + dataBlockEncodingStr);
-      }
+      overriddenEncoder = null;
     }
 
     return new RecordWriter<ImmutableBytesWritable, V>() {
@@ -205,21 +220,20 @@ public class HFileOutputFormat2 extends 
           throws IOException {
         WriterLength wl = new WriterLength();
         Path familydir = new Path(outputdir, Bytes.toString(family));
-        String compression = compressionMap.get(family);
+        Algorithm compression = compressionMap.get(family);
         compression = compression == null ? defaultCompression : compression;
-        String bloomTypeStr = bloomTypeMap.get(family);
-        BloomType bloomType = BloomType.NONE;
-        if (bloomTypeStr != null) {
-          bloomType = BloomType.valueOf(bloomTypeStr);
-        }
-        String blockSizeString = blockSizeMap.get(family);
-        int blockSize = blockSizeString == null ? HConstants.DEFAULT_BLOCKSIZE
-            : Integer.parseInt(blockSizeString);
+        BloomType bloomType = bloomTypeMap.get(family);
+        bloomType = bloomType == null ? BloomType.NONE : bloomType;
+        Integer blockSize = blockSizeMap.get(family);
+        blockSize = blockSize == null ? HConstants.DEFAULT_BLOCKSIZE : blockSize;
+        HFileDataBlockEncoder encoder = overriddenEncoder;
+        encoder = encoder == null ? datablockEncodingMap.get(family) : encoder;
+        encoder = encoder == null ? NoOpDataBlockEncoder.INSTANCE : encoder;
         Configuration tempConf = new Configuration(conf);
         tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
         wl.writer = new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), fs, blockSize)
             .withOutputDir(familydir)
-            .withCompression(AbstractHFileWriter.compressionByName(compression))
+            .withCompression(compression)
             .withBloomType(bloomType)
             .withComparator(KeyValue.COMPARATOR)
             .withDataBlockEncoder(encoder)
@@ -374,62 +388,111 @@ public class HFileOutputFormat2 extends 
     configureCompression(table, conf);
     configureBloomType(table, conf);
     configureBlockSize(table, conf);
+    configureDataBlockEncoding(table, conf);
 
     TableMapReduceUtil.addDependencyJars(job);
     TableMapReduceUtil.initCredentials(job);
     LOG.info("Incremental table " + Bytes.toString(table.getTableName()) + " output configured.");
   }
 
-  private static void configureBlockSize(HTable table, Configuration conf) throws IOException {
-    StringBuilder blockSizeConfigValue = new StringBuilder();
-    HTableDescriptor tableDescriptor = table.getTableDescriptor();
-    if(tableDescriptor == null){
-      // could happen with mock table instance
-      return;
-    }
-    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
-    int i = 0;
-    for (HColumnDescriptor familyDescriptor : families) {
-      if (i++ > 0) {
-        blockSizeConfigValue.append('&');
-      }
-      blockSizeConfigValue.append(URLEncoder.encode(
-          familyDescriptor.getNameAsString(), "UTF-8"));
-      blockSizeConfigValue.append('=');
-      blockSizeConfigValue.append(URLEncoder.encode(
-          String.valueOf(familyDescriptor.getBlocksize()), "UTF-8"));
+  /**
+   * Runs inside the task to deserialize column family to compression algorithm
+   * map from the configuration.
+   *
+   * @param conf to read the serialized values from
+   * @return a map from column family to the configured compression algorithm
+   */
+  @VisibleForTesting
+  static Map<byte[], Algorithm> createFamilyCompressionMap(Configuration
+      conf) {
+    Map<byte[], String> stringMap = createFamilyConfValueMap(conf,
+        COMPRESSION_FAMILIES_CONF_KEY);
+    Map<byte[], Algorithm> compressionMap = new TreeMap<byte[],
+        Algorithm>(Bytes.BYTES_COMPARATOR);
+    for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
+      Algorithm algorithm = AbstractHFileWriter.compressionByName
+          (e.getValue());
+      compressionMap.put(e.getKey(), algorithm);
     }
-    // Get rid of the last ampersand
-    conf.set(BLOCK_SIZE_CONF_KEY, blockSizeConfigValue.toString());
+    return compressionMap;
   }
 
   /**
-   * Run inside the task to deserialize column family to compression algorithm
-   * map from the
-   * configuration.
+   * Runs inside the task to deserialize column family to bloom filter type
+   * map from the configuration.
    *
-   * Package-private for unit tests only.
+   * @param conf to read the serialized values from
+   * @return a map from column family to the configured bloom filter type
+   */
+  @VisibleForTesting
+  static Map<byte[], BloomType> createFamilyBloomTypeMap(Configuration conf) {
+    Map<byte[], String> stringMap = createFamilyConfValueMap(conf,
+        BLOOM_TYPE_FAMILIES_CONF_KEY);
+    Map<byte[], BloomType> bloomTypeMap = new TreeMap<byte[],
+        BloomType>(Bytes.BYTES_COMPARATOR);
+    for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
+      BloomType bloomType = BloomType.valueOf(e.getValue());
+      bloomTypeMap.put(e.getKey(), bloomType);
+    }
+    return bloomTypeMap;
+  }
+
+  /**
+   * Runs inside the task to deserialize column family to block size
+   * map from the configuration.
    *
-   * @return a map from column family to the name of the configured compression
-   *         algorithm
+   * @param conf to read the serialized values from
+   * @return a map from column family to the configured block size
    */
-  static Map<byte[], String> createFamilyCompressionMap(Configuration conf) {
-    return createFamilyConfValueMap(conf, COMPRESSION_CONF_KEY);
+  @VisibleForTesting
+  static Map<byte[], Integer> createFamilyBlockSizeMap(Configuration conf) {
+    Map<byte[], String> stringMap = createFamilyConfValueMap(conf,
+        BLOCK_SIZE_FAMILIES_CONF_KEY);
+    Map<byte[], Integer> blockSizeMap = new TreeMap<byte[],
+        Integer>(Bytes.BYTES_COMPARATOR);
+    for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
+      Integer blockSize = Integer.parseInt(e.getValue());
+      blockSizeMap.put(e.getKey(), blockSize);
+    }
+    return blockSizeMap;
   }
 
-  private static Map<byte[], String> createFamilyBloomMap(Configuration conf) {
-    return createFamilyConfValueMap(conf, BLOOM_TYPE_CONF_KEY);
+  /**
+   * Runs inside the task to deserialize column family to data block encoding type map from the
+   * configuration.
+   * 
+   * @param conf to read the serialized values from
+   * @return a map from column family to HFileDataBlockEncoder for the configured data block type
+   *         for the family
+   */
+  @VisibleForTesting
+  static Map<byte[], HFileDataBlockEncoder> createFamilyDataBlockEncodingMap(Configuration conf) {
+    Map<byte[], String> stringMap =
+        createFamilyConfValueMap(conf, DATABLOCK_ENCODING_FAMILIES_CONF_KEY);
+    Map<byte[], HFileDataBlockEncoder> encoderMap =
+        new TreeMap<byte[], HFileDataBlockEncoder>(Bytes.BYTES_COMPARATOR);
+    for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
+      encoderMap.put(e.getKey(), getDataBlockEncoderFromString(e.getValue()));
+    }
+    return encoderMap;
   }
 
-  private static Map<byte[], String> createFamilyBlockSizeMap(Configuration conf) {
-    return createFamilyConfValueMap(conf, BLOCK_SIZE_CONF_KEY);
+  private static HFileDataBlockEncoder getDataBlockEncoderFromString(String dataBlockEncodingStr) {
+    HFileDataBlockEncoder encoder;
+    try {
+      encoder = new HFileDataBlockEncoderImpl(DataBlockEncoding.valueOf(dataBlockEncodingStr));
+    } catch (IllegalArgumentException ex) {
+      throw new RuntimeException("Invalid data block encoding type configured for the param "
+          + DATABLOCK_ENCODING_FAMILIES_CONF_KEY + " : " + dataBlockEncodingStr);
+    }
+    return encoder;
   }
 
   /**
    * Run inside the task to deserialize column family to given conf value map.
    *
-   * @param conf
-   * @param confName
+   * @param conf to read the serialized values from
+   * @param confName conf key to read from the configuration
    * @return a map of column family to the given configuration value
    */
   private static Map<byte[], String> createFamilyConfValueMap(Configuration conf, String confName) {
@@ -474,13 +537,14 @@ public class HFileOutputFormat2 extends 
    * Serialize column family to compression algorithm map to configuration.
    * Invoked while configuring the MR job for incremental load.
    *
-   * Package-private for unit tests only.
-   *
+   * @param table to read the properties from
+   * @param conf to persist serialized values into
    * @throws IOException
    *           on failure to read column family descriptors
    */
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(
       value="RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE")
+  @VisibleForTesting
   static void configureCompression(HTable table, Configuration conf) throws IOException {
     StringBuilder compressionConfigValue = new StringBuilder();
     HTableDescriptor tableDescriptor = table.getTableDescriptor();
@@ -499,16 +563,53 @@ public class HFileOutputFormat2 extends 
       compressionConfigValue.append(URLEncoder.encode(familyDescriptor.getCompression().getName(), "UTF-8"));
     }
     // Get rid of the last ampersand
-    conf.set(COMPRESSION_CONF_KEY, compressionConfigValue.toString());
+    conf.set(COMPRESSION_FAMILIES_CONF_KEY, compressionConfigValue.toString());
+  }
+
+  /**
+   * Serialize column family to block size map to configuration.
+   * Invoked while configuring the MR job for incremental load.
+   *
+   * @param table to read the properties from
+   * @param conf to persist serialized values into
+   * @throws IOException
+   *           on failure to read column family descriptors
+   */
+  @VisibleForTesting
+  static void configureBlockSize(
+      HTable table, Configuration conf) throws IOException {
+    StringBuilder blockSizeConfigValue = new StringBuilder();
+    HTableDescriptor tableDescriptor = table.getTableDescriptor();
+    if (tableDescriptor == null) {
+      // could happen with mock table instance
+      return;
+    }
+    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
+    int i = 0;
+    for (HColumnDescriptor familyDescriptor : families) {
+      if (i++ > 0) {
+        blockSizeConfigValue.append('&');
+      }
+      blockSizeConfigValue.append(URLEncoder.encode(
+          familyDescriptor.getNameAsString(), "UTF-8"));
+      blockSizeConfigValue.append('=');
+      blockSizeConfigValue.append(URLEncoder.encode(
+          String.valueOf(familyDescriptor.getBlocksize()), "UTF-8"));
+    }
+    // Get rid of the last ampersand
+    conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, blockSizeConfigValue.toString());
   }
 
   /**
    * Serialize column family to bloom type map to configuration.
    * Invoked while configuring the MR job for incremental load.
    *
+   * @param table to read the properties from
+   * @param conf to persist serialized values into
    * @throws IOException
    *           on failure to read column family descriptors
    */
+  @VisibleForTesting
   static void configureBloomType(HTable table, Configuration conf) throws IOException {
     HTableDescriptor tableDescriptor = table.getTableDescriptor();
     if (tableDescriptor == null) {
@@ -530,6 +631,44 @@ public class HFileOutputFormat2 extends 
       }
       bloomTypeConfigValue.append(URLEncoder.encode(bloomType, "UTF-8"));
     }
-    conf.set(BLOOM_TYPE_CONF_KEY, bloomTypeConfigValue.toString());
+    conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY, bloomTypeConfigValue.toString());
+  }
+
+  /**
+   * Serialize column family to data block encoding map to configuration.
+   * Invoked while configuring the MR job for incremental load.
+   *
+   * @param table to read the properties from
+   * @param conf to persist serialized values into
+   * @throws IOException
+   *           on failure to read column family descriptors
+   */
+  @VisibleForTesting
+  static void configureDataBlockEncoding(HTable table,
+      Configuration conf) throws IOException {
+    HTableDescriptor tableDescriptor = table.getTableDescriptor();
+    if (tableDescriptor == null) {
+      // could happen with mock table instance
+      return;
+    }
+    StringBuilder dataBlockEncodingConfigValue = new StringBuilder();
+    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
+    int i = 0;
+    for (HColumnDescriptor familyDescriptor : families) {
+      if (i++ > 0) {
+        dataBlockEncodingConfigValue.append('&');
+      }
+      dataBlockEncodingConfigValue.append(
+          URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
+      dataBlockEncodingConfigValue.append('=');
+      DataBlockEncoding encoding = familyDescriptor.getDataBlockEncoding();
+      if (encoding == null) {
+        encoding = DataBlockEncoding.NONE;
+      }
+      dataBlockEncodingConfigValue.append(URLEncoder.encode(encoding.toString(),
+          "UTF-8"));
+    }
+    conf.set(DATABLOCK_ENCODING_FAMILIES_CONF_KEY,
+        dataBlockEncodingConfigValue.toString());
   }
 }

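The per-family settings travel through the configuration as URL-encoded
family=value pairs joined by '&' (see configureBlockSize and
configureDataBlockEncoding above). A minimal decoding sketch mirroring
createFamilyConfValueMap, using a made-up serialized string; the trailing
comments restate the record writer's encoder fallback order:

import java.net.URLDecoder;
import java.util.Map;
import java.util.TreeMap;

public class FamilyConfFormatSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical value, e.g. for the key
    // hbase.mapreduce.hfileoutputformat.families.datablock.encoding:
    String serialized = "cf1=FAST_DIFF&cf2=NONE";
    Map<String, String> familyToValue = new TreeMap<String, String>();
    for (String pair : serialized.split("&")) {
      String[] kv = pair.split("=", 2);
      familyToValue.put(URLDecoder.decode(kv[0], "UTF-8"),
          URLDecoder.decode(kv[1], "UTF-8"));
    }
    System.out.println(familyToValue); // {cf1=FAST_DIFF, cf2=NONE}
    // Per-family encoder resolution in the record writer:
    //   1. DATABLOCK_ENCODING_OVERRIDE_CONF_KEY, if set
    //   2. the encoding serialized for the family (auto-detected)
    //   3. NoOpDataBlockEncoder.INSTANCE
  }
}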
Modified: hbase/branches/0.96/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.96/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java?rev=1586704&r1=1586703&r2=1586704&view=diff
==============================================================================
--- hbase/branches/0.96/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java (original)
+++ hbase/branches/0.96/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java Fri Apr 11 15:55:03 2014
@@ -66,10 +66,14 @@ import org.apache.hadoop.hbase.client.Sc
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
 import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -474,36 +478,40 @@ public class TestHFileOutputFormat  {
   }
 
   /**
-   * Test for
-   * {@link HFileOutputFormat#createFamilyCompressionMap(Configuration)}. Tests
-   * that the compression map is correctly deserialized from configuration
+   * Test for {@link HFileOutputFormat#configureCompression(HTable,
+   * Configuration)} and {@link HFileOutputFormat#createFamilyCompressionMap
+   * (Configuration)}.
+   * Tests that the compression map is correctly serialized into
+   * and deserialized from configuration
    *
    * @throws IOException
    */
   @Test
-  public void testCreateFamilyCompressionMap() throws IOException {
+  public void testSerializeDeserializeFamilyCompressionMap() throws IOException {
     for (int numCfs = 0; numCfs <= 3; numCfs++) {
       Configuration conf = new Configuration(this.util.getConfiguration());
-      Map<String, Compression.Algorithm> familyToCompression = getMockColumnFamilies(numCfs);
+      Map<String, Compression.Algorithm> familyToCompression =
+          getMockColumnFamiliesForCompression(numCfs);
       HTable table = Mockito.mock(HTable.class);
-      setupMockColumnFamilies(table, familyToCompression);
+      setupMockColumnFamiliesForCompression(table, familyToCompression);
       HFileOutputFormat.configureCompression(table, conf);
 
       // read back family specific compression setting from the configuration
-      Map<byte[], String> retrievedFamilyToCompressionMap = HFileOutputFormat.createFamilyCompressionMap(conf);
+      Map<byte[], Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat
+          .createFamilyCompressionMap(conf);
 
       // test that we have a value for all column families that matches with the
       // used mock values
       for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
-        assertEquals("Compression configuration incorrect for column family:" + entry.getKey(), entry.getValue()
-                     .getName(), retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
+        assertEquals("Compression configuration incorrect for column family:"
+            + entry.getKey(), entry.getValue(),
+            retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
       }
     }
   }
 
-  private void setupMockColumnFamilies(HTable table,
-    Map<String, Compression.Algorithm> familyToCompression) throws IOException
-  {
+  private void setupMockColumnFamiliesForCompression(HTable table,
+      Map<String, Compression.Algorithm> familyToCompression) throws IOException {
     HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
     for (Entry<String, Compression.Algorithm> entry : familyToCompression.entrySet()) {
       mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
@@ -515,21 +523,12 @@ public class TestHFileOutputFormat  {
     Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
   }
 
-  private void setupMockStartKeys(HTable table) throws IOException {
-    byte[][] mockKeys = new byte[][] {
-        HConstants.EMPTY_BYTE_ARRAY,
-        Bytes.toBytes("aaa"),
-        Bytes.toBytes("ggg"),
-        Bytes.toBytes("zzz")
-    };
-    Mockito.doReturn(mockKeys).when(table).getStartKeys();
-  }
-
   /**
    * @return a map from column family names to compression algorithms for
    *         testing column family compression. Column family names have special characters
    */
-  private Map<String, Compression.Algorithm> getMockColumnFamilies(int numCfs) {
+  private Map<String, Compression.Algorithm>
+      getMockColumnFamiliesForCompression (int numCfs) {
     Map<String, Compression.Algorithm> familyToCompression = new HashMap<String, Compression.Algorithm>();
     // use column family names having special characters
     if (numCfs-- > 0) {
@@ -548,6 +547,238 @@ public class TestHFileOutputFormat  {
   }
 
   /**
+   * Test for {@link HFileOutputFormat#configureBloomType(HTable,
+   * Configuration)} and {@link HFileOutputFormat#createFamilyBloomTypeMap
+   * (Configuration)}.
+   * Tests that the bloom type map is correctly serialized into
+   * and deserialized from configuration
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testSerializeDeserializeFamilyBloomTypeMap() throws IOException {
+    for (int numCfs = 0; numCfs <= 2; numCfs++) {
+      Configuration conf = new Configuration(this.util.getConfiguration());
+      Map<String, BloomType> familyToBloomType =
+          getMockColumnFamiliesForBloomType(numCfs);
+      HTable table = Mockito.mock(HTable.class);
+      setupMockColumnFamiliesForBloomType(table,
+          familyToBloomType);
+      HFileOutputFormat.configureBloomType(table, conf);
+
+      // read back family specific bloom filter type settings from the
+      // configuration
+      Map<byte[], BloomType> retrievedFamilyToBloomTypeMap =
+          HFileOutputFormat
+              .createFamilyBloomTypeMap(conf);
+
+      // test that we have a value for all column families that matches with the
+      // used mock values
+      for (Entry<String, BloomType> entry : familyToBloomType.entrySet()) {
+        assertEquals("BloomType configuration incorrect for column family:"
+            + entry.getKey(), entry.getValue(),
+            retrievedFamilyToBloomTypeMap.get(entry.getKey().getBytes()));
+      }
+    }
+  }
+
+  private void setupMockColumnFamiliesForBloomType(HTable table,
+      Map<String, BloomType> familyToBloomType) throws IOException {
+    HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
+    for (Entry<String, BloomType> entry : familyToBloomType.entrySet()) {
+      mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
+          .setMaxVersions(1)
+          .setBloomFilterType(entry.getValue())
+          .setBlockCacheEnabled(false)
+          .setTimeToLive(0));
+    }
+    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+  }
+
+  /**
+   * @return a map from column family names to bloom filter types for
+   *         testing column family bloom filter configuration. Column family names have special characters
+   */
+  private Map<String, BloomType>
+  getMockColumnFamiliesForBloomType (int numCfs) {
+    Map<String, BloomType> familyToBloomType =
+        new HashMap<String, BloomType>();
+    // use column family names having special characters
+    if (numCfs-- > 0) {
+      familyToBloomType.put("Family1!@#!@#&", BloomType.ROW);
+    }
+    if (numCfs-- > 0) {
+      familyToBloomType.put("Family2=asdads&!AASD",
+          BloomType.ROWCOL);
+    }
+    if (numCfs-- > 0) {
+      familyToBloomType.put("Family3", BloomType.NONE);
+    }
+    return familyToBloomType;
+  }
+
+  /**
+   * Test for {@link HFileOutputFormat#configureBlockSize(HTable,
+   * Configuration)} and {@link HFileOutputFormat#createFamilyBlockSizeMap
+   * (Configuration)}.
+   * Tests that the block size map is correctly serialized into
+   * and deserialized from configuration
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testSerializeDeserializeFamilyBlockSizeMap() throws IOException {
+    for (int numCfs = 0; numCfs <= 3; numCfs++) {
+      Configuration conf = new Configuration(this.util.getConfiguration());
+      Map<String, Integer> familyToBlockSize =
+          getMockColumnFamiliesForBlockSize(numCfs);
+      HTable table = Mockito.mock(HTable.class);
+      setupMockColumnFamiliesForBlockSize(table,
+          familyToBlockSize);
+      HFileOutputFormat.configureBlockSize(table, conf);
+
+      // read back family specific block size settings from the
+      // configuration
+      Map<byte[], Integer> retrievedFamilyToBlockSizeMap =
+          HFileOutputFormat
+              .createFamilyBlockSizeMap(conf);
+
+      // test that we have a value for all column families that matches with the
+      // used mock values
+      for (Entry<String, Integer> entry : familyToBlockSize.entrySet()
+          ) {
+        assertEquals("BlockSize configuration incorrect for column family:"
+            + entry.getKey(), entry.getValue(),
+            retrievedFamilyToBlockSizeMap.get(entry.getKey().getBytes()));
+      }
+    }
+  }
+
+  private void setupMockColumnFamiliesForBlockSize(HTable table,
+      Map<String, Integer> familyToBlockSize) throws IOException {
+    HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
+    for (Entry<String, Integer> entry : familyToBlockSize.entrySet()) {
+      mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
+          .setMaxVersions(1)
+          .setBlocksize(entry.getValue())
+          .setBlockCacheEnabled(false)
+          .setTimeToLive(0));
+    }
+    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+  }
+
+  /**
+   * @return a map from column family names to block sizes for
+   *         testing column family block size configuration. Column family names have special characters
+   */
+  private Map<String, Integer>
+  getMockColumnFamiliesForBlockSize (int numCfs) {
+    Map<String, Integer> familyToBlockSize =
+        new HashMap<String, Integer>();
+    // use column family names having special characters
+    if (numCfs-- > 0) {
+      familyToBlockSize.put("Family1!@#!@#&", 1234);
+    }
+    if (numCfs-- > 0) {
+      familyToBlockSize.put("Family2=asdads&!AASD",
+          Integer.MAX_VALUE);
+    }
+    if (numCfs-- > 0) {
+      familyToBlockSize.put("Family2=asdads&!AASD",
+          Integer.MAX_VALUE);
+    }
+    if (numCfs-- > 0) {
+      familyToBlockSize.put("Family3", 0);
+    }
+    return familyToBlockSize;
+  }
+
+  /**
+   * Test for {@link HFileOutputFormat#configureDataBlockEncoding(HTable,
+   * Configuration)} and {@link HFileOutputFormat#createFamilyDataBlockEncodingMap
+   * (Configuration)}.
+   * Tests that the data block encoding map is correctly serialized into
+   * and deserialized from configuration
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testSerializeDeserializeFamilyDataBlockEncodingMap() throws IOException {
+    for (int numCfs = 0; numCfs <= 3; numCfs++) {
+      Configuration conf = new Configuration(this.util.getConfiguration());
+      Map<String, DataBlockEncoding> familyToDataBlockEncoding =
+          getMockColumnFamiliesForDataBlockEncoding(numCfs);
+      HTable table = Mockito.mock(HTable.class);
+      setupMockColumnFamiliesForDataBlockEncoding(table,
+          familyToDataBlockEncoding);
+      HFileOutputFormat.configureDataBlockEncoding(table, conf);
+
+      // read back family specific data block encoding settings from the configuration
+      Map<byte[], HFileDataBlockEncoder> retrievedFamilyToDataBlockEncodingMap =
+          HFileOutputFormat
+              .createFamilyDataBlockEncodingMap(conf);
+
+      // test that we have a value for all column families that matches with the
+      // used mock values
+      for (Entry<String, DataBlockEncoding> entry : familyToDataBlockEncoding.entrySet()) {
+        assertEquals("DataBlockEncoding configuration incorrect for column family:"
+            + entry.getKey(), entry.getValue(),
+            retrievedFamilyToDataBlockEncodingMap.get(entry.getKey().getBytes
+                ()).getDataBlockEncoding());
+      }
+    }
+  }
+
+  private void setupMockColumnFamiliesForDataBlockEncoding(HTable table,
+      Map<String, DataBlockEncoding> familyToDataBlockEncoding) throws IOException {
+    HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
+    for (Entry<String, DataBlockEncoding> entry : familyToDataBlockEncoding.entrySet()) {
+      mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
+          .setMaxVersions(1)
+          .setDataBlockEncoding(entry.getValue())
+          .setBlockCacheEnabled(false)
+          .setTimeToLive(0));
+    }
+    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+  }
+
+  /**
+   * @return a map from column family names to data block encodings for
+   *         testing column family data block encoding. Column family names have special characters
+   */
+  private Map<String, DataBlockEncoding>
+      getMockColumnFamiliesForDataBlockEncoding (int numCfs) {
+    Map<String, DataBlockEncoding> familyToDataBlockEncoding =
+        new HashMap<String, DataBlockEncoding>();
+    // use column family names having special characters
+    if (numCfs-- > 0) {
+      familyToDataBlockEncoding.put("Family1!@#!@#&", DataBlockEncoding.DIFF);
+    }
+    if (numCfs-- > 0) {
+      familyToDataBlockEncoding.put("Family2=asdads&!AASD",
+          DataBlockEncoding.FAST_DIFF);
+    }
+    if (numCfs-- > 0) {
+      familyToDataBlockEncoding.put("Family2=asdads&!AASD",
+          DataBlockEncoding.PREFIX);
+    }
+    if (numCfs-- > 0) {
+      familyToDataBlockEncoding.put("Family3", DataBlockEncoding.NONE);
+    }
+    return familyToDataBlockEncoding;
+  }
+
+  private void setupMockStartKeys(HTable table) throws IOException {
+    byte[][] mockKeys = new byte[][] {
+        HConstants.EMPTY_BYTE_ARRAY,
+        Bytes.toBytes("aaa"),
+        Bytes.toBytes("ggg"),
+        Bytes.toBytes("zzz")
+    };
+    Mockito.doReturn(mockKeys).when(table).getStartKeys();
+  }
+
+  /**
    * Test that {@link HFileOutputFormat} RecordWriter uses compression settings
    * from the column family descriptor
    */
@@ -570,7 +801,7 @@ public class TestHFileOutputFormat  {
       configuredCompression.put(Bytes.toString(family),
                                 supportedAlgos[familyIndex++ % supportedAlgos.length]);
     }
-    setupMockColumnFamilies(table, configuredCompression);
+    setupMockColumnFamiliesForCompression(table, configuredCompression);
 
     // set up the table to return some mock keys
     setupMockStartKeys(table);

Modified: hbase/branches/0.96/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.96/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java?rev=1586704&r1=1586703&r2=1586704&view=diff
==============================================================================
--- hbase/branches/0.96/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java (original)
+++ hbase/branches/0.96/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java Fri Apr 11 15:55:03 2014
@@ -62,10 +62,14 @@ import org.apache.hadoop.hbase.client.Sc
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
 import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -472,36 +476,40 @@ public class TestHFileOutputFormat2  {
   }
 
   /**
-   * Test for
-   * {@link HFileOutputFormat2#createFamilyCompressionMap(Configuration)}. Tests
-   * that the compression map is correctly deserialized from configuration
+   * Test for {@link HFileOutputFormat2#configureCompression(HTable,
+   * Configuration)} and {@link HFileOutputFormat2#createFamilyCompressionMap
+   * (Configuration)}.
+   * Tests that the compression map is correctly serialized into
+   * and deserialized from configuration
    *
    * @throws IOException
    */
   @Test
-  public void testCreateFamilyCompressionMap() throws IOException {
+  public void testSerializeDeserializeFamilyCompressionMap() throws IOException {
     for (int numCfs = 0; numCfs <= 3; numCfs++) {
       Configuration conf = new Configuration(this.util.getConfiguration());
-      Map<String, Compression.Algorithm> familyToCompression = getMockColumnFamilies(numCfs);
+      Map<String, Compression.Algorithm> familyToCompression =
+          getMockColumnFamiliesForCompression(numCfs);
       HTable table = Mockito.mock(HTable.class);
-      setupMockColumnFamilies(table, familyToCompression);
+      setupMockColumnFamiliesForCompression(table, familyToCompression);
       HFileOutputFormat2.configureCompression(table, conf);
 
       // read back family specific compression setting from the configuration
-      Map<byte[], String> retrievedFamilyToCompressionMap = HFileOutputFormat2.createFamilyCompressionMap(conf);
+      Map<byte[], Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat2
+          .createFamilyCompressionMap(conf);
 
       // test that we have a value for all column families that matches with the
       // used mock values
       for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
-        assertEquals("Compression configuration incorrect for column family:" + entry.getKey(), entry.getValue()
-                     .getName(), retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
+        assertEquals("Compression configuration incorrect for column family:"
+            + entry.getKey(), entry.getValue(),
+            retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
       }
     }
   }
 
-  private void setupMockColumnFamilies(HTable table,
-    Map<String, Compression.Algorithm> familyToCompression) throws IOException
-  {
+  private void setupMockColumnFamiliesForCompression(HTable table,
+      Map<String, Compression.Algorithm> familyToCompression) throws IOException {
     HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
     for (Entry<String, Compression.Algorithm> entry : familyToCompression.entrySet()) {
       mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
@@ -513,22 +521,14 @@ public class TestHFileOutputFormat2  {
     Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
   }
 
-  private void setupMockStartKeys(HTable table) throws IOException {
-    byte[][] mockKeys = new byte[][] {
-        HConstants.EMPTY_BYTE_ARRAY,
-        Bytes.toBytes("aaa"),
-        Bytes.toBytes("ggg"),
-        Bytes.toBytes("zzz")
-    };
-    Mockito.doReturn(mockKeys).when(table).getStartKeys();
-  }
-
   /**
    * @return a map from column family names to compression algorithms for
    *         testing column family compression. Column family names have special characters
    */
-  private Map<String, Compression.Algorithm> getMockColumnFamilies(int numCfs) {
-    Map<String, Compression.Algorithm> familyToCompression = new HashMap<String, Compression.Algorithm>();
+  private Map<String, Compression.Algorithm>
+      getMockColumnFamiliesForCompression (int numCfs) {
+    Map<String, Compression.Algorithm> familyToCompression
+      = new HashMap<String, Compression.Algorithm>();
     // use column family names having special characters
     if (numCfs-- > 0) {
       familyToCompression.put("Family1!@#!@#&", Compression.Algorithm.LZO);
@@ -546,6 +546,239 @@ public class TestHFileOutputFormat2  {
   }
 
   /**
+   * Test for {@link HFileOutputFormat2#configureBloomType(HTable,
+   * Configuration)} and {@link HFileOutputFormat2#createFamilyBloomTypeMap
+   * (Configuration)}.
+   * Tests that the bloom type map is correctly serialized into
+   * and deserialized from configuration
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testSerializeDeserializeFamilyBloomTypeMap() throws IOException {
+    for (int numCfs = 0; numCfs <= 2; numCfs++) {
+      Configuration conf = new Configuration(this.util.getConfiguration());
+      Map<String, BloomType> familyToBloomType =
+          getMockColumnFamiliesForBloomType(numCfs);
+      HTable table = Mockito.mock(HTable.class);
+      setupMockColumnFamiliesForBloomType(table,
+          familyToBloomType);
+      HFileOutputFormat2.configureBloomType(table, conf);
+
+      // read back family specific bloom filter type settings from the
+      // configuration
+      Map<byte[], BloomType> retrievedFamilyToBloomTypeMap =
+          HFileOutputFormat2
+              .createFamilyBloomTypeMap(conf);
+
+      // test that we have a value for all column families that matches with the
+      // used mock values
+      for (Entry<String, BloomType> entry : familyToBloomType.entrySet()) {
+        assertEquals("BloomType configuration incorrect for column family:"
+            + entry.getKey(), entry.getValue(),
+            retrievedFamilyToBloomTypeMap.get(entry.getKey().getBytes()));
+      }
+    }
+  }
+
+  private void setupMockColumnFamiliesForBloomType(HTable table,
+      Map<String, BloomType> familyToBloomType) throws IOException {
+    HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
+    for (Entry<String, BloomType> entry : familyToBloomType.entrySet()) {
+      mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
+          .setMaxVersions(1)
+          .setBloomFilterType(entry.getValue())
+          .setBlockCacheEnabled(false)
+          .setTimeToLive(0));
+    }
+    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+  }
+
+  /**
+   * @return a map from column family names to bloom filter types for
+   *         testing column family bloom filter configuration. Column family names have special characters
+   */
+  private Map<String, BloomType>
+  getMockColumnFamiliesForBloomType (int numCfs) {
+    Map<String, BloomType> familyToBloomType =
+        new HashMap<String, BloomType>();
+    // use column family names having special characters
+    if (numCfs-- > 0) {
+      familyToBloomType.put("Family1!@#!@#&", BloomType.ROW);
+    }
+    if (numCfs-- > 0) {
+      familyToBloomType.put("Family2=asdads&!AASD",
+          BloomType.ROWCOL);
+    }
+    if (numCfs-- > 0) {
+      familyToBloomType.put("Family3", BloomType.NONE);
+    }
+    return familyToBloomType;
+  }
+
+  /**
+   * Test for {@link HFileOutputFormat2#configureBlockSize(HTable,
+   * Configuration)} and {@link HFileOutputFormat2#createFamilyBlockSizeMap
+   * (Configuration)}.
+   * Tests that the block size map is correctly serialized into
+   * and deserialized from configuration
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testSerializeDeserializeFamilyBlockSizeMap() throws IOException {
+    for (int numCfs = 0; numCfs <= 3; numCfs++) {
+      Configuration conf = new Configuration(this.util.getConfiguration());
+      Map<String, Integer> familyToBlockSize =
+          getMockColumnFamiliesForBlockSize(numCfs);
+      HTable table = Mockito.mock(HTable.class);
+      setupMockColumnFamiliesForBlockSize(table,
+          familyToBlockSize);
+      HFileOutputFormat2.configureBlockSize(table, conf);
+
+      // read back family specific block size settings from the
+      // configuration
+      Map<byte[], Integer> retrievedFamilyToBlockSizeMap =
+          HFileOutputFormat2
+              .createFamilyBlockSizeMap(conf);
+
+      // test that we have a value for all column families that matches with the
+      // used mock values
+      for (Entry<String, Integer> entry : familyToBlockSize.entrySet()
+          ) {
+        assertEquals("BlockSize configuration incorrect for column family:"
+            + entry.getKey(), entry.getValue(),
+            retrievedFamilyToBlockSizeMap.get(entry.getKey().getBytes()));
+      }
+    }
+  }
+
+  private void setupMockColumnFamiliesForBlockSize(HTable table,
+      Map<String, Integer> familyToBlockSize) throws IOException {
+    HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
+    for (Entry<String, Integer> entry : familyToBlockSize.entrySet()) {
+      mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
+          .setMaxVersions(1)
+          .setBlocksize(entry.getValue())
+          .setBlockCacheEnabled(false)
+          .setTimeToLive(0));
+    }
+    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+  }
+
+  /**
+   * @return a map from column family names to block sizes for
+   *         testing column family block size configuration. Column family names have special characters
+   */
+  private Map<String, Integer>
+  getMockColumnFamiliesForBlockSize (int numCfs) {
+    Map<String, Integer> familyToBlockSize =
+        new HashMap<String, Integer>();
+    // use column family names having special characters
+    if (numCfs-- > 0) {
+      familyToBlockSize.put("Family1!@#!@#&", 1234);
+    }
+    if (numCfs-- > 0) {
+      familyToBlockSize.put("Family2=asdads&!AASD",
+          Integer.MAX_VALUE);
+    }
+    if (numCfs-- > 0) {
+      familyToBlockSize.put("Family2=asdads&!AASD",
+          Integer.MAX_VALUE);
+    }
+    if (numCfs-- > 0) {
+      familyToBlockSize.put("Family3", 0);
+    }
+    return familyToBlockSize;
+  }
+
+  /**
+   * Test for {@link HFileOutputFormat2#configureDataBlockEncoding(HTable,
+   * Configuration)} and {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap
+   * (Configuration)}.
+   * Tests that the data block encoding map is correctly serialized into
+   * and deserialized from configuration
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testSerializeDeserializeFamilyDataBlockEncodingMap() throws IOException {
+    for (int numCfs = 0; numCfs <= 3; numCfs++) {
+      Configuration conf = new Configuration(this.util.getConfiguration());
+      Map<String, DataBlockEncoding> familyToDataBlockEncoding =
+        getMockColumnFamiliesForDataBlockEncoding(numCfs);
+      HTable table = Mockito.mock(HTable.class);
+      setupMockColumnFamiliesForDataBlockEncoding(table,
+          familyToDataBlockEncoding);
+      HFileOutputFormat2.configureDataBlockEncoding(table, conf);
+
+      // read back family specific data block encoding settings from the
+      // configuration
+      Map<byte[], HFileDataBlockEncoder> retrievedFamilyToDataBlockEncodingMap =
+          HFileOutputFormat2
+          .createFamilyDataBlockEncodingMap(conf);
+
+      // test that we have a value for all column families that matches with the
+      // used mock values
+      for (Entry<String, DataBlockEncoding> entry : familyToDataBlockEncoding.entrySet()) {
+        assertEquals("DataBlockEncoding configuration incorrect for column family:"
+            + entry.getKey(), entry.getValue(),
+            retrievedFamilyToDataBlockEncodingMap.get(entry.getKey().getBytes
+                ()).getDataBlockEncoding());
+      }
+    }
+  }
+
+  private void setupMockColumnFamiliesForDataBlockEncoding(HTable table,
+      Map<String, DataBlockEncoding> familyToDataBlockEncoding) throws IOException {
+    HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
+    for (Entry<String, DataBlockEncoding> entry : familyToDataBlockEncoding.entrySet()) {
+      mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
+          .setMaxVersions(1)
+          .setDataBlockEncoding(entry.getValue())
+          .setBlockCacheEnabled(false)
+          .setTimeToLive(0));
+    }
+    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+  }
+
+  /**
+   * @return a map from column family names to data block encodings for
+   *         testing column family data block encoding. Column family names have special characters
+   */
+  private Map<String, DataBlockEncoding>
+      getMockColumnFamiliesForDataBlockEncoding (int numCfs) {
+    Map<String, DataBlockEncoding> familyToDataBlockEncoding =
+        new HashMap<String, DataBlockEncoding>();
+    // use column family names having special characters
+    if (numCfs-- > 0) {
+      familyToDataBlockEncoding.put("Family1!@#!@#&", DataBlockEncoding.DIFF);
+    }
+    if (numCfs-- > 0) {
+      familyToDataBlockEncoding.put("Family2=asdads&!AASD",
+          DataBlockEncoding.FAST_DIFF);
+    }
+    if (numCfs-- > 0) {
+      familyToDataBlockEncoding.put("Family2=asdads&!AASD",
+          DataBlockEncoding.PREFIX);
+    }
+    if (numCfs-- > 0) {
+      familyToDataBlockEncoding.put("Family3", DataBlockEncoding.NONE);
+    }
+    return familyToDataBlockEncoding;
+  }
+
+  private void setupMockStartKeys(HTable table) throws IOException {
+    byte[][] mockKeys = new byte[][] {
+        HConstants.EMPTY_BYTE_ARRAY,
+        Bytes.toBytes("aaa"),
+        Bytes.toBytes("ggg"),
+        Bytes.toBytes("zzz")
+    };
+    Mockito.doReturn(mockKeys).when(table).getStartKeys();
+  }
+
+  /**
    * Test that {@link HFileOutputFormat2} RecordWriter uses compression settings
    * from the column family descriptor
    */
@@ -568,7 +801,7 @@ public class TestHFileOutputFormat2  {
       configuredCompression.put(Bytes.toString(family),
                                 supportedAlgos[familyIndex++ % supportedAlgos.length]);
     }
-    setupMockColumnFamilies(table, configuredCompression);
+    setupMockColumnFamiliesForCompression(table, configuredCompression);
 
     // set up the table to return some mock keys
     setupMockStartKeys(table);