Posted to commits@hbase.apache.org by te...@apache.org on 2013/03/29 20:46:43 UTC

svn commit: r1462622 - in /hbase/branches/0.94/src: main/java/org/apache/hadoop/hbase/ main/java/org/apache/hadoop/hbase/regionserver/ main/ruby/ main/ruby/hbase/ main/ruby/shell/commands/ test/java/org/apache/hadoop/hbase/ test/java/org/apache/hadoop/...

Author: tedyu
Date: Fri Mar 29 19:46:43 2013
New Revision: 1462622

URL: http://svn.apache.org/r1462622
Log:
HBASE-8176 Backport HBASE-5335 "Dynamic Schema Configurations" to 0.94 (clockfly)
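
To make the feature concrete before the diff: per-table and per-column-family
configuration is set through the descriptors and merged over hbase-site.xml on
the region server. A minimal client-side sketch (the table and family names are
illustrative; the calls mirror TestFromClientSide3 below):

    // Override a config knob for a single column family. The region server
    // layers descriptor values over hbase-site.xml via CompoundConfiguration.
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTableDescriptor htd = new HTableDescriptor(admin.getTableDescriptor(TABLE));
    HColumnDescriptor hcd = new HColumnDescriptor(htd.getFamily(FAMILY));
    hcd.setValue("hbase.hstore.compaction.min", "2"); // CF-level override
    htd.addFamily(hcd);  // replaces the existing entry for this family
    admin.modifyTable(TABLE, htd);
    // Passing null removes the override again:
    //   hcd.setValue("hbase.hstore.compaction.min", null);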


Added:
    hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/CompoundConfiguration.java
    hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
    hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundConfiguration.java
Modified:
    hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
    hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HConstants.java
    hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
    hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
    hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
    hbase/branches/0.94/src/main/ruby/hbase.rb
    hbase/branches/0.94/src/main/ruby/hbase/admin.rb
    hbase/branches/0.94/src/main/ruby/shell/commands/alter.rb
    hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
    hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
    hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java

Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java?rev=1462622&r1=1462621&r2=1462622&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java Fri Mar 29 19:46:43 2013
@@ -24,8 +24,9 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
-
+import java.util.Set;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.Compression;
@@ -179,6 +180,8 @@ public class HColumnDescriptor implement
   public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false;
 
   private final static Map<String, String> DEFAULT_VALUES = new HashMap<String, String>();
+  private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
+    = new HashSet<ImmutableBytesWritable>();
   static {
       DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER);
       DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE));
@@ -202,13 +205,16 @@ public class HColumnDescriptor implement
           String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE));
       DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE,
           String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE));
+      for (String s : DEFAULT_VALUES.keySet()) {
+        RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
+      }
   }
 
   // Column family name
   private byte [] name;
 
   // Column metadata
-  protected Map<ImmutableBytesWritable,ImmutableBytesWritable> values =
+  protected final Map<ImmutableBytesWritable,ImmutableBytesWritable> values =
     new HashMap<ImmutableBytesWritable,ImmutableBytesWritable>();
 
   /*
@@ -469,6 +475,7 @@ public class HColumnDescriptor implement
    * @return All values.
    */
   public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
+    // shallow pointer copy
     return Collections.unmodifiableMap(values);
   }
 
@@ -496,7 +503,11 @@ public class HColumnDescriptor implement
    * @return this (for chained invocation)
    */
   public HColumnDescriptor setValue(String key, String value) {
-    setValue(Bytes.toBytes(key), Bytes.toBytes(value));
+    if (value == null) {
+      remove(Bytes.toBytes(key));
+    } else {
+      setValue(Bytes.toBytes(key), Bytes.toBytes(value));
+    }
     return this;
   }
 
@@ -863,16 +874,7 @@ public class HColumnDescriptor implement
     s.append(" => '");
     s.append(Bytes.toString(name));
     s.append("'");
-    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
-        values.entrySet()) {
-      String key = Bytes.toString(e.getKey().get());
-      String value = Bytes.toString(e.getValue().get());
-      s.append(", ");
-      s.append(key);
-      s.append(" => '");
-      s.append(value);
-      s.append("'");
-    }
+    s.append(getValues(true));
     s.append('}');
     return s.toString();
   }
@@ -887,20 +889,62 @@ public class HColumnDescriptor implement
     s.append(" => '");
     s.append(Bytes.toString(name));
     s.append("'");
-    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
-        values.entrySet()) {
-      String key = Bytes.toString(e.getKey().get());
-      String value = Bytes.toString(e.getValue().get());
-      if(DEFAULT_VALUES.get(key) == null || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
+    s.append(getValues(false));
+    s.append('}');
+    return s.toString();
+  }
+
+  private StringBuilder getValues(boolean printDefaults) {
+    StringBuilder s = new StringBuilder();
+
+    boolean hasConfigKeys = false;
+
+    // print all reserved keys first
+    for (ImmutableBytesWritable k : values.keySet()) {
+      if (!RESERVED_KEYWORDS.contains(k)) {
+        hasConfigKeys = true;
+        continue;
+      }
+      String key = Bytes.toString(k.get());
+      String value = Bytes.toString(values.get(k).get());
+      if (printDefaults
+          || !DEFAULT_VALUES.containsKey(key)
+          || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
         s.append(", ");
         s.append(key);
-        s.append(" => '");
-        s.append(value);
-        s.append("'");
+
+        s.append(" => ");
+        s.append('\'').append(value).append('\'');
       }
     }
-    s.append('}');
-    return s.toString();
+
+    // print all non-reserved, advanced config keys as a separate subset
+    if (hasConfigKeys) {
+      s.append(", ");
+      s.append(HConstants.CONFIG).append(" => ");
+      s.append('{');
+      boolean printComma = false;
+      for (ImmutableBytesWritable k : values.keySet()) {
+        if (RESERVED_KEYWORDS.contains(k)) {
+          continue;
+        }
+        String key = Bytes.toString(k.get());
+        String value = Bytes.toString(values.get(k).get());
+        if (printComma) {
+          s.append(", ");
+        }
+        printComma = true;
+        s.append('\'').append(key).append('\'');
+        s.append(" => ");
+        s.append('\'').append(value).append('\'');
+      }
+      s.append('}');
+    }
+    return s;
+  }
+
+  public static Map<String, String> getDefaultValues() {
+    return Collections.unmodifiableMap(DEFAULT_VALUES);
   }
 
   /**

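The rewritten getValues(boolean) changes the string form of a column family:
reserved keys print inline as before, while any non-reserved key is grouped
under a CONFIG => {...} subset. A sketch of the resulting shape (map ordering
is not guaranteed, since values is a HashMap):

    HColumnDescriptor hcd = new HColumnDescriptor("f");
    hcd.setValue("hbase.hstore.compaction.min", "5"); // non-reserved key
    // toStringCustomizedValues() suppresses defaulted reserved keys, so this
    // prints roughly:
    //   {NAME => 'f', CONFIG => {'hbase.hstore.compaction.min' => '5'}}
    System.out.println(hcd.toStringCustomizedValues());
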
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HConstants.java?rev=1462622&r1=1462621&r2=1462622&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HConstants.java Fri Mar 29 19:46:43 2013
@@ -414,6 +414,7 @@ public final class HConstants {
   public static final String NAME = "NAME";
   public static final String VERSIONS = "VERSIONS";
   public static final String IN_MEMORY = "IN_MEMORY";
+  public static final String CONFIG = "CONFIG";
 
   /**
    * This is a retry backoff multiplier table similar to the BSD TCP syn

Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java?rev=1462622&r1=1462621&r2=1462622&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java Fri Mar 29 19:46:43 2013
@@ -30,6 +30,8 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.HashSet;
+import java.util.TreeSet;
 import java.util.TreeMap;
 import java.util.regex.Matcher;
 
@@ -65,7 +67,7 @@ public class HTableDescriptor implements
    * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
    * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
    */
-  protected Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
+  protected final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
     new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();
 
   private static final String FAMILIES = "FAMILIES";
@@ -160,6 +162,26 @@ public class HTableDescriptor implements
    * the contents are flushed to the store files
    */
   public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;
+  private final static Map<String, String> DEFAULT_VALUES
+    = new HashMap<String, String>();
+  private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
+    = new HashSet<ImmutableBytesWritable>();
+  static {
+    DEFAULT_VALUES.put(MAX_FILESIZE,
+        String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
+    DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
+    DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
+        String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
+    DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
+        String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
+    for (String s : DEFAULT_VALUES.keySet()) {
+      RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
+    }
+    RESERVED_KEYWORDS.add(IS_ROOT_KEY);
+    RESERVED_KEYWORDS.add(IS_META_KEY);
+  }
+
 
   private volatile Boolean meta = null;
   private volatile Boolean root = null;
@@ -429,7 +451,8 @@ public class HTableDescriptor implements
    * @see #values
    */
   public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
-     return Collections.unmodifiableMap(values);
+    // shallow pointer copy 
+    return Collections.unmodifiableMap(values);
   }
 
   /**
@@ -469,7 +492,11 @@ public class HTableDescriptor implements
    * @see #values
    */
   public void setValue(String key, String value) {
-    setValue(Bytes.toBytes(key), Bytes.toBytes(value));
+    if (value == null) {
+      remove(Bytes.toBytes(key));
+    } else {
+      setValue(Bytes.toBytes(key), Bytes.toBytes(value));
+    }
   }
 
   /**
@@ -681,36 +708,11 @@ public class HTableDescriptor implements
   @Override
   public String toString() {
     StringBuilder s = new StringBuilder();
-    s.append('{');
-    s.append(HConstants.NAME);
-    s.append(" => '");
-    s.append(Bytes.toString(name));
-    s.append("'");
-    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
-        values.entrySet()) {
-      String key = Bytes.toString(e.getKey().get());
-      String value = Bytes.toString(e.getValue().get());
-      if (key == null) {
-        continue;
-      }
-      String upperCase = key.toUpperCase();
-      if (upperCase.equals(IS_ROOT) || upperCase.equals(IS_META)) {
-        // Skip. Don't bother printing out read-only values if false.
-        if (value.toLowerCase().equals(Boolean.FALSE.toString())) {
-          continue;
-        }
-      }
-      s.append(", ");
-      s.append(Bytes.toString(e.getKey().get()));
-      s.append(" => '");
-      s.append(Bytes.toString(e.getValue().get()));
-      s.append("'");
-    }
-    s.append(", ");
-    s.append(FAMILIES);
-    s.append(" => ");
-    s.append(families.values());
-    s.append('}');
+    s.append('\'').append(Bytes.toString(name)).append('\'');
+    s.append(getValues(true));
+    for (HColumnDescriptor f : families.values()) {
+      s.append(", ").append(f);
+    }
     return s.toString();
   }
 
@@ -720,44 +722,87 @@ public class HTableDescriptor implements
    */
   public String toStringCustomizedValues() {
     StringBuilder s = new StringBuilder();
-    s.append('{');
-    s.append(HConstants.NAME);
-    s.append(" => '");
-    s.append(Bytes.toString(name));
-    s.append("'");
-    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
-        values.entrySet()) {
-      String key = Bytes.toString(e.getKey().get());
-      String value = Bytes.toString(e.getValue().get());
-      if (key == null) {
+    s.append('\'').append(Bytes.toString(name)).append('\'');
+    s.append(getValues(false));
+    for(HColumnDescriptor hcd : families.values()) {
+      s.append(", ").append(hcd.toStringCustomizedValues());
+    }
+    return s.toString();
+  }
+
+  private StringBuilder getValues(boolean printDefaults) {
+    StringBuilder s = new StringBuilder();
+
+    // step 1: set partitioning and pruning
+    Set<ImmutableBytesWritable> reservedKeys = new TreeSet<ImmutableBytesWritable>();
+    Set<ImmutableBytesWritable> configKeys = new TreeSet<ImmutableBytesWritable>();
+    for (ImmutableBytesWritable k : values.keySet()) {
+      if (k == null || k.get() == null) continue;
+      String key = Bytes.toString(k.get());
+      // in this section, print out reserved keywords + coprocessor info
+      if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) {
+        configKeys.add(k);        
         continue;
       }
-      String upperCase = key.toUpperCase();
-      if (upperCase.equals(IS_ROOT) || upperCase.equals(IS_META)) {
+      // only print out IS_ROOT/IS_META if true
+      String value = Bytes.toString(values.get(k).get());
+      if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
         // Skip. Don't bother printing out read-only values if false.
         if (value.toLowerCase().equals(Boolean.FALSE.toString())) {
           continue;
         }
       }
+
+      // see if a reserved key is a default value. may not want to print it out
+      if (printDefaults
+          || !DEFAULT_VALUES.containsKey(key)
+          || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
+        reservedKeys.add(k);
+      }
+    }
+
+    // early exit optimization
+    if (reservedKeys.isEmpty() && configKeys.isEmpty()) return s;
+
+    // step 2: printing
+    s.append(", {METHOD => 'table_att'");
+
+    // print all reserved keys first
+    for (ImmutableBytesWritable k : reservedKeys) {
+      String key = Bytes.toString(k.get());
+      String value = Bytes.toString(values.get(k).get());
+
       s.append(", ");
-      s.append(Bytes.toString(e.getKey().get()));
-      s.append(" => '");
-      s.append(Bytes.toString(e.getValue().get()));
-      s.append("'");
-    }
-    s.append(", ");
-    s.append(FAMILIES);
-    s.append(" => [");
-    int size = families.values().size();
-    int i = 0;
-    for(HColumnDescriptor hcd : families.values()) {
-      s.append(hcd.toStringCustomizedValues());
-      i++;
-      if( i != size)
-        s.append(", ");
+      s.append(key);
+      s.append(" => ");
+      s.append('\'').append(value).append('\'');
     }
-    s.append("]}");
-    return s.toString();
+    if (!configKeys.isEmpty()) {
+      // print all non-reserved, advanced config keys as a separate subset
+      s.append(", ");
+      s.append(HConstants.CONFIG).append(" => ");
+      s.append("{");
+      boolean printComma = false;
+      for (ImmutableBytesWritable k : configKeys) {
+        String key = Bytes.toString(k.get());
+        String value = Bytes.toString(values.get(k).get());
+        if (printComma) s.append(", ");
+        printComma = true;
+        s.append('\'').append(key).append('\'');
+        s.append(" => ");
+        s.append('\'').append(value).append('\'');
+      }
+      s.append("}");
+    }
+
+    s.append('}'); // end METHOD
+    return s;
+  }
+
+  public static Map<String, String> getDefaultValues() {
+    return Collections.unmodifiableMap(DEFAULT_VALUES);
   }
 
   /**

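Two behavioral notes on the HTableDescriptor changes: setValue(key, null) now
removes the key instead of storing a null, and toStringCustomizedValues() emits
non-default attributes inside a {METHOD => 'table_att', ...} clause that the
shell can round-trip. A sketch (output shape inferred from getValues(false)
above):

    HTableDescriptor htd = new HTableDescriptor("t1");
    htd.setValue("hbase.hstore.compaction.min", "5");
    // prints roughly:
    //   't1', {METHOD => 'table_att', CONFIG => {'hbase.hstore.compaction.min' => '5'}}
    System.out.println(htd.toStringCustomizedValues());
    htd.setValue("hbase.hstore.compaction.min", null); // removes the key again
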
Added: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/CompoundConfiguration.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/CompoundConfiguration.java?rev=1462622&view=auto
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/CompoundConfiguration.java (added)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/CompoundConfiguration.java Fri Mar 29 19:46:43 2013
@@ -0,0 +1,466 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * Do a shallow merge of multiple KV configuration pools. This is a very useful
+ * utility class to easily add per-object configurations in addition to wider
+ * scope settings. This is different from Configuration.addResource()
+ * functionality, which performs a deep merge and mutates the common data
+ * structure.
+ * <p>
+ * For clarity: the shallow merge allows the user to mutate either of the
+ * configuration objects and have changes reflected everywhere. In contrast to a
+ * deep merge, that requires you to explicitly know all applicable copies to
+ * propagate changes.
+ * <p>
+ * This class is package private because we expect significant refactoring here
+ * on the HBase side when certain HDFS changes are added & ubiquitous. Will
+ * revisit expanding access at that point.
+ */
+@InterfaceAudience.Private
+class CompoundConfiguration extends Configuration {
+  /**
+   * Default Constructor. Initializes empty configuration
+   */
+  public CompoundConfiguration() {
+  }
+
+  // Devs: these APIs are the same contract as their counterparts in
+  // Configuration.java
+  private static interface ImmutableConfigMap {
+    String get(String key);
+    String getRaw(String key);
+    Class<?> getClassByName(String name) throws ClassNotFoundException;
+    int size();
+  }
+
+  protected List<ImmutableConfigMap> configs
+    = new ArrayList<ImmutableConfigMap>();
+
+  /****************************************************************************
+   * These initial APIs actually required original thought
+   ***************************************************************************/
+
+  /**
+   * Add Hadoop Configuration object to config list
+   * @param conf configuration object
+   * @return this, for builder pattern
+   */
+  public CompoundConfiguration add(final Configuration conf) {
+    if (conf instanceof CompoundConfiguration) {
+      this.configs.addAll(0, ((CompoundConfiguration) conf).configs);
+      return this;
+    }
+    // put new config at the front of the list (top priority)
+    this.configs.add(0, new ImmutableConfigMap() {
+      Configuration c = conf;
+
+      @Override
+      public String get(String key) {
+        return c.get(key);
+      }
+
+      @Override
+      public String getRaw(String key) {
+        return c.getRaw(key);
+      }
+
+      @Override
+      public Class<?> getClassByName(String name)
+          throws ClassNotFoundException {
+        return c.getClassByName(name);
+      }
+
+      @Override
+      public int size() {
+        return c.size();
+      }
+
+      @Override
+      public String toString() {
+        return c.toString();
+      }
+    });
+    return this;
+  }
+
+  /**
+   * Add ImmutableBytesWritable map to config list. This map is generally
+   * created by HTableDescriptor or HColumnDescriptor, but can be abstractly
+   * used.
+   *
+   * @param map
+   *          ImmutableBytesWritable map
+   * @return this, for builder pattern
+   */
+  public CompoundConfiguration add(
+      final Map<ImmutableBytesWritable, ImmutableBytesWritable> map) {
+    // put new map at the front of the list (top priority)
+    this.configs.add(0, new ImmutableConfigMap() {
+      Map<ImmutableBytesWritable, ImmutableBytesWritable> m = map;
+
+      @Override
+      public String get(String key) {
+        ImmutableBytesWritable ibw = new ImmutableBytesWritable(Bytes
+            .toBytes(key));
+        if (!m.containsKey(ibw))
+          return null;
+        ImmutableBytesWritable value = m.get(ibw);
+        if (value == null || value.get() == null)
+          return null;
+        return Bytes.toString(value.get());
+      }
+
+      @Override
+      public String getRaw(String key) {
+        return get(key);
+      }
+
+      @Override
+      public Class<?> getClassByName(String name)
+      throws ClassNotFoundException {
+        return null;
+      }
+
+      @Override
+      public int size() {
+        return m.size();
+      }
+
+      @Override
+      public String toString() {
+        return m.toString();
+      }
+    });
+    return this;
+  }
+
+  @Override
+  public String toString() {
+    StringBuffer sb = new StringBuffer();
+    sb.append("CompoundConfiguration: " + this.configs.size() + " configs");
+    for (ImmutableConfigMap m : this.configs) {
+      sb.append(m);
+    }
+    return sb.toString();
+  }
+
+  @Override
+  public String get(String key) {
+    for (ImmutableConfigMap m : this.configs) {
+      String value = m.get(key);
+      if (value != null) {
+        return value;
+      }
+    }
+    return null;
+  }
+
+  @Override
+  public String getRaw(String key) {
+    for (ImmutableConfigMap m : this.configs) {
+      String value = m.getRaw(key);
+      if (value != null) {
+        return value;
+      }
+    }
+    return null;
+  }
+
+  @Override
+  public Class<?> getClassByName(String name) throws ClassNotFoundException {
+    for (ImmutableConfigMap m : this.configs) {
+      try {
+        Class<?> value = m.getClassByName(name);
+        if (value != null) {
+          return value;
+        }
+      } catch (ClassNotFoundException e) {
+        // don't propagate an exception until all configs fail
+        continue;
+      }
+    }
+    throw new ClassNotFoundException();
+  }
+
+  @Override
+  public int size() {
+    int ret = 0;
+    for (ImmutableConfigMap m : this.configs) {
+      ret += m.size();
+    }
+    return ret;
+  }
+
+  /***************************************************************************
+   * You should just ignore everything below this line unless there's a bug in
+   * Configuration.java...
+   *
+   * Below get APIs are directly copied from Configuration.java Oh, how I wish
+   * this wasn't so! A tragically-sad example of why you use interfaces instead
+   * of inheritance.
+   *
+   * Why the duplication? We basically need to override Configuration.getProps
+   * or we'd need protected access to Configuration.properties so we can modify
+   * that pointer. There are a bunch of functions in the base Configuration that
+   * call getProps() and we need to use our derived version instead of the base
+   * version. We need to make a generic implementation that works across all
+   * HDFS versions. We should modify Configuration.properties in HDFS 1.0 to be
+   * protected, but we still need to have this code until that patch makes it to
+   * all the HDFS versions we support.
+   ***************************************************************************/
+
+  @Override
+  public String get(String name, String defaultValue) {
+    String ret = get(name);
+    return ret == null ? defaultValue : ret;
+  }
+
+  @Override
+  public int getInt(String name, int defaultValue) {
+    String valueString = get(name);
+    if (valueString == null)
+      return defaultValue;
+    try {
+      String hexString = getHexDigits(valueString);
+      if (hexString != null) {
+        return Integer.parseInt(hexString, 16);
+      }
+      return Integer.parseInt(valueString);
+    } catch (NumberFormatException e) {
+      return defaultValue;
+    }
+  }
+
+  @Override
+  public long getLong(String name, long defaultValue) {
+    String valueString = get(name);
+    if (valueString == null)
+      return defaultValue;
+    try {
+      String hexString = getHexDigits(valueString);
+      if (hexString != null) {
+        return Long.parseLong(hexString, 16);
+      }
+      return Long.parseLong(valueString);
+    } catch (NumberFormatException e) {
+      return defaultValue;
+    }
+  }
+
+  protected String getHexDigits(String value) {
+    boolean negative = false;
+    String str = value;
+    String hexString = null;
+    if (value.startsWith("-")) {
+      negative = true;
+      str = value.substring(1);
+    }
+    if (str.startsWith("0x") || str.startsWith("0X")) {
+      hexString = str.substring(2);
+      if (negative) {
+        hexString = "-" + hexString;
+      }
+      return hexString;
+    }
+    return null;
+  }
+
+  @Override
+  public float getFloat(String name, float defaultValue) {
+    String valueString = get(name);
+    if (valueString == null)
+      return defaultValue;
+    try {
+      return Float.parseFloat(valueString);
+    } catch (NumberFormatException e) {
+      return defaultValue;
+    }
+  }
+
+  @Override
+  public boolean getBoolean(String name, boolean defaultValue) {
+    String valueString = get(name);
+    if ("true".equals(valueString))
+      return true;
+    else if ("false".equals(valueString))
+      return false;
+    else return defaultValue;
+  }
+
+  @Override
+  public IntegerRanges getRange(String name, String defaultValue) {
+    return new IntegerRanges(get(name, defaultValue));
+  }
+
+  @Override
+  public Collection<String> getStringCollection(String name) {
+    String valueString = get(name);
+    return StringUtils.getStringCollection(valueString);
+  }
+
+  @Override
+  public String[] getStrings(String name) {
+    String valueString = get(name);
+    return StringUtils.getStrings(valueString);
+  }
+
+  @Override
+  public String[] getStrings(String name, String... defaultValue) {
+    String valueString = get(name);
+    if (valueString == null) {
+      return defaultValue;
+    } else {
+      return StringUtils.getStrings(valueString);
+    }
+  }
+
+  @Override
+  public Class<?>[] getClasses(String name, Class<?>... defaultValue) {
+    String[] classnames = getStrings(name);
+    if (classnames == null)
+      return defaultValue;
+    try {
+      Class<?>[] classes = new Class<?>[classnames.length];
+      for (int i = 0; i < classnames.length; i++) {
+        classes[i] = getClassByName(classnames[i]);
+      }
+      return classes;
+    } catch (ClassNotFoundException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Override
+  public Class<?> getClass(String name, Class<?> defaultValue) {
+    String valueString = get(name);
+    if (valueString == null)
+      return defaultValue;
+    try {
+      return getClassByName(valueString);
+    } catch (ClassNotFoundException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Override
+  public <U> Class<? extends U> getClass(String name,
+      Class<? extends U> defaultValue, Class<U> xface) {
+    try {
+      Class<?> theClass = getClass(name, defaultValue);
+      if (theClass != null && !xface.isAssignableFrom(theClass))
+        throw new RuntimeException(theClass + " not " + xface.getName());
+      else if (theClass != null)
+        return theClass.asSubclass(xface);
+      else
+        return null;
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  /*******************************************************************
+   * This class is immutable. Quickly abort any attempts to alter it *
+   *******************************************************************/
+
+  @Override
+  public void clear() {
+    throw new UnsupportedOperationException("Immutable Configuration");
+  }
+
+  @Override
+  public Iterator<Map.Entry<String, String>> iterator() {
+    throw new UnsupportedOperationException("Immutable Configuration");
+  }
+
+  @Override
+  public void set(String name, String value) {
+    throw new UnsupportedOperationException("Immutable Configuration");
+  }
+  @Override
+  public void setIfUnset(String name, String value) {
+    throw new UnsupportedOperationException("Immutable Configuration");
+  }
+  @Override
+  public void setInt(String name, int value) {
+    throw new UnsupportedOperationException("Immutable Configuration");
+  }
+  @Override
+  public void setLong(String name, long value) {
+    throw new UnsupportedOperationException("Immutable Configuration");
+  }
+  @Override
+  public void setFloat(String name, float value) {
+    throw new UnsupportedOperationException("Immutable Configuration");
+  }
+  @Override
+  public void setBoolean(String name, boolean value) {
+    throw new UnsupportedOperationException("Immutable Configuration");
+  }
+  @Override
+  public void setBooleanIfUnset(String name, boolean value) {
+    throw new UnsupportedOperationException("Immutable Configuration");
+  }
+  @Override
+  public void setStrings(String name, String... values) {
+    throw new UnsupportedOperationException("Immutable Configuration");
+  }
+  @Override
+  public void setClass(String name, Class<?> theClass, Class<?> xface) {
+    throw new UnsupportedOperationException("Immutable Configuration");
+  }
+  @Override
+  public void setClassLoader(ClassLoader classLoader) {
+    throw new UnsupportedOperationException("Immutable Configuration");
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    throw new UnsupportedOperationException("Immutable Configuration");
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    throw new UnsupportedOperationException("Immutable Configuration");
+  }
+
+  @Override
+  public void writeXml(OutputStream out) throws IOException {
+    throw new UnsupportedOperationException("Immutable Configuration");
+  }
+};

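Because add() pushes each new source to the front of the list, the most
recently added source wins on lookup. A short precedence sketch (runnable only
from within org.apache.hadoop.hbase.regionserver, since the class is
package-private):

    Configuration base = HBaseConfiguration.create();
    base.setInt("hbase.hstore.compaction.min", 3);

    Map<ImmutableBytesWritable, ImmutableBytesWritable> overrides =
        new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();
    overrides.put(
        new ImmutableBytesWritable(Bytes.toBytes("hbase.hstore.compaction.min")),
        new ImmutableBytesWritable(Bytes.toBytes("5")));

    Configuration merged = new CompoundConfiguration()
        .add(base)       // consulted last
        .add(overrides); // consulted first (front of the list)
    // merged.getInt("hbase.hstore.compaction.min", -1) returns 5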
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1462622&r1=1462621&r2=1462622&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Fri Mar 29 19:46:43 2013
@@ -243,6 +243,7 @@ public class HRegion implements HeapSize
   private final HLog log;
   private final FileSystem fs;
   private final Configuration conf;
+  final Configuration baseConf;
   private final int rowLockWaitDuration;
   static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000;
 
@@ -378,6 +379,7 @@ public class HRegion implements HeapSize
     this.conf = null;
     this.rowLockWaitDuration = DEFAULT_ROWLOCK_WAIT_DURATION;
     this.rsServices = null;
+    this.baseConf = null;
     this.fs = null;
     this.timestampSlop = HConstants.LATEST_TIMESTAMP;
     this.memstoreFlushSize = 0L;
@@ -396,6 +398,17 @@ public class HRegion implements HeapSize
     this.deferredLogSyncDisabled = false;
   }
 
+  /**
+   * HRegion copy constructor. Useful when reopening a closed region (normally
+   * for unit tests)
+   * @param other original object
+   */
+  public HRegion(HRegion other) {
+    this(other.getTableDir(), other.getLog(), other.getFilesystem(),
+        other.baseConf, other.getRegionInfo(), other.getTableDesc(), null);
+  }
+
   /**
   * HRegion constructor. This constructor should only be used for testing and
    * extensions.  Instances of HRegion should be instantiated with the
@@ -419,14 +432,24 @@ public class HRegion implements HeapSize
    *
    * @see HRegion#newHRegion(Path, HLog, FileSystem, Configuration, HRegionInfo, HTableDescriptor, RegionServerServices)
    */
-  public HRegion(Path tableDir, HLog log, FileSystem fs, Configuration conf,
+  public HRegion(Path tableDir, HLog log, FileSystem fs, Configuration confParam,
     final HRegionInfo regionInfo, final HTableDescriptor htd,
       RegionServerServices rsServices) {
     this.tableDir = tableDir;
     this.comparator = regionInfo.getComparator();
     this.log = log;
     this.fs = fs;
-    this.conf = conf;
+    if (confParam instanceof CompoundConfiguration) {
+       throw new IllegalArgumentException("Need original base configuration");
+    }
+    // 'conf' renamed to 'confParam' b/c we use this.conf in the constructor
+    this.baseConf = confParam;
+    if (htd != null) {
+      this.conf = new CompoundConfiguration().add(confParam).add(htd.getValues());
+    }
+    else {
+      this.conf = new CompoundConfiguration().add(confParam);
+    }
     this.rowLockWaitDuration = conf.getInt("hbase.rowlock.wait.duration",
                     DEFAULT_ROWLOCK_WAIT_DURATION);
 
@@ -1159,6 +1182,17 @@ public class HRegion implements HeapSize
     return this.conf;
   }
 
+  /**
+   * A split takes the config from the parent region & passes it to the daughter
+   * region's constructor. If 'conf' was passed, you would end up using the HTD
+   * of the parent region in addition to the new daughter HTD. Pass 'baseConf'
+   * to the daughter regions to avoid this tricky dedupe problem.
+   * @return Configuration object
+   */
+  Configuration getBaseConf() {
+    return this.baseConf;
+  }
+
   /** @return region directory Path */
   public Path getRegionDir() {
     return this.regiondir;
@@ -4493,7 +4527,7 @@ public class HRegion implements HeapSize
       listPaths(fs, b.getRegionDir());
     }
 
-    Configuration conf = a.getConf();
+    Configuration conf = a.getBaseConf();
     HTableDescriptor tabledesc = a.getTableDesc();
     HLog log = a.getLog();
     Path tableDir = a.getTableDir();
@@ -5354,7 +5388,7 @@ public class HRegion implements HeapSize
   public static final long FIXED_OVERHEAD = ClassSize.align(
       ClassSize.OBJECT +
       ClassSize.ARRAY +
-      35 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT +
+      36 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT +
       (7 * Bytes.SIZEOF_LONG) +
       Bytes.SIZEOF_BOOLEAN);
 

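The instanceof guard in the constructor is what keeps descriptor values from
being layered twice: a region's merged conf already contains its HTD values, so
it must never be fed back in as the base. A sketch of the failure mode it
prevents (hypothetical call, for illustration; getBaseConf() is
package-private):

    // region.getConf() is already a CompoundConfiguration (site conf + HTD
    // values), so this throws IllegalArgumentException:
    new HRegion(tableDir, log, fs, region.getConf(), daughterInfo, htd, null);
    // The unmerged original must be passed instead:
    new HRegion(tableDir, log, fs, region.getBaseConf(), daughterInfo, htd, null);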
Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java?rev=1462622&r1=1462621&r2=1462622&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java Fri Mar 29 19:46:43 2013
@@ -697,7 +697,7 @@ public class SplitTransaction {
     Path regionDir = getSplitDirForDaughter(this.parent.getFilesystem(),
       this.splitdir, hri);
     HRegion r = HRegion.newHRegion(this.parent.getTableDir(),
-      this.parent.getLog(), fs, this.parent.getConf(),
+      this.parent.getLog(), fs, this.parent.getBaseConf(),
       hri, this.parent.getTableDesc(), rsServices);
     long halfParentReadRequestCount = this.parent.getReadRequestsCount() / 2;
     r.readRequestsCount.set(halfParentReadRequestCount);

Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=1462622&r1=1462621&r2=1462622&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java Fri Mar 29 19:46:43 2013
@@ -179,9 +179,10 @@ public class Store extends SchemaConfigu
    * @throws IOException
    */
   protected Store(Path basedir, HRegion region, HColumnDescriptor family,
-    FileSystem fs, Configuration conf)
+      FileSystem fs, Configuration confParam)
   throws IOException {
-    super(conf, region.getRegionInfo().getTableNameAsString(),
+    super(new CompoundConfiguration().add(confParam).add(
+        family.getValues()), region.getTableDesc().getNameAsString(),
         Bytes.toString(family.getName()));
     HRegionInfo info = region.getRegionInfo();
     this.fs = fs;
@@ -189,7 +190,8 @@ public class Store extends SchemaConfigu
     this.homedir = createStoreHomeDir(this.fs, p);
     this.region = region;
     this.family = family;
-    this.conf = conf;
+    // 'conf' renamed to 'confParam' b/c we use this.conf in the constructor
+    this.conf = new CompoundConfiguration().add(confParam).add(family.getValues());
     this.blocksize = family.getBlocksize();
 
     this.dataBlockEncoder =
@@ -221,6 +223,8 @@ public class Store extends SchemaConfigu
       conf.getInt("hbase.hstore.compaction.min",
         /*old name*/ conf.getInt("hbase.hstore.compactionThreshold", 3)));
 
+    LOG.info("hbase.hstore.compaction.min = " + this.minFilesToCompact);
+
     // Setting up cache configuration for this family
     this.cacheConf = new CacheConfig(conf, family);
     this.blockingStoreFileCount =

Modified: hbase/branches/0.94/src/main/ruby/hbase.rb
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/ruby/hbase.rb?rev=1462622&r1=1462621&r2=1462622&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/ruby/hbase.rb (original)
+++ hbase/branches/0.94/src/main/ruby/hbase.rb Fri Mar 29 19:46:43 2013
@@ -40,6 +40,7 @@ module HBaseConstants
   NAME = org.apache.hadoop.hbase.HConstants::NAME
   VERSIONS = org.apache.hadoop.hbase.HConstants::VERSIONS
   IN_MEMORY = org.apache.hadoop.hbase.HConstants::IN_MEMORY
+  CONFIG = org.apache.hadoop.hbase.HConstants::CONFIG
   STOPROW = "STOPROW"
   STARTROW = "STARTROW"
   ENDROW = STOPROW

Modified: hbase/branches/0.94/src/main/ruby/hbase/admin.rb
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/ruby/hbase/admin.rb?rev=1462622&r1=1462621&r2=1462622&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/ruby/hbase/admin.rb (original)
+++ hbase/branches/0.94/src/main/ruby/hbase/admin.rb Fri Mar 29 19:46:43 2013
@@ -20,6 +20,7 @@
 
 include Java
 java_import org.apache.hadoop.hbase.util.Pair
+java_import org.apache.hadoop.hbase.util.RegionSplitter
 
 # Wrapper for org.apache.hadoop.hbase.client.HBaseAdmin
 
@@ -189,38 +190,65 @@ module Hbase
           raise(ArgumentError, "#{arg.class} of #{arg.inspect} is not of Hash or String type")
         end
 
-        if arg.kind_of?(Hash) and (arg.has_key?(SPLITS) or arg.has_key?(SPLITS_FILE))
-          if arg.has_key?(SPLITS_FILE)
-            unless File.exist?(arg[SPLITS_FILE])
-              raise(ArgumentError, "Splits file #{arg[SPLITS_FILE]} doesn't exist")
-            end
-            arg[SPLITS] = []
-            File.foreach(arg[SPLITS_FILE]) do |line|
-              arg[SPLITS].push(line.strip())
+        if arg.kind_of?(String)
+          # the arg is a string, default action is to add a column to the table
+          htd.addFamily(hcd(arg, htd))
+        else
+          # arg is a hash.  4 possibilities:
+          if (arg.has_key?(SPLITS) or arg.has_key?(SPLITS_FILE))
+            if arg.has_key?(SPLITS_FILE)
+              unless File.exist?(arg[SPLITS_FILE])
+                raise(ArgumentError, "Splits file #{arg[SPLITS_FILE]} doesn't exist")
+              end
+              arg[SPLITS] = []
+              File.foreach(arg[SPLITS_FILE]) do |line|
+                arg[SPLITS].push(line.strip())
+              end
             end
-          end
 
-          splits = Java::byte[][arg[SPLITS].size].new
-          idx = 0
-          arg[SPLITS].each do |split|
-            splits[idx] = split.to_java_bytes
-            idx = idx + 1
-          end
-        elsif arg.kind_of?(Hash) and (arg.has_key?(NUMREGIONS) or arg.has_key?(SPLITALGO))
-          raise(ArgumentError, "Column family configuration should be specified in a separate clause") if arg.has_key?(NAME)
-          raise(ArgumentError, "Number of regions must be specified") unless arg.has_key?(NUMREGIONS)
-          raise(ArgumentError, "Split algorithm must be specified") unless arg.has_key?(SPLITALGO)
-          raise(ArgumentError, "Number of regions must be geter than 1") unless arg[NUMREGIONS] > 1
-          num_regions = arg[NUMREGIONS]
-          split_algo = org.apache.hadoop.hbase.util.RegionSplitter.newSplitAlgoInstance(@conf, arg[SPLITALGO])
-          splits = split_algo.split(JInteger.valueOf(num_regions))
-        else
-          # Add column to the table
-          descriptor = hcd(arg, htd)
-          if arg[COMPRESSION_COMPACT]
-            descriptor.setValue(COMPRESSION_COMPACT, arg[COMPRESSION_COMPACT])
+            splits = Java::byte[][arg[SPLITS].size].new
+            idx = 0
+            arg[SPLITS].each do |split|
+              splits[idx] = split.to_java_bytes
+              idx = idx + 1
+            end
+          elsif (arg.has_key?(NUMREGIONS) or arg.has_key?(SPLITALGO))
+            # (1) deprecated region pre-split API
+            raise(ArgumentError, "Column family configuration should be specified in a separate clause") if arg.has_key?(NAME)
+            raise(ArgumentError, "Number of regions must be specified") unless arg.has_key?(NUMREGIONS)
+            raise(ArgumentError, "Split algorithm must be specified") unless arg.has_key?(SPLITALGO)
+            raise(ArgumentError, "Number of regions must be greater than 1") unless arg[NUMREGIONS] > 1
+            num_regions = arg[NUMREGIONS]
+            split_algo = RegionSplitter.newSplitAlgoInstance(@conf, arg[SPLITALGO])
+            splits = split_algo.split(JInteger.valueOf(num_regions))
+          elsif (method = arg.delete(METHOD))
+            # (2) table_att modification
+            raise(ArgumentError, "table_att is currently the only supported method") unless method == 'table_att'
+            raise(ArgumentError, "NUMREGIONS & SPLITALGO must both be specified") unless arg.has_key?(NUMREGIONS) == arg.has_key?(SPLITALGO)
+            htd.setMaxFileSize(JLong.valueOf(arg[MAX_FILESIZE])) if arg[MAX_FILESIZE]
+            htd.setReadOnly(JBoolean.valueOf(arg[READONLY])) if arg[READONLY]
+            htd.setMemStoreFlushSize(JLong.valueOf(arg[MEMSTORE_FLUSHSIZE])) if arg[MEMSTORE_FLUSHSIZE]
+            htd.setDeferredLogFlush(JBoolean.valueOf(arg[DEFERRED_LOG_FLUSH])) if arg[DEFERRED_LOG_FLUSH]
+            htd.setValue(COMPRESSION_COMPACT, arg[COMPRESSION_COMPACT]) if arg[COMPRESSION_COMPACT]
+            if arg[NUMREGIONS]
+              raise(ArgumentError, "Number of regions must be greater than 1") unless arg[NUMREGIONS] > 1
+              num_regions = arg[NUMREGIONS]
+              split_algo = RegionSplitter.newSplitAlgoInstance(@conf, arg[SPLITALGO])
+              splits = split_algo.split(JInteger.valueOf(num_regions))
+            end
+            if arg[CONFIG]
+              raise(ArgumentError, "#{CONFIG} must be a Hash type") unless arg[CONFIG].kind_of?(Hash)
+              for k,v in arg[CONFIG]
+                v = v.to_s unless v.nil?
+                htd.setValue(k, v)
+              end
+            end
+          else
+            # (3) column family spec
+            descriptor = hcd(arg, htd)
+            descriptor.setValue(COMPRESSION_COMPACT, arg[COMPRESSION_COMPACT]) if arg[COMPRESSION_COMPACT]
+            htd.addFamily(descriptor)
           end
-          htd.addFamily(descriptor)
         end
       end
 
@@ -421,6 +449,13 @@ module Hbase
             end
           end
 
+          if arg[CONFIG]
+            raise(ArgumentError, "#{CONFIG} must be a Hash type") unless arg[CONFIG].kind_of?(Hash)
+            for k,v in arg[CONFIG]
+              v = v.to_s unless v.nil?
+              htd.setValue(k, v)
+            end
+          end
           @admin.modifyTable(table_name.to_java_bytes, htd)
           if wait == true
             puts "Updating all regions with the new schema..."
@@ -561,6 +596,14 @@ module Hbase
           family.setCompressionType(org.apache.hadoop.hbase.io.hfile.Compression::Algorithm.valueOf(compression))
         end
       end
+
+      if arg[CONFIG]
+        raise(ArgumentError, "#{CONFIG} must be a Hash type") unless arg[CONFIG].kind_of?(Hash)
+        for k,v in arg[CONFIG]
+          v = v.to_s unless v.nil?
+          family.setValue(k, v)
+        end
+      end
       return family
     end
 

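On the Java side, the CONFIG hash handled above is plain sugar over setValue()
on the descriptors; the rough equivalent of
alter 't1', METHOD => 'table_att', CONFIG => {'hbase.hstore.compaction.min' => '5'}
is (a sketch):

    HBaseAdmin admin = new HBaseAdmin(conf);
    HTableDescriptor htd =
        new HTableDescriptor(admin.getTableDescriptor(Bytes.toBytes("t1")));
    htd.setValue("hbase.hstore.compaction.min", "5");
    admin.modifyTable(Bytes.toBytes("t1"), htd);
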
Modified: hbase/branches/0.94/src/main/ruby/shell/commands/alter.rb
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/ruby/shell/commands/alter.rb?rev=1462622&r1=1462621&r2=1462622&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/ruby/shell/commands/alter.rb (original)
+++ hbase/branches/0.94/src/main/ruby/shell/commands/alter.rb Fri Mar 29 19:46:43 2013
@@ -40,6 +40,9 @@ To delete the 'f1' column family in tabl
 or a shorter version:
 
   hbase> alter 't1', 'delete' => 'f1'
+
+You can also change a column family's config by setting the CONFIG attribute, like this:
+  hbase> alter 'test', NAME => 'f', CONFIG => {'hbase.hstore.compaction.min' => '5'}
 
 You can also change table-scope attributes like MAX_FILESIZE,
 MEMSTORE_FLUSHSIZE, READONLY, and DEFERRED_LOG_FLUSH.
@@ -47,6 +50,9 @@ MEMSTORE_FLUSHSIZE, READONLY, and DEFERR
 For example, to change the max size of a family to 128MB, do:
 
   hbase> alter 't1', METHOD => 'table_att', MAX_FILESIZE => '134217728'
+
+You can also change table-scope config by setting the CONFIG attribute, like this:
+  hbase> alter 'test', METHOD => 'table_att', CONFIG => {'hbase.hstore.compaction.min' => '5'}
 
 You can add a table coprocessor by setting a table coprocessor attribute:
 

Modified: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java?rev=1462622&r1=1462621&r2=1462622&view=diff
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java (original)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java Fri Mar 29 19:46:43 2013
@@ -196,9 +196,7 @@ public abstract class HBaseTestCase exte
 
   protected HRegion openClosedRegion(final HRegion closedRegion)
   throws IOException {
-    HRegion r = new HRegion(closedRegion.getTableDir(), closedRegion.getLog(),
-        closedRegion.getFilesystem(), closedRegion.getConf(),
-        closedRegion.getRegionInfo(), closedRegion.getTableDesc(), null);
+    HRegion r = new HRegion(closedRegion);
     r.initialize();
     return r;
   }

Added: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java?rev=1462622&view=auto
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java (added)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java Fri Mar 29 19:46:43 2013
@@ -0,0 +1,260 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestFromClientSide3 {
+  final Log LOG = LogFactory.getLog(getClass());
+  private final static HBaseTestingUtility TEST_UTIL
+    = new HBaseTestingUtility();
+  private static byte[] ROW = Bytes.toBytes("testRow");
+  private static byte[] FAMILY = Bytes.toBytes("testFamily");
+  private static byte[] QUALIFIER = Bytes.toBytes("testQualifier");
+  private static byte[] VALUE = Bytes.toBytes("testValue");
+  private static Random random = new Random();
+  private static int SLAVES = 3;
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.getConfiguration().setBoolean(
+        "hbase.online.schema.update.enable", true);
+    TEST_UTIL.startMiniCluster(SLAVES);
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @Before
+  public void setUp() throws Exception {
+    // Nothing to do.
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @After
+  public void tearDown() throws Exception {
+    // Nothing to do.
+  }
+
+  private void randomCFPuts(HTable table, byte[] row, byte[] family, int nPuts)
+      throws Exception {
+    Put put = new Put(row);
+    for (int i = 0; i < nPuts; i++) {
+      byte[] qualifier = Bytes.toBytes(random.nextInt());
+      byte[] value = Bytes.toBytes(random.nextInt());
+      put.add(family, qualifier, value);
+    }
+    table.put(put);
+  }
+
+  private void performMultiplePutAndFlush(HBaseAdmin admin, HTable table,
+      byte[] row, byte[] family, int nFlushes, int nPuts) throws Exception {
+
+    // connection needed for poll-wait
+    HConnection conn = HConnectionManager.getConnection(TEST_UTIL
+        .getConfiguration());
+    HRegionLocation loc = table.getRegionLocation(row, true);
+    HRegionInterface server = conn.getHRegionConnection(loc.getHostname(), loc
+        .getPort());
+    byte[] regName = loc.getRegionInfo().getRegionName();
+
+    for (int i = 0; i < nFlushes; i++) {
+      randomCFPuts(table, row, family, nPuts);
+      int sfCount = server.getStoreFileList(regName, FAMILY).size();
+
+      // TODO: replace this api with a synchronous flush after HBASE-2949
+      admin.flush(table.getTableName());
+
+      // synchronously poll wait for a new storefile to appear (flush happened)
+      while (server.getStoreFileList(regName, FAMILY).size() == sfCount) {
+        Thread.sleep(40);
+      }
+    }
+  }
+
+  // override the config settings at the CF level and ensure priority
+  @Test(timeout = 60000)
+  public void testAdvancedConfigOverride() throws Exception {
+    /*
+     * Overall idea: (1) create 3 store files and issue a compaction. config's
+     * compaction.min == 3, so should work. (2) Increase the compaction.min
+     * toggle in the HTD to 5 and modify table. If we use the HTD value instead
+     * of the default config value, adding 3 files and issuing a compaction
+     * SHOULD NOT work (3) Decrease the compaction.min toggle in the HCD to 2
+     * and modify table. The CF schema should override the Table schema and now
+     * cause a minor compaction.
+     */
+    TEST_UTIL.getConfiguration().setInt("hbase.hstore.compaction.min", 3);
+
+    String tableName = "testAdvancedConfigOverride";
+    byte[] TABLE = Bytes.toBytes(tableName);
+    HTable hTable = TEST_UTIL.createTable(TABLE, FAMILY, 10);
+    HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
+    HConnection connection = HConnectionManager.getConnection(TEST_UTIL
+        .getConfiguration());
+
+    // Create 3 store files.
+    byte[] row = Bytes.toBytes(random.nextInt());
+    performMultiplePutAndFlush(admin, hTable, row, FAMILY, 3, 100);
+
+    // Verify we have multiple store files.
+    HRegionLocation loc = hTable.getRegionLocation(row, true);
+    byte[] regionName = loc.getRegionInfo().getRegionName();
+    HRegionInterface server = connection.getHRegionConnection(
+        loc.getHostname(), loc.getPort());
+    assertTrue(server.getStoreFileList(regionName, FAMILY).size() > 1);
+
+    // Issue a compaction request
+    admin.compact(TABLE);
+
+    // poll wait for the compactions to happen
+    for (int i = 0; i < 10 * 1000 / 40; ++i) {
+      // The number of store files after compaction should be lesser.
+      loc = hTable.getRegionLocation(row, true);
+      if (!loc.getRegionInfo().isOffline()) {
+        regionName = loc.getRegionInfo().getRegionName();
+        server = connection.getHRegionConnection(loc.getHostname(), loc
+            .getPort());
+        if (server.getStoreFileList(regionName, FAMILY).size() <= 1) {
+          break;
+        }
+      }
+      Thread.sleep(40);
+    }
+    // verify the compactions took place and that we didn't just time out
+    assertTrue(server.getStoreFileList(regionName, FAMILY).size() <= 1);
+
+    // change the compaction.min config option for this table to 5
+    LOG.info("hbase.hstore.compaction.min should now be 5");
+    HTableDescriptor htd = new HTableDescriptor(hTable.getTableDescriptor());
+    htd.setValue("hbase.hstore.compaction.min", String.valueOf(5));
+    admin.modifyTable(TABLE, htd);
+    Pair<Integer, Integer> st;
+    while (null != (st = admin.getAlterStatus(TABLE)) && st.getFirst() > 0) {
+      LOG.debug(st.getFirst() + " regions left to update");
+      Thread.sleep(40);
+    }
+    LOG.info("alter status finished");
+
+    // Create 3 more store files.
+    performMultiplePutAndFlush(admin, hTable, row, FAMILY, 3, 10);
+
+    // Issue a compaction request
+    admin.compact(TABLE);
+
+    // This time, the requested compaction should not actually run
+    Thread.sleep(10 * 1000);
+    int sfCount = 0;
+    loc = hTable.getRegionLocation(row, true);
+    regionName = loc.getRegionInfo().getRegionName();
+    server = connection.getHRegionConnection(loc.getHostname(), loc.getPort());
+    sfCount = server.getStoreFileList(regionName, FAMILY).size();
+    assertTrue(sfCount > 1);
+
+    // change an individual CF's config option to 2 via an online schema update
+    LOG.info("hbase.hstore.compaction.min should now be 2");
+    HColumnDescriptor hcd = new HColumnDescriptor(htd.getFamily(FAMILY));
+    hcd.setValue("hbase.hstore.compaction.min", String.valueOf(2));
+    htd.addFamily(hcd);
+    admin.modifyTable(TABLE, htd);
+    while (null != (st = admin.getAlterStatus(TABLE)) && st.getFirst() > 0) {
+      LOG.debug(st.getFirst() + " regions left to update");
+      Thread.sleep(40);
+    }
+    LOG.info("alter status finished");
+
+    // Issue a compaction request
+    admin.compact(TABLE);
+
+    // poll, waiting for the compaction to happen
+    for (int i = 0; i < 10 * 1000 / 40; ++i) {
+      loc = hTable.getRegionLocation(row, true);
+      regionName = loc.getRegionInfo().getRegionName();
+      try {
+        server = connection.getHRegionConnection(loc.getHostname(), loc
+            .getPort());
+        if (server.getStoreFileList(regionName, FAMILY).size() < sfCount) {
+          break;
+        }
+      } catch (Exception e) {
+        LOG.debug("Waiting for region to come online: " + regionName);
+      }
+      Thread.sleep(40);
+    }
+    // verify the compaction took place and that we didn't just time out
+    assertTrue(server.getStoreFileList(regionName, FAMILY).size() < sfCount);
+
+    // Finally, ensure that we can remove a custom config value after setting it
+    LOG.info("Removing CF config value");
+    LOG.info("hbase.hstore.compaction.min should now be 5");
+    hcd = new HColumnDescriptor(htd.getFamily(FAMILY));
+    hcd.setValue("hbase.hstore.compaction.min", null);
+    htd.addFamily(hcd);
+    admin.modifyTable(TABLE, htd);
+    while (null != (st = admin.getAlterStatus(TABLE)) && st.getFirst() > 0) {
+      LOG.debug(st.getFirst() + " regions left to update");
+      Thread.sleep(40);
+    }
+    LOG.info("alter status finished");
+    assertNull(hTable.getTableDescriptor().getFamily(FAMILY).getValue(
+        "hbase.hstore.compaction.min"));
+  }
+
+  @org.junit.Rule
+  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
+   new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
+}
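
The test above drives the override through the Java admin API. As a minimal
sketch of the same per-table and per-CF override outside the test harness
(the table name "myTable" and family "myCF" are illustrative, not part of
the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.client.HBaseAdmin;
    import org.apache.hadoop.hbase.util.Bytes;

    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    byte[] table = Bytes.toBytes("myTable");
    HTableDescriptor htd = admin.getTableDescriptor(table);

    // Table-level override: shadows the site/default configuration for
    // every store in this table.
    htd.setValue("hbase.hstore.compaction.min", "5");

    // CF-level override: takes priority over the table-level value.
    HColumnDescriptor hcd = htd.getFamily(Bytes.toBytes("myCF"));
    hcd.setValue("hbase.hstore.compaction.min", "2");
    htd.addFamily(hcd);

    admin.modifyTable(table, htd);

    // Setting the value to null removes the override again, as the test
    // verifies at its end.

As in the test, modifyTable is asynchronous in 0.94; callers should poll
admin.getAlterStatus(table) until no regions remain to be updated.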

Modified: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java?rev=1462622&r1=1462621&r2=1462622&view=diff
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java (original)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java Fri Mar 29 19:46:43 2013
@@ -380,9 +380,7 @@ public class TestCoprocessorInterface ex
   HRegion reopenRegion(final HRegion closedRegion, Class<?> ... implClasses)
       throws IOException {
     //HRegionInfo info = new HRegionInfo(tableName, null, null, false);
-    HRegion r = new HRegion(closedRegion.getTableDir(), closedRegion.getLog(),
-        closedRegion.getFilesystem(), closedRegion.getConf(),
-        closedRegion.getRegionInfo(), closedRegion.getTableDesc(), null);
+    HRegion r = new HRegion(closedRegion);
     r.initialize();
 
     // this following piece is a hack. currently a coprocessorHost
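
The single-argument constructor used above, HRegion(HRegion), is added to
HRegion.java in this same commit. As a hedged sketch (the removed call site
shows the arguments the old code passed), it can be read as delegating to
the long-form constructor roughly like this:

    // Hedged sketch of the delegation; see HRegion.java in this commit for
    // the actual definition.
    public HRegion(HRegion other) {
      this(other.getTableDir(), other.getLog(), other.getFilesystem(),
          other.getConf(), other.getRegionInfo(), other.getTableDesc(), null);
    }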

Added: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundConfiguration.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundConfiguration.java?rev=1462622&view=auto
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundConfiguration.java (added)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundConfiguration.java Fri Mar 29 19:46:43 2013
@@ -0,0 +1,116 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.regionserver.CompoundConfiguration;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.SmallTests;
+
+import org.junit.experimental.categories.Category;
+import org.junit.Test;
+
+import junit.framework.TestCase;
+
+@Category(SmallTests.class)
+public class TestCompoundConfiguration extends TestCase {
+  private Configuration baseConf;
+
+  @Override
+  protected void setUp() throws Exception {
+    baseConf = new Configuration();
+    baseConf.set("A", "1");
+    baseConf.setInt("B", 2);
+    baseConf.set("C", "3");
+  }
+
+  @Test
+  public void testBasicFunctionality() throws ClassNotFoundException {
+    CompoundConfiguration compoundConf = new CompoundConfiguration()
+        .add(baseConf);
+    assertEquals("1", compoundConf.get("A"));
+    assertEquals(2, compoundConf.getInt("B", 0));
+    assertEquals(3, compoundConf.getInt("C", 0));
+    assertEquals(0, compoundConf.getInt("D", 0));
+
+    assertEquals(CompoundConfiguration.class, compoundConf
+        .getClassByName(CompoundConfiguration.class.getName()));
+    try {
+      compoundConf.getClassByName("bad_class_name");
+      fail("Trying to load bad_class_name should throw an exception");
+    } catch (ClassNotFoundException e) {
+      // win!
+    }
+  }
+
+  @Test
+  public void testWithConfig() {
+    Configuration conf = new Configuration();
+    conf.set("B", "2b");
+    conf.set("C", "33");
+    conf.set("D", "4");
+
+    CompoundConfiguration compoundConf = new CompoundConfiguration()
+        .add(baseConf)
+        .add(conf);
+    assertEquals("1", compoundConf.get("A"));
+    assertEquals("2b", compoundConf.get("B"));
+    assertEquals(33, compoundConf.getInt("C", 0));
+    assertEquals("4", compoundConf.get("D"));
+    assertEquals(4, compoundConf.getInt("D", 0));
+    assertNull(compoundConf.get("E"));
+    assertEquals(6, compoundConf.getInt("F", 6));
+  }
+
+  private ImmutableBytesWritable strToIbw(String s) {
+    return new ImmutableBytesWritable(Bytes.toBytes(s));
+  }
+
+  @Test
+  public void testWithIbwMap() {
+    Map<ImmutableBytesWritable, ImmutableBytesWritable> map =
+      new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();
+    map.put(strToIbw("B"), strToIbw("2b"));
+    map.put(strToIbw("C"), strToIbw("33"));
+    map.put(strToIbw("D"), strToIbw("4"));
+    // note that, unlike a Configuration, an IBW map can accept null values
+    map.put(strToIbw("G"), null);
+
+    CompoundConfiguration compoundConf = new CompoundConfiguration()
+      .add(baseConf)
+      .add(map);
+    assertEquals("1", compoundConf.get("A"));
+    assertEquals("2b", compoundConf.get("B"));
+    assertEquals(33, compoundConf.getInt("C", 0));
+    assertEquals("4", compoundConf.get("D"));
+    assertEquals(4, compoundConf.getInt("D", 0));
+    assertNull(compoundConf.get("E"));
+    assertEquals(6, compoundConf.getInt("F", 6));
+    assertNull(compoundConf.get("G"));
+  }
+
+  @org.junit.Rule
+  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
+    new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
+}
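
The three tests above pin down CompoundConfiguration's lookup behavior:
sources added later shadow earlier ones (B and C resolve from the later
source), typed getters such as getInt work across sources, and, unlike a
Configuration, an IBW map may carry a null value for a key. A self-contained
sketch of that last-added-wins resolution rule follows; the class is
hypothetical, written only to illustrate the lookup order, and is not the
commit's implementation:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    // Hypothetical illustration of last-added-wins layering; the commit's
    // CompoundConfiguration layers Configuration objects and IBW maps in
    // the same spirit.
    class LayeredLookup {
      private final List<Map<String, String>> layers =
          new ArrayList<Map<String, String>>();

      // Chainable, mirroring CompoundConfiguration.add(...).
      LayeredLookup add(Map<String, String> layer) {
        layers.add(layer);
        return this;
      }

      // Walk from the most recently added layer to the oldest; the first
      // layer containing the key wins, even if it maps the key to null.
      String get(String key) {
        for (int i = layers.size() - 1; i >= 0; i--) {
          if (layers.get(i).containsKey(key)) {
            return layers.get(i).get(key);
          }
        }
        return null;
      }
    }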

Modified: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java?rev=1462622&r1=1462621&r2=1462622&view=diff
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java (original)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java Fri Mar 29 19:46:43 2013
@@ -247,7 +247,7 @@ public class TestSplitTransaction {
     for (HRegion r: daughters) {
       // Open so can count its content.
       HRegion openRegion = HRegion.openHRegion(this.testdir, r.getRegionInfo(),
-         r.getTableDesc(), r.getLog(), r.getConf());
+          r.getTableDesc(), r.getLog(), TEST_UTIL.getConfiguration());
       try {
         int count = countRows(openRegion);
         assertTrue(count > 0 && count != rowcount);
@@ -303,7 +303,7 @@ public class TestSplitTransaction {
     for (HRegion r: daughters) {
       // Open so can count its content.
       HRegion openRegion = HRegion.openHRegion(this.testdir, r.getRegionInfo(),
-         r.getTableDesc(), r.getLog(), r.getConf());
+        r.getTableDesc(), r.getLog(), TEST_UTIL.getConfiguration());
       try {
         int count = countRows(openRegion);
         assertTrue(count > 0 && count != rowcount);