Posted to commits@hbase.apache.org by jm...@apache.org on 2013/02/14 13:58:21 UTC

svn commit: r1446147 [7/35] - in /hbase/branches/hbase-7290v2: ./ bin/ conf/ dev-support/ hbase-client/ hbase-common/ hbase-common/src/main/java/org/apache/hadoop/hbase/ hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/ hbase-common/src/m...

Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon Thu Feb 14 12:58:12 2013
@@ -48,27 +48,27 @@ Arrays.sort(serverNames);
 </%java>
 
 <div class="tabbable">
-    <ul class="nav nav-tabs">
-        <li class="active"><a href="#baseStats" data-toggle="tab">Base Stats</a></li>
-        <li class=""><a href="#memoryStats" data-toggle="tab">Memory</a></li>
-        <li class=""><a href="#requestStats" data-toggle="tab">Requests</a></li>
-        <li class=""><a href="#storeStats" data-toggle="tab">Storefiles</a></li>
-        <li class=""><a href="#compactStas" data-toggle="tab">Compactions</a></li>
+    <ul class="nav nav-pills">
+        <li class="active"><a href="#tab_baseStats" data-toggle="tab">Base Stats</a></li>
+        <li class=""><a href="#tab_memoryStats" data-toggle="tab">Memory</a></li>
+        <li class=""><a href="#tab_requestStats" data-toggle="tab">Requests</a></li>
+        <li class=""><a href="#tab_storeStats" data-toggle="tab">Storefiles</a></li>
+        <li class=""><a href="#tab_compactStas" data-toggle="tab">Compactions</a></li>
     </ul>
     <div class="tab-content" style="padding-bottom: 9px; border-bottom: 1px solid #ddd;">
-        <div class="tab-pane active" id="baseStats">
+        <div class="tab-pane active" id="tab_baseStats">
             <& baseStats; serverNames = serverNames; &>
         </div>
-        <div class="tab-pane" id="memoryStats">
+        <div class="tab-pane" id="tab_memoryStats">
             <& memoryStats; serverNames = serverNames; &>
         </div>
-        <div class="tab-pane" id="requestStats">
+        <div class="tab-pane" id="tab_requestStats">
             <& requestStats; serverNames = serverNames; &>
         </div>
-        <div class="tab-pane" id="storeStats">
+        <div class="tab-pane" id="tab_storeStats">
             <& storeStats; serverNames = serverNames; &>
         </div>
-        <div class="tab-pane" id="compactStas">
+        <div class="tab-pane" id="tab_compactStas">
             <& compactionStats; serverNames = serverNames; &>
         </div>
     </div>

Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon Thu Feb 14 12:58:12 2013
@@ -68,7 +68,7 @@ org.apache.hadoop.hbase.protobuf.generat
             <span class="icon-bar"></span>
             <span class="icon-bar"></span>
           </a>
-          <a class="brand" href="/rs-status">HBase Region Server</a>
+          <a class="brand" href="/rs-status"><img src="/static/hbase_logo_small.png" alt="HBase Logo"/></a>
           <div class="nav-collapse">
             <ul class="nav">
                 <li class="active"><a href="/">Home</a></li>
@@ -87,22 +87,27 @@ org.apache.hadoop.hbase.protobuf.generat
 
 <div class="container">
     <div class="row inner_header">
-        <div class="span8">
-            <h1>RegionServer: <% serverName.getHostname() %></h1>
-        </div>
-        <div class="span4 logo">
-            <img src="/static/hbase_logo.png" height="66" width="266" alt="HBase logo"/>
+        <div class="page-header">
+            <h1>RegionServer <small><% serverName.getHostname() %></small></h1>
         </div>
     </div>
+    <div class="row">
 
+    <section>
     <h2>Server Metrics</h2>
     <& ServerMetricsTmpl; mWrap = regionServer.getMetrics().getRegionServerWrapper(); &>
+    </section>
 
+    <section>
     <& ../common/TaskMonitorTmpl; filter = filter &>
+    </section>
 
+    <section>
     <h2>Regions</h2>
     <& RegionListTmpl; regionServer = regionServer; onlineRegions = onlineRegions; &>
+    </section>
 
+    <section>
     <h2>Software Attributes</h2>
     <table id="attributes_table" class="table table-striped">
         <tr>
@@ -151,6 +156,8 @@ org.apache.hadoop.hbase.protobuf.generat
             <td>Address of HBase Master</td>
         </tr>
     </table>
+    </section>
+    </div>
 </div>
 <script src="/static/js/jquery.min.js" type="text/javascript"></script>
 <script src="/static/js/bootstrap.min.js" type="text/javascript"></script>

Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon Thu Feb 14 12:58:12 2013
@@ -38,23 +38,23 @@
     </%java>
 
     <div class="tabbable">
-        <ul class="nav nav-tabs">
-            <li class="active"><a href="#regionBaseInfo" data-toggle="tab">Base Info</a> </li>
-            <li><a href="#regionRequestStats" data-toggle="tab">Request metrics</a></li>
-            <li class=""><a href="#regionStoreStats" data-toggle="tab">Storefile Metrics</a></li>
-            <li class=""><a href="#regionCompactStas" data-toggle="tab">Compaction Metrics</a></li>
+        <ul class="nav nav-pills">
+            <li class="active"><a href="#tab_regionBaseInfo" data-toggle="tab">Base Info</a> </li>
+            <li><a href="#tab_regionRequestStats" data-toggle="tab">Request metrics</a></li>
+            <li class=""><a href="#tab_regionStoreStats" data-toggle="tab">Storefile Metrics</a></li>
+            <li class=""><a href="#tab_regionCompactStas" data-toggle="tab">Compaction Metrics</a></li>
         </ul>
         <div class="tab-content" style="padding-bottom: 9px; border-bottom: 1px solid #ddd;">
-            <div class="tab-pane active" id="regionBaseInfo">
+            <div class="tab-pane active" id="tab_regionBaseInfo">
                 <& baseInfo; onlineRegions = onlineRegions; &>
             </div>
-            <div class="tab-pane" id="regionRequestStats">
+            <div class="tab-pane" id="tab_regionRequestStats">
                 <& requestStats; onlineRegions = onlineRegions; &>
             </div>
-            <div class="tab-pane" id="regionStoreStats">
+            <div class="tab-pane" id="tab_regionStoreStats">
                 <& storeStats; onlineRegions = onlineRegions; &>
             </div>
-            <div class="tab-pane" id="regionCompactStas">
+            <div class="tab-pane" id="tab_regionCompactStas">
                 <& compactStats; onlineRegions = onlineRegions; &>
             </div>
         </div>

Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon Thu Feb 14 12:58:12 2013
@@ -35,31 +35,31 @@ com.yammer.metrics.stats.Snapshot;
 java.lang.management.ManagementFactory;
 </%import>
 <div class="tabbable">
-    <ul class="nav nav-tabs">
-        <li class="active"><a href="#baseStats" data-toggle="tab">Base Stats</a></li>
-        <li class=""><a href="#memoryStats" data-toggle="tab">Memory</a></li>
-        <li class=""><a href="#requestStats" data-toggle="tab">Requests</a></li>
-        <li class=""><a href="#storeStats" data-toggle="tab">Storefiles</a></li>
-        <li class=""><a href="#queueStats" data-toggle="tab">Queues</a></li>
-        <li class=""><a href="#blockCacheStats" data-toggle="tab">Block Cache</a></li>
+    <ul class="nav nav-pills">
+        <li class="active"><a href="#tab_baseStats" data-toggle="tab">Base Stats</a></li>
+        <li class=""><a href="#tab_memoryStats" data-toggle="tab">Memory</a></li>
+        <li class=""><a href="#tab_requestStats" data-toggle="tab">Requests</a></li>
+        <li class=""><a href="#tab_storeStats" data-toggle="tab">Storefiles</a></li>
+        <li class=""><a href="#tab_queueStats" data-toggle="tab">Queues</a></li>
+        <li class=""><a href="#tab_blockCacheStats" data-toggle="tab">Block Cache</a></li>
     </ul>
     <div class="tab-content" style="padding-bottom: 9px; border-bottom: 1px solid #ddd;">
-        <div class="tab-pane active" id="baseStats">
+        <div class="tab-pane active" id="tab_baseStats">
             <& baseStats; mWrap = mWrap &>
         </div>
-        <div class="tab-pane" id="memoryStats">
+        <div class="tab-pane" id="tab_memoryStats">
             <& memoryStats; mWrap = mWrap &>
         </div>
-        <div class="tab-pane" id="requestStats">
+        <div class="tab-pane" id="tab_requestStats">
             <& requestStats; mWrap = mWrap &>
         </div>
-        <div class="tab-pane" id="storeStats">
+        <div class="tab-pane" id="tab_storeStats">
             <& storeStats; mWrap = mWrap &>
         </div>
-        <div class="tab-pane" id="queueStats">
+        <div class="tab-pane" id="tab_queueStats">
             <& queueStats; mWrap = mWrap  &>
         </div>
-        <div class="tab-pane" id="blockCacheStats">
+        <div class="tab-pane" id="tab_blockCacheStats">
             <& blockCacheStats; mWrap = mWrap &>
         </div>
     </div>

Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java Thu Feb 14 12:58:12 2013
@@ -29,14 +29,17 @@ import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.DeserializationException;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.Text;
@@ -60,14 +63,16 @@ import com.google.protobuf.InvalidProtoc
 public class HColumnDescriptor implements WritableComparable<HColumnDescriptor> {
   // For future backward compatibility
 
-  // Version 3 was when column names become byte arrays and when we picked up
+  // Version  3 was when column names become byte arrays and when we picked up
   // Time-to-live feature.  Version 4 was when we moved to byte arrays, HBASE-82.
-  // Version 5 was when bloom filter descriptors were removed.
-  // Version 6 adds metadata as a map where keys and values are byte[].
-  // Version 7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
-  // Version 8 -- reintroduction of bloom filters, changed from boolean to enum
-  // Version 9 -- add data block encoding
-  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 9;
+  // Version  5 was when bloom filter descriptors were removed.
+  // Version  6 adds metadata as a map where keys and values are byte[].
+  // Version  7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
+  // Version  8 -- reintroduction of bloom filters, changed from boolean to enum
+  // Version  9 -- add data block encoding
+  // Version 10 -- change metadata to standard type.
+  // Version 11 -- add column family level configuration.
+  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11;
 
   // These constants are used as FileInfo keys
   public static final String COMPRESSION = "COMPRESSION";
@@ -165,7 +170,7 @@ public class HColumnDescriptor implement
   /**
    * Default setting for whether or not to use bloomfilters.
    */
-  public static final String DEFAULT_BLOOMFILTER = StoreFile.BloomType.NONE.toString();
+  public static final String DEFAULT_BLOOMFILTER = BloomType.NONE.toString();
 
   /**
    * Default setting for whether to cache bloom filter blocks on write if block
@@ -221,9 +226,16 @@ public class HColumnDescriptor implement
   private byte [] name;
 
   // Column metadata
-  protected final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
+  private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
     new HashMap<ImmutableBytesWritable,ImmutableBytesWritable>();
 
+  /**
+   * A map which holds the configuration specific to the column family.
+   * The keys of the map have the same names as config keys and override the defaults with
+   * cf-specific settings. Example usage may be for compactions, etc.
+   */
+  private final Map<String, String> configuration = new HashMap<String, String>();
+
   /*
    * Cache the max versions rather than calculate it every time.
    */
@@ -278,6 +290,9 @@ public class HColumnDescriptor implement
         desc.values.entrySet()) {
       this.values.put(e.getKey(), e.getValue());
     }
+    for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
+      this.configuration.put(e.getKey(), e.getValue());
+    }
     setMaxVersions(desc.getMaxVersions());
   }
 
@@ -409,7 +424,7 @@ public class HColumnDescriptor implement
     setEncodeOnDisk(encodeOnDisk);
     setDataBlockEncoding(DataBlockEncoding.
         valueOf(dataBlockEncoding.toUpperCase()));
-    setBloomFilterType(StoreFile.BloomType.
+    setBloomFilterType(BloomType.
       valueOf(bloomFilter.toUpperCase()));
     setBlocksize(blocksize);
     setScope(scope);
@@ -766,19 +781,19 @@ public class HColumnDescriptor implement
   /**
    * @return bloom filter type used for new StoreFiles in ColumnFamily
    */
-  public StoreFile.BloomType getBloomFilterType() {
+  public BloomType getBloomFilterType() {
     String n = getValue(BLOOMFILTER);
     if (n == null) {
       n = DEFAULT_BLOOMFILTER;
     }
-    return StoreFile.BloomType.valueOf(n.toUpperCase());
+    return BloomType.valueOf(n.toUpperCase());
   }
 
   /**
    * @param bt bloom filter type
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setBloomFilterType(final StoreFile.BloomType bt) {
+  public HColumnDescriptor setBloomFilterType(final BloomType bt) {
     return setValue(BLOOMFILTER, bt.toString());
   }
 
@@ -936,7 +951,7 @@ public class HColumnDescriptor implement
     // print all non-reserved, advanced config keys as a separate subset
     if (hasConfigKeys) {
       s.append(", ");
-      s.append(HConstants.CONFIG).append(" => ");
+      s.append(HConstants.METADATA).append(" => ");
       s.append('{');
       boolean printComma = false;
       for (ImmutableBytesWritable k : values.keySet()) {
@@ -955,6 +970,21 @@ public class HColumnDescriptor implement
       }
       s.append('}');
     }
+
+    if (!configuration.isEmpty()) {
+      s.append(", ");
+      s.append(HConstants.CONFIGURATION).append(" => ");
+      s.append('{');
+      boolean printCommaForConfiguration = false;
+      for (Map.Entry<String, String> e : configuration.entrySet()) {
+        if (printCommaForConfiguration) s.append(", ");
+        printCommaForConfiguration = true;
+        s.append('\'').append(e.getKey()).append('\'');
+        s.append(" => ");
+        s.append('\'').append(e.getValue()).append('\'');
+      }
+      s.append("}");
+    }
     return s;
   }
 
@@ -987,6 +1017,7 @@ public class HColumnDescriptor implement
     int result = Bytes.hashCode(this.name);
     result ^= Byte.valueOf(COLUMN_DESCRIPTOR_VERSION).hashCode();
     result ^= values.hashCode();
+    result ^= configuration.hashCode();
     return result;
   }
 
@@ -1057,6 +1088,19 @@ public class HColumnDescriptor implement
       String value = getValue(HConstants.VERSIONS);
       this.cachedMaxVersions = (value != null)?
           Integer.valueOf(value).intValue(): DEFAULT_VERSIONS;
+      if (version > 10) {
+        configuration.clear();
+        int numConfigs = in.readInt();
+        for (int i = 0; i < numConfigs; i++) {
+          ImmutableBytesWritable key = new ImmutableBytesWritable();
+          ImmutableBytesWritable val = new ImmutableBytesWritable();
+          key.readFields(in);
+          val.readFields(in);
+          configuration.put(
+            Bytes.toString(key.get(), key.getOffset(), key.getLength()),
+            Bytes.toString(val.get(), val.getOffset(), val.getLength()));
+        }
+      }
     }
   }
 
@@ -1073,6 +1117,11 @@ public class HColumnDescriptor implement
       e.getKey().write(out);
       e.getValue().write(out);
     }
+    out.writeInt(configuration.size());
+    for (Map.Entry<String, String> e : configuration.entrySet()) {
+      new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
+      new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
+    }
   }
 
   // Comparable
@@ -1087,6 +1136,13 @@ public class HColumnDescriptor implement
       else if (result > 0)
         result = 1;
     }
+    if (result == 0) {
+      result = this.configuration.hashCode() - o.configuration.hashCode();
+      if (result < 0)
+        result = -1;
+      else if (result > 0)
+        result = 1;
+    }
     return result;
   }
 
@@ -1127,8 +1183,11 @@ public class HColumnDescriptor implement
     // unrelated-looking test failures that are hard to trace back to here.
     HColumnDescriptor hcd = new HColumnDescriptor();
     hcd.name = cfs.getName().toByteArray();
-    for (ColumnFamilySchema.Attribute a: cfs.getAttributesList()) {
-      hcd.setValue(a.getName().toByteArray(), a.getValue().toByteArray());
+    for (BytesBytesPair a: cfs.getAttributesList()) {
+      hcd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
+    }
+    for (NameStringPair a: cfs.getConfigurationList()) {
+      hcd.setConfiguration(a.getName(), a.getValue());
     }
     return hcd;
   }
@@ -1140,11 +1199,52 @@ public class HColumnDescriptor implement
     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
     builder.setName(ByteString.copyFrom(getName()));
     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
-      ColumnFamilySchema.Attribute.Builder aBuilder = ColumnFamilySchema.Attribute.newBuilder();
-      aBuilder.setName(ByteString.copyFrom(e.getKey().get()));
-      aBuilder.setValue(ByteString.copyFrom(e.getValue().get()));
+      BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
+      aBuilder.setFirst(ByteString.copyFrom(e.getKey().get()));
+      aBuilder.setSecond(ByteString.copyFrom(e.getValue().get()));
       builder.addAttributes(aBuilder.build());
     }
+    for (Map.Entry<String, String> e : this.configuration.entrySet()) {
+      NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
+      aBuilder.setName(e.getKey());
+      aBuilder.setValue(e.getValue());
+      builder.addConfiguration(aBuilder.build());
+    }
     return builder.build();
   }
+
+  /**
+   * Getter for accessing the configuration value by key.
+   */
+  public String getConfigurationValue(String key) {
+    return configuration.get(key);
+  }
+
+  /**
+   * Getter for fetching an unmodifiable {@link #configuration} map.
+   */
+  public Map<String, String> getConfiguration() {
+    // shallow pointer copy
+    return Collections.unmodifiableMap(configuration);
+  }
+
+  /**
+   * Setter for storing a configuration setting in {@link #configuration} map.
+   * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
+   * @param value String value. If null, removes the configuration.
+   */
+  public void setConfiguration(String key, String value) {
+    if (value == null) {
+      removeConfiguration(key);
+    } else {
+      configuration.put(key, value);
+    }
+  }
+
+  /**
+   * Remove a configuration setting represented by the key from the {@link #configuration} map.
+   */
+  public void removeConfiguration(final String key) {
+    configuration.remove(key);
+  }
 }
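
For context, a minimal sketch of how the per-column-family configuration map added above might be used. The methods (setConfiguration, getConfigurationValue, setBloomFilterType) and the relocated BloomType import come from this diff; the family name and config key below are hypothetical examples, not part of the commit.

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.regionserver.BloomType;

    public class CfConfigSketch {
      public static void main(String[] args) {
        HColumnDescriptor hcd = new HColumnDescriptor("cf");
        // BloomType now lives in o.a.h.h.regionserver rather than StoreFile.BloomType.
        hcd.setBloomFilterType(BloomType.ROW);
        // Override a config key for this family only (key name is a hypothetical example).
        hcd.setConfiguration("hbase.hstore.compaction.min", "5");
        System.out.println(hcd.getConfigurationValue("hbase.hstore.compaction.min")); // 5
        // A null value removes the setting, per setConfiguration above.
        hcd.setConfiguration("hbase.hstore.compaction.min", null);
      }
    }

Per the version comment above, these cf-level settings are written as a new trailing section of the Writable form (version 11) and as NameStringPair entries in the protobuf ColumnFamilySchema.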

Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java Thu Feb 14 12:58:12 2013
@@ -30,7 +30,9 @@ import org.apache.hadoop.classification.
 
 
 /**
- * Data structure to describe the distribution of HDFS blocks amount hosts
+ * Data structure to describe the distribution of HDFS blocks amount hosts.
+ *
+ * Adding erroneous data will be ignored silently.
  */
 @InterfaceAudience.Private
 public class HDFSBlocksDistribution {
@@ -122,8 +124,10 @@ public class HDFSBlocksDistribution {
    */
   public void addHostsAndBlockWeight(String[] hosts, long weight) {
     if (hosts == null || hosts.length == 0) {
-      throw new NullPointerException("empty hosts");
+      // erroneous data
+      return;
     }
+
     addUniqueWeight(weight);
     for (String hostname : hosts) {
       addHostAndBlockWeight(hostname, weight);
@@ -146,7 +150,8 @@ public class HDFSBlocksDistribution {
    */
   private void addHostAndBlockWeight(String host, long weight) {
     if (host == null) {
-      throw new NullPointerException("Passed hostname is null");
+      // erroneous data
+      return;
     }
 
     HostAndWeight hostAndWeight = this.hostAndWeights.get(host);
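
A small sketch of the behavior change above: addHostsAndBlockWeight (and the private addHostAndBlockWeight) now silently ignore null or empty host data instead of throwing NullPointerException. The weights are hypothetical, and getUniqueBlocksTotalWeight is assumed from the surrounding class rather than shown in this hunk.

    import org.apache.hadoop.hbase.HDFSBlocksDistribution;

    public class BlocksDistributionSketch {
      public static void main(String[] args) {
        HDFSBlocksDistribution dist = new HDFSBlocksDistribution();
        dist.addHostsAndBlockWeight(new String[] {"host1", "host2"}, 100L);
        // Before this change the next two calls threw NullPointerException;
        // now they are no-ops ("erroneous data" is ignored silently).
        dist.addHostsAndBlockWeight(null, 50L);
        dist.addHostsAndBlockWeight(new String[0], 50L);
        System.out.println(dist.getUniqueBlocksTotalWeight()); // 100
      }
    }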

Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java Thu Feb 14 12:58:12 2013
@@ -33,16 +33,12 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.JenkinsHash;
 import org.apache.hadoop.hbase.util.MD5Hash;
 import org.apache.hadoop.hbase.util.Pair;
@@ -171,14 +167,6 @@ public class HRegionInfo implements Comp
     return encodedRegionName;
   }
 
-  /** HRegionInfo for root region */
-  public static final HRegionInfo ROOT_REGIONINFO =
-    new HRegionInfo(0L, Bytes.toBytes("-ROOT-"));
-
-  /** HRegionInfo for first meta region */
-  public static final HRegionInfo FIRST_META_REGIONINFO =
-    new HRegionInfo(1L, Bytes.toBytes(".META."));
-
   private byte [] endKey = HConstants.EMPTY_BYTE_ARRAY;
   // This flag is in the parent of a split while the parent is still referenced
   // by daughter regions.  We USED to set this flag when we disabled a table
@@ -198,6 +186,14 @@ public class HRegionInfo implements Comp
   // Current TableName
   private byte[] tableName = null;
 
+  /** HRegionInfo for root region */
+  public static final HRegionInfo ROOT_REGIONINFO =
+      new HRegionInfo(0L, Bytes.toBytes("-ROOT-"));
+
+  /** HRegionInfo for first meta region */
+  public static final HRegionInfo FIRST_META_REGIONINFO =
+      new HRegionInfo(1L, Bytes.toBytes(".META."));
+
   private void setHashCode() {
     int result = Arrays.hashCode(this.regionName);
     result ^= this.regionId;
@@ -420,6 +416,15 @@ public class HRegionInfo implements Comp
   }
 
   /**
+   * Gets the start key from the specified region name.
+   * @param regionName
+   * @return Start key.
+   */
+  public static byte[] getStartKey(final byte[] regionName) throws IOException {
+    return parseRegionName(regionName)[1];
+  }
+
+  /**
    * Separate elements of a regionName.
    * @param regionName
    * @return Array of byte[] containing tableName, startKey and id
@@ -563,54 +568,6 @@ public class HRegionInfo implements Comp
        Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY));
   }
 
-  /**
-   * @return the tableDesc
-   * @deprecated Do not use; expensive call
-   *         use HRegionInfo.getTableNameAsString() in place of
-   *         HRegionInfo.getTableDesc().getNameAsString()
-   */
-   @Deprecated
-  public HTableDescriptor getTableDesc() {
-    Configuration c = HBaseConfiguration.create();
-    c.set("fs.defaultFS", c.get(HConstants.HBASE_DIR));
-    c.set("fs.default.name", c.get(HConstants.HBASE_DIR));
-    FileSystem fs;
-    try {
-      fs = FileSystem.get(c);
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-    FSTableDescriptors fstd =
-      new FSTableDescriptors(fs, new Path(c.get(HConstants.HBASE_DIR)));
-    try {
-      return fstd.get(this.tableName);
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  /**
-   * @param newDesc new table descriptor to use
-   * @deprecated Do not use; expensive call
-   */
-  @Deprecated
-  public void setTableDesc(HTableDescriptor newDesc) {
-    Configuration c = HBaseConfiguration.create();
-    FileSystem fs;
-    try {
-      fs = FileSystem.get(c);
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-    FSTableDescriptors fstd =
-      new FSTableDescriptors(fs, new Path(c.get(HConstants.HBASE_DIR)));
-    try {
-      fstd.add(newDesc);
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
   /** @return true if this is the root region */
   public boolean isRootRegion() {
     return Bytes.equals(tableName, HRegionInfo.ROOT_REGIONINFO.getTableName());
@@ -1056,6 +1013,20 @@ public class HRegionInfo implements Comp
   }
 
   /**
+   * The latest seqnum that the server writing to meta observed when opening the region.
+   * E.g. the seqNum when the result of {@link #getServerName(Result)} was written.
+   * @param r Result to pull the seqNum from
+   * @return SeqNum, or HConstants.NO_SEQNUM if there's no value written.
+   */
+  public static long getSeqNumDuringOpen(final Result r) {
+    byte[] value = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER);
+    if (value == null || value.length == 0) return HConstants.NO_SEQNUM;
+    Long result = Bytes.toLong(value);
+    if (result == null) return HConstants.NO_SEQNUM;
+    return result.longValue();
+  }
+
+  /**
    * Parses an HRegionInfo instance from the passed in stream.  Presumes the HRegionInfo was
    * serialized to the stream with {@link #toDelimitedByteArray()}
    * @param in
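
A brief sketch of the static getStartKey helper added above, which returns element [1] of parseRegionName (getSeqNumDuringOpen similarly reads the seqnum column from a catalog Result). The region name below is a hypothetical example in the tableName,startKey,id layout that parseRegionName expects.

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionNameSketch {
      public static void main(String[] args) throws IOException {
        // Region names encode tableName,startKey,id; this one is made up.
        byte[] regionName = Bytes.toBytes("mytable,rowkey100,1361000000000");
        byte[] startKey = HRegionInfo.getStartKey(regionName);
        System.out.println(Bytes.toStringBinary(startKey)); // rowkey100
      }
    }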

Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java Thu Feb 14 12:58:12 2013
@@ -35,6 +35,7 @@ public class HRegionLocation implements 
   private final HRegionInfo regionInfo;
   private final String hostname;
   private final int port;
+  private final long seqNum;
   // Cache of the 'toString' result.
   private String cachedString = null;
   // Cache of the hostname + port
@@ -43,14 +44,20 @@ public class HRegionLocation implements 
   /**
    * Constructor
    * @param regionInfo the HRegionInfo for the region
-   * @param hostname Hostname
-   * @param port port
    */
   public HRegionLocation(HRegionInfo regionInfo, final String hostname,
-      final int port) {
+      final int port, final long seqNum) {
     this.regionInfo = regionInfo;
     this.hostname = hostname;
     this.port = port;
+    this.seqNum = seqNum;
+  }
+
+  /**
+   * Test constructor w/o seqNum.
+   */
+  public HRegionLocation(HRegionInfo regionInfo, final String hostname, final int port) {
+    this(regionInfo, hostname, port, 0);
   }
 
   /**
@@ -60,7 +67,8 @@ public class HRegionLocation implements 
   public synchronized String toString() {
     if (this.cachedString == null) {
       this.cachedString = "region=" + this.regionInfo.getRegionNameAsString() +
-      ", hostname=" + this.hostname + ", port=" + this.port;
+      ", hostname=" + this.hostname + ", port=" + this.port
+      + ", seqNum=" + seqNum;
     }
     return this.cachedString;
   }
@@ -105,6 +113,10 @@ public class HRegionLocation implements 
     return this.port;
   }
 
+  public long getSeqNum() {
+    return seqNum;
+  }
+
   /**
    * @return String made of hostname and port formatted as per {@link Addressing#createHostAndPortStr(String, int)}
    */

Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java Thu Feb 14 12:58:12 2013
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -38,7 +39,9 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -63,8 +66,10 @@ public class HTableDescriptor implements
    *  Version 3 adds metadata as a map where keys and values are byte[].
    *  Version 4 adds indexes
    *  Version 5 removed transactional pollution -- e.g. indexes
+   *  Version 6 changed metadata to BytesBytesPair in PB
+   *  Version 7 adds table-level configuration
    */
-  private static final byte TABLE_DESCRIPTOR_VERSION = 5;
+  private static final byte TABLE_DESCRIPTOR_VERSION = 7;
 
   private byte [] name = HConstants.EMPTY_BYTE_ARRAY;
 
@@ -75,9 +80,16 @@ public class HTableDescriptor implements
    * includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
    * MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
    */
-  protected final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
+  private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
     new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();
 
+  /**
+   * A map which holds the configuration specific to the table.
+   * The keys of the map have the same names as config keys and override the defaults with
+   * table-specific settings. Example usage may be for compactions, etc.
+   */
+  private final Map<String, String> configuration = new HashMap<String, String>();
+
   public static final String SPLIT_POLICY = "SPLIT_POLICY";
 
   /**
@@ -234,7 +246,7 @@ public class HTableDescriptor implements
     }
     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry:
         values.entrySet()) {
-      this.values.put(entry.getKey(), entry.getValue());
+      setValue(entry.getKey(), entry.getValue());
     }
   }
 
@@ -293,7 +305,10 @@ public class HTableDescriptor implements
     }
     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
         desc.values.entrySet()) {
-      this.values.put(e.getKey(), e.getValue());
+      setValue(e.getKey(), e.getValue());
+    }
+    for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
+      this.configuration.put(e.getKey(), e.getValue());
     }
   }
 
@@ -331,7 +346,7 @@ public class HTableDescriptor implements
    */
   protected void setRootRegion(boolean isRoot) {
     // TODO: Make the value a boolean rather than String of boolean.
-    values.put(IS_ROOT_KEY, isRoot? TRUE: FALSE);
+    setValue(IS_ROOT_KEY, isRoot? TRUE: FALSE);
   }
 
   /**
@@ -358,7 +373,7 @@ public class HTableDescriptor implements
     byte [] value = getValue(key);
     if (value != null) {
       // TODO: Make value be a boolean rather than String of boolean.
-      return Boolean.valueOf(Bytes.toString(value)).booleanValue();
+      return Boolean.valueOf(Bytes.toString(value));
     }
     return valueIfNull;
   }
@@ -372,7 +387,7 @@ public class HTableDescriptor implements
    * <code> .META. </code> region 
    */
   protected void setMetaRegion(boolean isMeta) {
-    values.put(IS_META_KEY, isMeta? TRUE: FALSE);
+    setValue(IS_META_KEY, isMeta? TRUE: FALSE);
   }
 
   /** 
@@ -488,7 +503,7 @@ public class HTableDescriptor implements
    * @see #values
    */
   public void setValue(byte[] key, byte[] value) {
-    setValue(new ImmutableBytesWritable(key), value);
+    setValue(new ImmutableBytesWritable(key), new ImmutableBytesWritable(value));
   }
 
   /*
@@ -496,8 +511,8 @@ public class HTableDescriptor implements
    * @param value The value.
    */
   private void setValue(final ImmutableBytesWritable key,
-      final byte[] value) {
-    values.put(key, new ImmutableBytesWritable(value));
+      final String value) {
+    setValue(key, new ImmutableBytesWritable(Bytes.toBytes(value)));
   }
 
   /*
@@ -518,30 +533,30 @@ public class HTableDescriptor implements
    */
   public void setValue(String key, String value) {
     if (value == null) {
-      remove(Bytes.toBytes(key));
+      remove(key);
     } else {
       setValue(Bytes.toBytes(key), Bytes.toBytes(value));
     }
   }
-
+  
   /**
    * Remove metadata represented by the key from the {@link #values} map
    * 
    * @param key Key whose key and value we're to remove from HTableDescriptor
    * parameters.
    */
-  public void remove(final byte [] key) {
-    values.remove(new ImmutableBytesWritable(key));
+  public void remove(final String key) {
+    remove(new ImmutableBytesWritable(Bytes.toBytes(key)));
   }
-  
+
   /**
    * Remove metadata represented by the key from the {@link #values} map
-   * 
+   *
    * @param key Key whose key and value we're to remove from HTableDescriptor
    * parameters.
    */
-  public void remove(final String key) {
-    remove(Bytes.toBytes(key));
+  public void remove(ImmutableBytesWritable key) {
+    values.remove(key);
   }
 
   /**
@@ -595,7 +610,7 @@ public class HTableDescriptor implements
    * 
    * @param isDeferredLogFlush
    */
-  public void setDeferredLogFlush(final boolean isDeferredLogFlush) {
+  public synchronized void setDeferredLogFlush(final boolean isDeferredLogFlush) {
     setValue(DEFERRED_LOG_FLUSH_KEY, isDeferredLogFlush? TRUE: FALSE);
     this.deferredLog = isDeferredLogFlush;
   }
@@ -621,12 +636,10 @@ public class HTableDescriptor implements
   /**
    * This get the class associated with the region split policy which 
    * determines when a region split should occur.  The class used by
-   * default is {@link org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy}
-   * which split the region base on a constant {@link #getMaxFileSize()}
+   * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy}
    * 
    * @return the class name of the region split policy for this table.
-   * If this returns null, the default constant size based split policy
-   * is used.
+   * If this returns null, the default split policy is used.
    */
    public String getRegionSplitPolicyClassName() {
     return getValue(SPLIT_POLICY);
@@ -645,18 +658,19 @@ public class HTableDescriptor implements
 
   /** 
    * Returns the maximum size upto which a region can grow to after which a region
-   * split is triggered. The region size is represented by the size of the biggest 
+   * split is triggered. The region size is represented by the size of the biggest
    * store file in that region.
-   * 
-   * @return max hregion size for table
-   * 
+   *
+   * @return max hregion size for table, -1 if not set.
+   *
    * @see #setMaxFileSize(long)
    */
   public long getMaxFileSize() {
     byte [] value = getValue(MAX_FILESIZE_KEY);
-    if (value != null)
-      return Long.valueOf(Bytes.toString(value)).longValue();
-    return HConstants.DEFAULT_MAX_FILE_SIZE;
+    if (value != null) {
+      return Long.parseLong(Bytes.toString(value));
+    }
+    return -1;
   }
   
   /**
@@ -675,21 +689,22 @@ public class HTableDescriptor implements
    * before a split is triggered.
    */
   public void setMaxFileSize(long maxFileSize) {
-    setValue(MAX_FILESIZE_KEY, Bytes.toBytes(Long.toString(maxFileSize)));
+    setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
   }
 
   /**
    * Returns the size of the memstore after which a flush to filesystem is triggered.
-   * 
-   * @return memory cache flush size for each hregion
-   * 
+   *
+   * @return memory cache flush size for each hregion, -1 if not set.
+   *
    * @see #setMemStoreFlushSize(long)
    */
   public long getMemStoreFlushSize() {
     byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
-    if (value != null)
-      return Long.valueOf(Bytes.toString(value)).longValue();
-    return DEFAULT_MEMSTORE_FLUSH_SIZE;
+    if (value != null) {
+      return Long.parseLong(Bytes.toString(value));
+    }
+    return -1;
   }
 
   /**
@@ -699,8 +714,7 @@ public class HTableDescriptor implements
    * @param memstoreFlushSize memory cache flush size for each hregion
    */
   public void setMemStoreFlushSize(long memstoreFlushSize) {
-    setValue(MEMSTORE_FLUSHSIZE_KEY,
-      Bytes.toBytes(Long.toString(memstoreFlushSize)));
+    setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
   }
 
   /**
@@ -758,13 +772,13 @@ public class HTableDescriptor implements
 
     // step 1: set partitioning and pruning
     Set<ImmutableBytesWritable> reservedKeys = new TreeSet<ImmutableBytesWritable>();
-    Set<ImmutableBytesWritable> configKeys = new TreeSet<ImmutableBytesWritable>();
+    Set<ImmutableBytesWritable> userKeys = new TreeSet<ImmutableBytesWritable>();
     for (ImmutableBytesWritable k : values.keySet()) {
       if (k == null || k.get() == null) continue;
       String key = Bytes.toString(k.get());
       // in this section, print out reserved keywords + coprocessor info
       if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) {
-        configKeys.add(k);
+        userKeys.add(k);
         continue;
       }
       // only print out IS_ROOT/IS_META if true
@@ -781,50 +795,67 @@ public class HTableDescriptor implements
     }
 
     // early exit optimization
-    if (reservedKeys.isEmpty() && configKeys.isEmpty()) return s;
+    boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty();
+    if (!hasAttributes && configuration.isEmpty()) return s;
 
-    // step 2: printing
-    s.append(", {TABLE_ATTRIBUTES => {");
-
-    // print all reserved keys first
-    boolean printCommaForAttr = false;
-    for (ImmutableBytesWritable k : reservedKeys) {
-      String key = Bytes.toString(k.get());
-      String value = Bytes.toString(values.get(k).get());
-      if (printCommaForAttr) s.append(", ");
-      printCommaForAttr = true;
-      s.append(key);
-      s.append(" => ");
-      s.append('\'').append(value).append('\'');
-    }
-
-    if (!configKeys.isEmpty()) {
-      // print all non-reserved, advanced config keys as a separate subset
-      if (printCommaForAttr) s.append(", ");
-      printCommaForAttr = true;
-      s.append(HConstants.CONFIG).append(" => ");
-      s.append("{");
-      boolean printCommaForCfg = false;
-      for (ImmutableBytesWritable k : configKeys) {
+    s.append(", {");
+    // step 2: printing attributes
+    if (hasAttributes) {
+      s.append("TABLE_ATTRIBUTES => {");
+
+      // print all reserved keys first
+      boolean printCommaForAttr = false;
+      for (ImmutableBytesWritable k : reservedKeys) {
         String key = Bytes.toString(k.get());
         String value = Bytes.toString(values.get(k).get());
-        if (printCommaForCfg) s.append(", ");
-        printCommaForCfg = true;
-        s.append('\'').append(key).append('\'');
+        if (printCommaForAttr) s.append(", ");
+        printCommaForAttr = true;
+        s.append(key);
         s.append(" => ");
         s.append('\'').append(value).append('\'');
       }
-      s.append("}");
+
+      if (!userKeys.isEmpty()) {
+        // print all non-reserved, advanced config keys as a separate subset
+        if (printCommaForAttr) s.append(", ");
+        printCommaForAttr = true;
+        s.append(HConstants.METADATA).append(" => ");
+        s.append("{");
+        boolean printCommaForCfg = false;
+        for (ImmutableBytesWritable k : userKeys) {
+          String key = Bytes.toString(k.get());
+          String value = Bytes.toString(values.get(k).get());
+          if (printCommaForCfg) s.append(", ");
+          printCommaForCfg = true;
+          s.append('\'').append(key).append('\'');
+          s.append(" => ");
+          s.append('\'').append(value).append('\'');
+        }
+        s.append("}");
+      }
     }
 
-    s.append("}}"); // end METHOD
+    // step 3: printing all configuration:
+    if (!configuration.isEmpty()) {
+      if (hasAttributes) {
+        s.append(", ");
+      }
+      s.append(HConstants.CONFIGURATION).append(" => ");
+      s.append('{');
+      boolean printCommaForConfig = false;
+      for (Map.Entry<String, String> e : configuration.entrySet()) {
+        if (printCommaForConfig) s.append(", ");
+        printCommaForConfig = true;
+        s.append('\'').append(e.getKey()).append('\'');
+        s.append(" => ");
+        s.append('\'').append(e.getValue()).append('\'');
+      }
+      s.append("}");
+    }
+    s.append("}"); // end METHOD
     return s;
   }
 
-  public static Map<String, String> getDefaultValues() {
-    return Collections.unmodifiableMap(DEFAULT_VALUES);
-  }
-
   /**
    * Compare the contents of the descriptor with another one passed as a parameter. 
    * Checks if the obj passed is an instance of HTableDescriptor, if yes then the
@@ -861,6 +892,7 @@ public class HTableDescriptor implements
       }
     }
     result ^= values.hashCode();
+    result ^= configuration.hashCode();
     return result;
   }
 
@@ -881,13 +913,14 @@ public class HTableDescriptor implements
     setRootRegion(in.readBoolean());
     setMetaRegion(in.readBoolean());
     values.clear();
+    configuration.clear();
     int numVals = in.readInt();
     for (int i = 0; i < numVals; i++) {
       ImmutableBytesWritable key = new ImmutableBytesWritable();
       ImmutableBytesWritable value = new ImmutableBytesWritable();
       key.readFields(in);
       value.readFields(in);
-      values.put(key, value);
+      setValue(key, value);
     }
     families.clear();
     int numFamilies = in.readInt();
@@ -896,8 +929,17 @@ public class HTableDescriptor implements
       c.readFields(in);
       families.put(c.getName(), c);
     }
-    if (version < 4) {
-      return;
+    if (version >= 7) {
+      int numConfigs = in.readInt();
+      for (int i = 0; i < numConfigs; i++) {
+        ImmutableBytesWritable key = new ImmutableBytesWritable();
+        ImmutableBytesWritable value = new ImmutableBytesWritable();
+        key.readFields(in);
+        value.readFields(in);
+        configuration.put(
+          Bytes.toString(key.get(), key.getOffset(), key.getLength()),
+          Bytes.toString(value.get(), value.getOffset(), value.getLength()));
+      }
     }
   }
 
@@ -926,6 +968,11 @@ public class HTableDescriptor implements
       HColumnDescriptor family = it.next();
       family.write(out);
     }
+    out.writeInt(configuration.size());
+    for (Map.Entry<String, String> e : configuration.entrySet()) {
+      new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
+      new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
+    }
   }
 
   // Comparable
@@ -964,6 +1011,13 @@ public class HTableDescriptor implements
       else if (result > 0)
         result = 1;
     }
+    if (result == 0) {
+      result = this.configuration.hashCode() - other.configuration.hashCode();
+      if (result < 0)
+        result = -1;
+      else if (result > 0)
+        result = 1;
+    }
     return result;
   }
 
@@ -999,7 +1053,8 @@ public class HTableDescriptor implements
    * @see #getFamilies()
    */
   public HColumnDescriptor[] getColumnFamilies() {
-    return getFamilies().toArray(new HColumnDescriptor[0]);
+    Collection<HColumnDescriptor> hColumnDescriptors = getFamilies();
+    return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]);
   }
   
 
@@ -1107,10 +1162,10 @@ public class HTableDescriptor implements
     setValue(key, value);
   }
 
-  
+
   /**
    * Check if the table has an attached co-processor represented by the name className
-   * 
+   *
    * @param className - Class name of the co-processor
    * @return true of the table has a co-processor className
    */
@@ -1141,6 +1196,30 @@ public class HTableDescriptor implements
   }
 
   /**
+   * Return the list of attached co-processor represented by their name className
+   *
+   * @return The list of co-processors classNames
+   */
+  public List<String> getCoprocessors() {
+    List<String> result = new ArrayList<String>();
+    Matcher keyMatcher;
+    Matcher valueMatcher;
+    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values.entrySet()) {
+      keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
+      if (!keyMatcher.matches()) {
+        continue;
+      }
+      valueMatcher = HConstants.CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
+          .toString(e.getValue().get()));
+      if (!valueMatcher.matches()) {
+        continue;
+      }
+      result.add(valueMatcher.group(2).trim()); // classname is the 2nd field
+    }
+    return result;
+  }
+
+  /**
    * Remove a coprocessor from those set on the table
    * @param className Class name of the co-processor
    */
@@ -1170,7 +1249,7 @@ public class HTableDescriptor implements
     }
     // if we found a match, remove it
     if (match != null)
-      this.values.remove(match);
+      remove(match);
   }
   
   /**
@@ -1218,9 +1297,9 @@ public class HTableDescriptor implements
   @Deprecated
   public void setOwnerString(String ownerString) {
     if (ownerString != null) {
-      setValue(OWNER_KEY, Bytes.toBytes(ownerString));
+      setValue(OWNER_KEY, ownerString);
     } else {
-      values.remove(OWNER_KEY);
+      remove(OWNER_KEY);
     }
   }
 
@@ -1257,7 +1336,7 @@ public class HTableDescriptor implements
     }
     int pblen = ProtobufUtil.lengthOfPBMagic();
     TableSchema.Builder builder = TableSchema.newBuilder();
-    TableSchema ts = null;
+    TableSchema ts;
     try {
       ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
     } catch (InvalidProtocolBufferException e) {
@@ -1273,14 +1352,20 @@ public class HTableDescriptor implements
     TableSchema.Builder builder = TableSchema.newBuilder();
     builder.setName(ByteString.copyFrom(getName()));
     for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
-      TableSchema.Attribute.Builder aBuilder = TableSchema.Attribute.newBuilder();
-      aBuilder.setName(ByteString.copyFrom(e.getKey().get()));
-      aBuilder.setValue(ByteString.copyFrom(e.getValue().get()));
+      BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
+      aBuilder.setFirst(ByteString.copyFrom(e.getKey().get()));
+      aBuilder.setSecond(ByteString.copyFrom(e.getValue().get()));
       builder.addAttributes(aBuilder.build());
     }
     for (HColumnDescriptor hcd: getColumnFamilies()) {
       builder.addColumnFamilies(hcd.convert());
     }
+    for (Map.Entry<String, String> e : this.configuration.entrySet()) {
+      NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
+      aBuilder.setName(e.getKey());
+      aBuilder.setValue(e.getValue());
+      builder.addConfiguration(aBuilder.build());
+    }
     return builder.build();
   }
 
@@ -1296,9 +1381,47 @@ public class HTableDescriptor implements
       hcds[index++] = HColumnDescriptor.convert(cfs);
     }
     HTableDescriptor htd = new HTableDescriptor(ts.getName().toByteArray(), hcds);
-    for (TableSchema.Attribute a: ts.getAttributesList()) {
-      htd.setValue(a.getName().toByteArray(), a.getValue().toByteArray());
+    for (BytesBytesPair a: ts.getAttributesList()) {
+      htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
+    }
+    for (NameStringPair a: ts.getConfigurationList()) {
+      htd.setConfiguration(a.getName(), a.getValue());
     }
     return htd;
   }
+
+  /**
+   * Getter for accessing the configuration value by key
+   */
+  public String getConfigurationValue(String key) {
+    return configuration.get(key);
+  }
+
+  /**
+   * Getter for fetching an unmodifiable {@link #configuration} map.
+   */
+  public Map<String, String> getConfiguration() {
+    // shallow pointer copy
+    return Collections.unmodifiableMap(configuration);
+  }
+
+  /**
+   * Setter for storing a configuration setting in {@link #configuration} map.
+   * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
+   * @param value String value. If null, removes the setting.
+   */
+  public void setConfiguration(String key, String value) {
+    if (value == null) {
+      removeConfiguration(key);
+    } else {
+      configuration.put(key, value);
+    }
+  }
+
+  /**
+   * Remove a config setting represented by the key from the {@link #configuration} map
+   */
+  public void removeConfiguration(final String key) {
+    configuration.remove(key);
+  }
 }

Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java Thu Feb 14 12:58:12 2013
@@ -18,67 +18,11 @@
 
 package org.apache.hadoop.hbase;
 
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
 import org.apache.hadoop.hbase.security.KerberosInfo;
 import org.apache.hadoop.hbase.security.TokenInfo;
-import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
 
 /**
  * Protocol that a client uses to communicate with the Master (for admin purposes).
@@ -88,324 +32,5 @@ import com.google.protobuf.ServiceExcept
 @TokenInfo("HBASE_AUTH_TOKEN")
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public interface MasterAdminProtocol extends
-    MasterAdminService.BlockingInterface, MasterProtocol {
-  public static final long VERSION = 1L;
-
-  /* Column-level */
-
-  /**
-   * Adds a column to the specified table
-   * @param controller Unused (set to null).
-   * @param req AddColumnRequest that contains:<br>
-   * - tableName: table to modify<br>
-   * - column: column descriptor
-   * @throws ServiceException
-   */
-  @Override
-  public AddColumnResponse addColumn(RpcController controller, AddColumnRequest req)
-  throws ServiceException;
-
-  /**
-   * Deletes a column from the specified table. Table must be disabled.
-   * @param controller Unused (set to null).
-   * @param req DeleteColumnRequest that contains:<br>
-   * - tableName: table to alter<br>
-   * - columnName: column family to remove
-   * @throws ServiceException
-   */
-  @Override
-  public DeleteColumnResponse deleteColumn(RpcController controller, DeleteColumnRequest req)
-  throws ServiceException;
-
-  /**
-   * Modifies an existing column on the specified table
-   * @param controller Unused (set to null).
-   * @param req ModifyColumnRequest that contains:<br>
-   * - tableName: table name<br>
-   * - descriptor: new column descriptor
-   * @throws ServiceException  e
-   */
-  @Override
-  public ModifyColumnResponse modifyColumn(RpcController controller, ModifyColumnRequest req)
-  throws ServiceException;
-
-  /* Region-level */
-
-  /**
-   * Move a region to a specified destination server.
-   * @param controller Unused (set to null).
-   * @param req The request that contains:<br>
-   * - region: The encoded region name; i.e. the hash that makes
-   * up the region name suffix: e.g. if regionname is
-   * <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
-   * then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.<br>
-   * - destServerName: The servername of the destination regionserver.  If
-   * passed the empty byte array we'll assign to a random server.  A server name
-   * is made of host, port and startcode.  Here is an example:
-   * <code> host187.example.com,60020,1289493121758</code>.
-   * @throws ServiceException that wraps a UnknownRegionException if we can't find a
-   * region named <code>encodedRegionName</code>
-   */
-  @Override
-  public MoveRegionResponse moveRegion(RpcController controller, MoveRegionRequest req)
-  throws ServiceException;
-
-  /**
-   * Assign a region to a server chosen at random.
-   * @param controller Unused (set to null).
-   * @param req contains the region to assign.  Will use existing RegionPlan if one
-   * found.
-   * @throws ServiceException
-   */
-  @Override
-  public AssignRegionResponse assignRegion(RpcController controller, AssignRegionRequest req)
-  throws ServiceException;
-
-  /**
-   * Unassign a region from current hosting regionserver.  Region will then be
-   * assigned to a regionserver chosen at random.  Region could be reassigned
-   * back to the same server.  Use {@link #moveRegion} if you want to
-   * control the region movement.
-   * @param controller Unused (set to null).
-   * @param req The request that contains:<br>
-   * - region: Region to unassign. Will clear any existing RegionPlan
-   * if one found.<br>
-   * - force: If true, force unassign (Will remove region from
-   * regions-in-transition too if present as well as from assigned regions --
-   * radical!.If results in double assignment use hbck -fix to resolve.
-   * @throws ServiceException
-   */
-  @Override
-  public UnassignRegionResponse unassignRegion(RpcController controller, UnassignRegionRequest req)
-  throws ServiceException;
-
-  /**
-   * Offline a region from the assignment manager's in-memory state.  The
-   * region should be in a closed state and there will be no attempt to
-   * automatically reassign the region as in unassign.   This is a special
-   * method, and should only be used by experts or hbck.
-   * @param controller Unused (set to null).
-   * @param request OfflineRegionRequest that contains:<br>
-   * - region: Region to offline.  Will clear any existing RegionPlan
-   * if one found.
-   * @throws ServiceException
-   */
-  @Override
-  public OfflineRegionResponse offlineRegion(RpcController controller, OfflineRegionRequest request)
-  throws ServiceException;
-
-  /* Table-level */
-
-  /**
-   * Creates a new table asynchronously.  If splitKeys are specified, then the
-   * table will be created with an initial set of multiple regions.
-   * If splitKeys is null, the table will be created with a single region.
-   * @param controller Unused (set to null).
-   * @param req CreateTableRequest that contains:<br>
-   * - tablesSchema: table descriptor<br>
-   * - splitKeys
-   * @throws ServiceException
-   */
-  @Override
-  public CreateTableResponse createTable(RpcController controller, CreateTableRequest req)
-  throws ServiceException;
-
-  /**
-   * Deletes a table
-   * @param controller Unused (set to null).
-   * @param req DeleteTableRequest that contains:<br>
-   * - tableName: table to delete
-   * @throws ServiceException
-   */
-  @Override
-  public DeleteTableResponse deleteTable(RpcController controller, DeleteTableRequest req)
-  throws ServiceException;
-
-  /**
-   * Puts the table on-line (only needed if table has been previously taken offline)
-   * @param controller Unused (set to null).
-   * @param req EnableTableRequest that contains:<br>
-   * - tableName: table to enable
-   * @throws ServiceException
-   */
-  @Override
-  public EnableTableResponse enableTable(RpcController controller, EnableTableRequest req)
-  throws ServiceException;
-
-  /**
-   * Take table offline
-   *
-   * @param controller Unused (set to null).
-   * @param req DisableTableRequest that contains:<br>
-   * - tableName: table to take offline
-   * @throws ServiceException
-   */
-  @Override
-  public DisableTableResponse disableTable(RpcController controller, DisableTableRequest req)
-  throws ServiceException;
-
-  /**
-   * Modify a table's metadata
-   *
-   * @param controller Unused (set to null).
-   * @param req ModifyTableRequest that contains:<br>
-   * - tableName: table to modify<br>
-   * - tableSchema: new descriptor for table
-   * @throws ServiceException
-   */
-  @Override
-  public ModifyTableResponse modifyTable(RpcController controller, ModifyTableRequest req)
-  throws ServiceException;
-
-  /* Cluster-level */
-
-  /**
-   * Shutdown an HBase cluster.
-   * @param controller Unused (set to null).
-   * @param request ShutdownRequest
-   * @return ShutdownResponse
-   * @throws ServiceException
-   */
-  @Override
-  public ShutdownResponse shutdown(RpcController controller, ShutdownRequest request)
-  throws ServiceException;
-
-  /**
-   * Stop HBase Master only.
-   * Does not shutdown the cluster.
-   * @param controller Unused (set to null).
-   * @param request StopMasterRequest
-   * @return StopMasterResponse
-   * @throws ServiceException
-   */
-  @Override
-  public StopMasterResponse stopMaster(RpcController controller, StopMasterRequest request)
-  throws ServiceException;
-
-  /**
-   * Run the balancer.  Will run the balancer and if regions to move, it will
-   * go ahead and do the reassignments.  Can NOT run for various reasons.  Check
-   * logs.
-   * @param c Unused (set to null).
-   * @param request BalanceRequest
-   * @return BalanceResponse that contains:<br>
-   * - balancerRan: True if balancer ran and was able to tell the region servers to
-   * unassign all the regions to balance (the re-assignment itself is async),
-   * false otherwise.
-   */
-  @Override
-  public BalanceResponse balance(RpcController c, BalanceRequest request) throws ServiceException;
-
-  /**
-   * Turn the load balancer on or off.
-   * @param controller Unused (set to null).
-   * @param req SetBalancerRunningRequest that contains:<br>
-   * - on: If true, enable balancer. If false, disable balancer.<br>
-   * - synchronous: if true, wait until current balance() call, if outstanding, to return.
-   * @return SetBalancerRunningResponse that contains:<br>
-   * - prevBalanceValue: Previous balancer value
-   * @throws ServiceException
-   */
-  @Override
-  public SetBalancerRunningResponse setBalancerRunning(
-      RpcController controller, SetBalancerRunningRequest req) throws ServiceException;
-
-    /**
-   * @param c Unused (set to null).
-   * @param req IsMasterRunningRequest
-   * @return IsMasterRunningRequest that contains:<br>
-   * isMasterRunning: true if master is available
-   * @throws ServiceException
-   */
-  @Override
-  public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req)
-  throws ServiceException;
-
-  /**
-   * Run a scan of the catalog table
-   * @param c Unused (set to null).
-   * @param req CatalogScanRequest
-   * @return CatalogScanResponse that contains the int return code corresponding
-   *         to the number of entries cleaned
-   * @throws ServiceException
-   */
-  @Override
-  public CatalogScanResponse runCatalogScan(RpcController c,
-      CatalogScanRequest req) throws ServiceException;
-
-  /**
-   * Enable/Disable the catalog janitor
-   * @param c Unused (set to null).
-   * @param req EnableCatalogJanitorRequest that contains:<br>
-   * - enable: If true, enable catalog janitor. If false, disable janitor.<br>
-   * @return EnableCatalogJanitorResponse that contains:<br>
-   * - prevValue: true, if it was enabled previously; false, otherwise
-   * @throws ServiceException
-   */
-  @Override
-  public EnableCatalogJanitorResponse enableCatalogJanitor(RpcController c,
-      EnableCatalogJanitorRequest req) throws ServiceException;
-
-  /**
-   * Query whether the catalog janitor is enabled
-   * @param c Unused (set to null).
-   * @param req IsCatalogJanitorEnabledRequest
-   * @return IsCatalogCatalogJanitorEnabledResponse that contains:<br>
-   * - value: true, if it is enabled; false, otherwise
-   * @throws ServiceException
-   */
-  @Override
-  public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c,
-      IsCatalogJanitorEnabledRequest req) throws ServiceException;
-
-  /**
-   * Create a snapshot for the given table.
-   * @param controller Unused (set to null).
-   * @param snapshot description of the snapshot to take
-   * @return empty response on success
-   * @throws ServiceException if the snapshot cannot be taken
-   */
-  @Override
-  public TakeSnapshotResponse snapshot(RpcController controller, TakeSnapshotRequest snapshot)
-      throws ServiceException;
-
-  /**
-   * List existing snapshots.
-   * @param controller Unused (set to null).
-   * @param request information about the request (can be empty)
-   * @return {@link ListSnapshotResponse} - a list of {@link SnapshotDescription}
-   * @throws ServiceException if we cannot reach the filesystem
-   */
-  @Override
-  public ListSnapshotResponse listSnapshots(RpcController controller, ListSnapshotRequest request)
-      throws ServiceException;
-
-  /**
-   * Delete an existing snapshot. This method can also be used to clean up a aborted snapshot.
-   * @param controller Unused (set to null).
-   * @param snapshotName snapshot to delete
-   * @return <tt>true</tt> if the snapshot was deleted, <tt>false</tt> if the snapshot didn't exist
-   *         originally
-   * @throws ServiceException if the filesystem cannot be reached
-   */
-  @Override
-  public DeleteSnapshotResponse deleteSnapshot(RpcController controller,
-      DeleteSnapshotRequest snapshotName) throws ServiceException;
-
-  /**
-   * Check to see if the snapshot is done.
-   * @param controller Unused (set to null).
-   * @param request name of the snapshot to check.
-   * @throws ServiceException around possible exceptions:
-   *           <ol>
-   *           <li>{@link UnknownSnapshotException} if the passed snapshot name doesn't match the
-   *           current snapshot <i>or</i> there is no previous snapshot.</li>
-   *           <li>{@link SnapshotCreationException} if the snapshot couldn't complete because of
-   *           errors</li>
-   *           </ol>
-   */
-  @Override
-  public IsSnapshotDoneResponse isSnapshotDone(RpcController controller,
-      IsSnapshotDoneRequest request) throws ServiceException;
-}
+public interface MasterAdminProtocol
+extends MasterAdminService.BlockingInterface, MasterProtocol {}
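With the wrapper methods gone, MasterAdminProtocol is a pure marker and callers go straight through the generated MasterAdminService.BlockingInterface. A sketch of one such call, assuming the proxy is obtained elsewhere (how it is built is outside this hunk):

import org.apache.hadoop.hbase.MasterAdminProtocol;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse;
import com.google.protobuf.ServiceException;

public class BalanceCall {
  // masterAdmin is an assumed RPC proxy handed in by the caller.
  static boolean runBalancer(MasterAdminProtocol masterAdmin) throws ServiceException {
    BalanceResponse resp =
        masterAdmin.balance(null, BalanceRequest.newBuilder().build()); // controller unused
    return resp.getBalancerRan(); // true if the balancer ran; reassignment itself is async
  }
}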

Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterMonitorProtocol.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterMonitorProtocol.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterMonitorProtocol.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterMonitorProtocol.java Thu Feb 14 12:58:12 2013
@@ -21,19 +21,8 @@ package org.apache.hadoop.hbase;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.MasterMonitorService;
-import org.apache.hadoop.hbase.security.TokenInfo;
 import org.apache.hadoop.hbase.security.KerberosInfo;
-import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
+import org.apache.hadoop.hbase.security.TokenInfo;
 
 /**
  * Protocol that a client uses to communicate with the Master (for monitoring purposes).
@@ -43,57 +32,5 @@ import com.google.protobuf.ServiceExcept
 @TokenInfo("HBASE_AUTH_TOKEN")
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public interface MasterMonitorProtocol extends
-    MasterMonitorService.BlockingInterface, MasterProtocol {
-  public static final long VERSION = 1L;
-
-  /**
-   * Used by the client to get the number of regions that have received the
-   * updated schema
-   *
-   * @param controller Unused (set to null).
-   * @param req GetSchemaAlterStatusRequest that contains:<br>
-   * - tableName
-   * @return GetSchemaAlterStatusResponse indicating the number of regions updated.
-   *         yetToUpdateRegions is the regions that are yet to be updated totalRegions
-   *         is the total number of regions of the table
-   * @throws ServiceException
-   */
-  @Override
-  public GetSchemaAlterStatusResponse getSchemaAlterStatus(
-    RpcController controller, GetSchemaAlterStatusRequest req) throws ServiceException;
-
-  /**
-   * Get list of TableDescriptors for requested tables.
-   * @param controller Unused (set to null).
-   * @param req GetTableDescriptorsRequest that contains:<br>
-   * - tableNames: requested tables, or if empty, all are requested
-   * @return GetTableDescriptorsResponse
-   * @throws ServiceException
-   */
-  @Override
-  public GetTableDescriptorsResponse getTableDescriptors(
-      RpcController controller, GetTableDescriptorsRequest req) throws ServiceException;
-
-  /**
-   * Return cluster status.
-   * @param controller Unused (set to null).
-   * @param req GetClusterStatusRequest
-   * @return status object
-   * @throws ServiceException
-   */
-  @Override
-  public GetClusterStatusResponse getClusterStatus(RpcController controller, GetClusterStatusRequest req)
-  throws ServiceException;
-
-  /**
-   * @param c Unused (set to null).
-   * @param req IsMasterRunningRequest
-   * @return IsMasterRunningRequest that contains:<br>
-   * isMasterRunning: true if master is available
-   * @throws ServiceException
-   */
-  @Override
-  public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req)
-  throws ServiceException;
-}
+public interface MasterMonitorProtocol
+extends MasterMonitorService.BlockingInterface, MasterProtocol {}
\ No newline at end of file
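The monitor protocol collapses the same way; a sketch of fetching cluster status through the generated blocking interface, under the same assumption that the proxy comes from elsewhere:

import org.apache.hadoop.hbase.MasterMonitorProtocol;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse;
import com.google.protobuf.ServiceException;

public class ClusterStatusCall {
  static GetClusterStatusResponse status(MasterMonitorProtocol monitor) throws ServiceException {
    // Empty request asks for the full cluster status.
    return monitor.getClusterStatus(null, GetClusterStatusRequest.newBuilder().build());
  }
}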

Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterProtocol.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterProtocol.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterProtocol.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterProtocol.java Thu Feb 14 12:58:12 2013
@@ -16,29 +16,14 @@
  * limitations under the License.
  */
 
-// Functions implemented by all the master protocols (e.g. MasterAdminProtocol,
-// MasterMonitorProtocol).  Currently, this is only isMasterRunning, which is used,
-// on proxy creation, to check if the master has been stopped.  If it has,
-// a MasterNotRunningException is thrown back to the client, and the client retries.
-
 package org.apache.hadoop.hbase;
 
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
-import org.apache.hadoop.hbase.ipc.VersionedProtocol;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-
-public interface MasterProtocol extends VersionedProtocol, MasterService.BlockingInterface {
 
-  /**
-   * @param c Unused (set to null).
-   * @param req IsMasterRunningRequest
-   * @return IsMasterRunningRequest that contains:<br>
-   * isMasterRunning: true if master is available
-   * @throws ServiceException
-   */
-  public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req)
-  throws ServiceException;
-}
+/**
+ * Functions implemented by all the master protocols: e.g. {@link MasterAdminProtocol}
+ * and {@link MasterMonitorProtocol}. Currently the only shared method is
+ * {@link #isMasterRunning(com.google.protobuf.RpcController, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)},
+ * which is used on connection setup to check if the master has been stopped.
+ */
+public interface MasterProtocol extends IpcProtocol, MasterService.BlockingInterface {}
\ No newline at end of file
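A sketch of the connection-setup check the javadoc describes, again assuming the proxy is handed in:

import org.apache.hadoop.hbase.MasterProtocol;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import com.google.protobuf.ServiceException;

public class MasterCheck {
  static boolean masterIsRunning(MasterProtocol master) throws ServiceException {
    // Returns true if the master is available; a stopped master fails this probe.
    return master.isMasterRunning(null, IsMasterRunningRequest.newBuilder().build())
        .getIsMasterRunning();
  }
}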

Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionMovedException.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionMovedException.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionMovedException.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionMovedException.java Thu Feb 14 12:58:12 2013
@@ -31,18 +31,22 @@ import org.apache.hadoop.ipc.RemoteExcep
 @InterfaceStability.Evolving
 public class RegionMovedException extends NotServingRegionException {
   private static final Log LOG = LogFactory.getLog(RegionMovedException.class);
-  private static final long serialVersionUID = -7232903522310558397L;
+  private static final long serialVersionUID = -7232903522310558396L;
 
   private final String hostname;
   private final int port;
+  private final long locationSeqNum;
 
   private static final String HOST_FIELD = "hostname=";
   private static final String PORT_FIELD = "port=";
+  private static final String LOCATIONSEQNUM_FIELD = "locationSeqNum=";
 
-  public RegionMovedException(final String hostname, final int port) {
+  public RegionMovedException(final String hostname, final int port,
+    final long locationSeqNum) {
     super();
     this.hostname = hostname;
     this.port = port;
+    this.locationSeqNum = locationSeqNum;
   }
 
   public String getHostname() {
@@ -53,6 +57,10 @@ public class RegionMovedException extend
     return port;
   }
 
+  public long getLocationSeqNum() {
+    return locationSeqNum;
+  }
+
   /**
    * For hadoop.ipc internal call. Do NOT use.
    * We have to parse the hostname to recreate the exception.
@@ -61,24 +69,31 @@ public class RegionMovedException extend
   public RegionMovedException(String s) {
     int posHostname = s.indexOf(HOST_FIELD) + HOST_FIELD.length();
     int posPort = s.indexOf(PORT_FIELD) + PORT_FIELD.length();
+    int posSeqNum = s.indexOf(LOCATIONSEQNUM_FIELD) + LOCATIONSEQNUM_FIELD.length();
 
     String tmpHostname = null;
     int tmpPort = -1;
+    long tmpSeqNum = HConstants.NO_SEQNUM;
     try {
+      // TODO: this whole thing is extremely brittle.
       tmpHostname = s.substring(posHostname, s.indexOf(' ', posHostname));
       tmpPort = Integer.parseInt(s.substring(posPort, s.indexOf('.', posPort)));
+      tmpSeqNum = Long.parseLong(s.substring(posSeqNum, s.indexOf('.', posSeqNum)));
     } catch (Exception ignored) {
-      LOG.warn("Can't parse the hostname and the port from this string: " + s + ", "+
-        "Continuing");
+      LOG.warn("Can't parse the hostname and the port from this string: " + s + ", continuing");
     }
 
     hostname = tmpHostname;
     port = tmpPort;
+    locationSeqNum = tmpSeqNum;
   }
 
   @Override
   public String getMessage() {
-    return "Region moved to: " + HOST_FIELD + hostname + " " + PORT_FIELD + port + ".";
+    // TODO: deserialization above depends on this. That is bad, but also means this
+    // should be modified carefully.
+    return "Region moved to: " + HOST_FIELD + hostname + " " + PORT_FIELD + port + ". As of "
+      + LOCATIONSEQNUM_FIELD + locationSeqNum + ".";
   }
 
   /**

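As the TODOs note, the String constructor re-parses exactly what getMessage() emits, so the two must stay in sync. A round-trip sketch of that coupling:

import org.apache.hadoop.hbase.RegionMovedException;

public class RegionMovedRoundTrip {
  public static void main(String[] args) {
    RegionMovedException e =
        new RegionMovedException("host187.example.com", 60020, 12345L);
    // Message looks like:
    // "Region moved to: hostname=host187.example.com port=60020. As of locationSeqNum=12345."
    RegionMovedException reparsed = new RegionMovedException(e.getMessage());
    System.out.println(reparsed.getHostname());       // host187.example.com
    System.out.println(reparsed.getPort());           // 60020
    System.out.println(reparsed.getLocationSeqNum()); // 12345
  }
}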
Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionServerStatusProtocol.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionServerStatusProtocol.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionServerStatusProtocol.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionServerStatusProtocol.java Thu Feb 14 12:58:12 2013
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hbase.ipc.VersionedProtocol;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
 import org.apache.hadoop.hbase.security.TokenInfo;
 import org.apache.hadoop.hbase.security.KerberosInfo;
@@ -33,7 +32,5 @@ import org.apache.hadoop.hbase.security.
 @TokenInfo("HBASE_AUTH_TOKEN")
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public interface RegionServerStatusProtocol extends
-    RegionServerStatusService.BlockingInterface, VersionedProtocol {
-  public static final long VERSION = 1L;
-}
+public interface RegionServerStatusProtocol
+extends RegionServerStatusService.BlockingInterface, IpcProtocol {}
\ No newline at end of file

Modified: hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/ServerName.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/ServerName.java?rev=1446147&r1=1446146&r2=1446147&view=diff
==============================================================================
--- hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/ServerName.java (original)
+++ hbase/branches/hbase-7290v2/hbase-server/src/main/java/org/apache/hadoop/hbase/ServerName.java Thu Feb 14 12:58:12 2013
@@ -71,7 +71,7 @@ public class ServerName implements Compa
    */
   public static final String SERVERNAME_SEPARATOR = ",";
 
-  public static Pattern SERVERNAME_PATTERN =
+  public static final Pattern SERVERNAME_PATTERN =
     Pattern.compile("[^" + SERVERNAME_SEPARATOR + "]+" +
       SERVERNAME_SEPARATOR + Addressing.VALID_PORT_REGEX +
       SERVERNAME_SEPARATOR + Addressing.VALID_PORT_REGEX + "$");
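For reference, the shape the now-final pattern accepts is host,port,startcode (example servername taken from the moveRegion javadoc above); a stand-in check, where the inlined \d+ approximates Addressing.VALID_PORT_REGEX, which is not shown here:

import java.util.regex.Pattern;

public class ServerNameCheck {
  public static void main(String[] args) {
    // Approximation of SERVERNAME_PATTERN: non-comma host, numeric port, numeric startcode.
    Pattern p = Pattern.compile("[^,]+,\\d+,\\d+$");
    System.out.println(p.matcher("host187.example.com,60020,1289493121758").matches()); // true
  }
}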