Posted to commits@hbase.apache.org by st...@apache.org on 2012/08/28 05:40:49 UTC

svn commit: r1377965 [2/3] - in /hbase/trunk: hbase-common/src/main/java/org/apache/hadoop/hbase/ hbase-server/src/main/java/org/apache/hadoop/hbase/ hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/ hbase-server/src/main/java/org/apache/hado...

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java Tue Aug 28 03:40:47 2012
@@ -73,7 +73,6 @@ import org.apache.hadoop.hbase.util.Byte
 import org.apache.hadoop.hbase.util.KeyLocker;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.zookeeper.RootRegionTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
 import org.apache.hadoop.hbase.zookeeper.ZKTable;
@@ -149,14 +148,14 @@ public class AssignmentManager extends Z
    * Contains the servers which need their timers updated; these servers
    * will be handled by {@link TimerUpdater}
    */
-  private final ConcurrentSkipListSet<ServerName> serversInUpdatingTimer = 
+  private final ConcurrentSkipListSet<ServerName> serversInUpdatingTimer =
     new ConcurrentSkipListSet<ServerName>();
 
   private final ExecutorService executorService;
 
   //Thread pool executor service for timeout monitor
   private java.util.concurrent.ExecutorService threadPoolExecutorService;
-  
+
   private List<EventType> ignoreStatesRSOffline = Arrays.asList(new EventType[]{
       EventType.RS_ZK_REGION_FAILED_OPEN, EventType.RS_ZK_REGION_CLOSED });
 
@@ -166,8 +165,8 @@ public class AssignmentManager extends Z
    */
   private volatile boolean failover = false;
 
-  // Set holding all the regions which got processed while RIT was not 
-  // populated during master failover. 
+  // Set holding all the regions which got processed while RIT was not
+  // populated during master failover.
   private Map<String, HRegionInfo> failoverProcessedRegions =
     new HashMap<String, HRegionInfo>();
 
@@ -184,7 +183,7 @@ public class AssignmentManager extends Z
    * @param catalogTracker
    * @param service
    * @throws KeeperException
-   * @throws IOException 
+   * @throws IOException
    */
   public AssignmentManager(Server master, ServerManager serverManager,
       CatalogTracker catalogTracker, final LoadBalancer balancer,
@@ -244,8 +243,8 @@ public class AssignmentManager extends Z
 
   /**
    * Add a regionPlan for the specified region.
-   * @param encodedName 
-   * @param plan 
+   * @param encodedName
+   * @param plan
    */
   public void addPlan(String encodedName, RegionPlan plan) {
     synchronized (regionPlans) {
@@ -344,7 +343,7 @@ public class AssignmentManager extends Z
 
   /**
    * Process all regions that are in transition in zookeeper and also
-   * processes the list of dead servers by scanning the META. 
+   * processes the list of dead servers by scanning the META.
   * Used by master joining a cluster.  If we figure this is a clean cluster
    * startup, will assign all user regions.
    * @param deadServers
@@ -441,7 +440,7 @@ public class AssignmentManager extends Z
    * up in zookeeper.
    * @param encodedRegionName Region to process failover for.
    * @param regionInfo If null we'll go get it from meta table.
-   * @param deadServers Can be null 
+   * @param deadServers Can be null
    * @return True if we processed <code>regionInfo</code> as a RIT.
    * @throws KeeperException
    * @throws IOException
@@ -461,7 +460,7 @@ public class AssignmentManager extends Z
     }
     HRegionInfo hri = regionInfo;
     if (hri == null) {
-      if ((hri = getHRegionInfo(rt.getRegionName())) == null) return false; 
+      if ((hri = getHRegionInfo(rt.getRegionName())) == null) return false;
     }
     processRegionsInTransition(rt, hri, deadServers, stat.getVersion());
     return true;
@@ -582,7 +581,7 @@ public class AssignmentManager extends Z
       lock.unlock();
     }
   }
-  
+
 
   /**
    * Put the region <code>hri</code> into an offline state up in zk.
@@ -728,7 +727,7 @@ public class AssignmentManager extends Z
           byte [] payload = rt.getPayload();
           List<HRegionInfo> daughters = null;
           try {
-            daughters = Writables.getHRegionInfos(payload, 0, payload.length);
+            daughters = HRegionInfo.parseDelimitedFrom(payload, 0, payload.length);
           } catch (IOException e) {
             LOG.error("Dropped split! Failed reading split payload for " +
               prettyPrintedRegionName);
@@ -795,7 +794,7 @@ public class AssignmentManager extends Z
           this.executorService.submit(new ClosedRegionHandler(master,
             this, regionState.getRegion()));
           break;
-          
+
         case RS_ZK_REGION_FAILED_OPEN:
           hri = checkIfInFailover(regionState, encodedName, regionName);
           if (hri != null) {
@@ -869,7 +868,7 @@ public class AssignmentManager extends Z
           this.executorService.submit(
             new OpenedRegionHandler(master, this, regionState.getRegion(), sn, expectedVersion));
           break;
-          
+
         default:
           throw new IllegalStateException("Received event is not valid.");
       }
@@ -901,7 +900,7 @@ public class AssignmentManager extends Z
   /**
    * Gets the HRegionInfo from the META table
    * @param  regionName
-   * @return HRegionInfo hri for the region 
+   * @return HRegionInfo hri for the region
    */
   private HRegionInfo getHRegionInfo(final byte [] regionName) {
     Pair<HRegionInfo, ServerName> p = null;
@@ -1234,7 +1233,7 @@ public class AssignmentManager extends Z
    */
   public void assign(HRegionInfo region, boolean setOfflineInZK,
       boolean forceNewPlan, boolean hijack) {
-    // If hijack is true do not call disableRegionIfInRIT as 
+    // If hijack is true do not call disableRegionIfInRIT as
     // we have not yet moved the znode to OFFLINE state.
     if (!hijack && isDisabledorDisablingRegionInRIT(region)) {
       return;
@@ -1285,7 +1284,7 @@ public class AssignmentManager extends Z
           destination));
     }
     this.addPlans(plans);
-    
+
     // Presumption is that only this thread will be updating the state at this
     // time; i.e. handlers on backend won't be trying to set it to OPEN, etc.
     AtomicInteger counter = new AtomicInteger(0);
@@ -1366,7 +1365,7 @@ public class AssignmentManager extends Z
   /**
    * Bulk assign regions to available servers if any with retry, else assign
    * region singly.
-   * 
+   *
    * @param regions all regions to assign
    * @param servers all available servers
    */
@@ -1660,7 +1659,7 @@ public class AssignmentManager extends Z
 
   /**
    * Set region as OFFLINED up in zookeeper
-   * 
+   *
    * @param state
    * @param hijack
    *          - true if needs to be hijacked and reassigned, false otherwise.
@@ -1670,7 +1669,7 @@ public class AssignmentManager extends Z
   int setOfflineInZooKeeper(final RegionState state,
       boolean hijack) {
     // In case of reassignment the current state in memory need not be
-    // OFFLINE. 
+    // OFFLINE.
     if (!hijack && !state.isClosed() && !state.isOffline()) {
       String msg = "Unexpected state : " + state + " .. Cannot transit it to OFFLINE.";
       this.master.abort(msg, new IllegalStateException(msg));
@@ -1679,13 +1678,13 @@ public class AssignmentManager extends Z
     boolean allowZNodeCreation = false;
     // Under reassignment if the current state is PENDING_OPEN
     // or OPENING then refresh the in-memory state to PENDING_OPEN. This is
-    // important because if the region was in 
+    // important because if the region was in
     // RS_OPENING state for a long time the master will try to force the znode
     // to OFFLINE state meanwhile the RS could have opened the corresponding
     // region and the state in znode will be RS_ZK_REGION_OPENED.
     // For all other cases we can change the in-memory state to OFFLINE.
     if (hijack &&
-        (state.getState().equals(RegionState.State.PENDING_OPEN) || 
+        (state.getState().equals(RegionState.State.PENDING_OPEN) ||
             state.getState().equals(RegionState.State.OPENING))) {
       regionStates.updateRegionState(state.getRegion(),
         RegionState.State.PENDING_OPEN);
@@ -1698,7 +1697,7 @@ public class AssignmentManager extends Z
     int versionOfOfflineNode = -1;
     try {
       // get the version after setting the znode to OFFLINE
-      versionOfOfflineNode = ZKAssign.createOrForceNodeOffline(master.getZooKeeper(), 
+      versionOfOfflineNode = ZKAssign.createOrForceNodeOffline(master.getZooKeeper(),
           state.getRegion(), this.master.getServerName(),
           hijack, allowZNodeCreation);
       if (versionOfOfflineNode == -1) {
@@ -1734,7 +1733,7 @@ public class AssignmentManager extends Z
     } catch (KeeperException e) {
       if (e instanceof NodeExistsException) {
         LOG.warn("Node for " + state.getRegion() + " already exists");
-      } else { 
+      } else {
         master.abort("Unexpected ZK exception creating/setting node OFFLINE", e);
       }
       return false;
@@ -1918,7 +1917,7 @@ public class AssignmentManager extends Z
             return;
           }
         } catch (KeeperException ee) {
-          Exception e = ee; 
+          Exception e = ee;
           if (e instanceof NodeExistsException) {
             // Handle race between master initiated close and regionserver
             // orchestrated splitting. See if existing node is in a
@@ -1949,8 +1948,8 @@ public class AssignmentManager extends Z
         }
         state = regionStates.updateRegionState(region, RegionState.State.PENDING_CLOSE);
       } else if (force && (state.isPendingClose() || state.isClosing())) {
-        LOG.debug("Attempting to unassign region " + region.getRegionNameAsString() + 
-          " which is already " + state.getState()  + 
+        LOG.debug("Attempting to unassign region " + region.getRegionNameAsString() +
+          " which is already " + state.getState()  +
           " but forcing to send a CLOSE RPC again ");
         state.updateTimestampToNow();
       } else {
@@ -2015,9 +2014,9 @@ public class AssignmentManager extends Z
   public void unassign(HRegionInfo region, boolean force){
      unassign(region, force, null);
   }
-  
+
   /**
-   * 
+   *
    * @param region regioninfo of znode to be deleted.
    */
   public void deleteClosingOrClosedNode(HRegionInfo region) {
@@ -2048,7 +2047,7 @@ public class AssignmentManager extends Z
    * @param path
    * @return True if znode is in SPLIT or SPLITTING state.
    * @throws KeeperException Can happen if the znode went away in meantime.
-   * @throws DeserializationException 
+   * @throws DeserializationException
    */
   private boolean isSplitOrSplitting(final String path)
   throws KeeperException, DeserializationException {
@@ -2118,7 +2117,7 @@ public class AssignmentManager extends Z
 
   /**
    * Assigns all user regions to online servers. Use round-robin assignment.
-   * 
+   *
    * @param regions
    * @throws IOException
    * @throws InterruptedException
@@ -2177,7 +2176,7 @@ public class AssignmentManager extends Z
   public void assignAllUserRegions() throws IOException, InterruptedException {
     // Skip assignment for regions of tables in DISABLING state because during clean cluster startup
     // no RS is alive and regions map also doesn't have any information about the regions.
-    // See HBASE-6281. 
+    // See HBASE-6281.
     Set<String> disablingAndDisabledTables = new HashSet<String>(this.disablingTables);
     disablingAndDisabledTables.addAll(this.zkTable.getDisabledTables());
     // Scan META for all user regions, skipping any disabled tables
@@ -2409,7 +2408,7 @@ public class AssignmentManager extends Z
     for (Result result : results) {
       boolean disabled = false;
       boolean disablingOrEnabling = false;
-      Pair<HRegionInfo, ServerName> region = MetaReader.parseCatalogResult(result);
+      Pair<HRegionInfo, ServerName> region = HRegionInfo.getHRegionInfoAndServerName(result);
       if (region == null) continue;
       HRegionInfo regionInfo = region.getFirst();
       ServerName regionLocation = region.getSecond();
@@ -2499,14 +2498,14 @@ public class AssignmentManager extends Z
     } else if (checkIfRegionsBelongsToEnabling(regionInfo)) {
       enablingTables.add(disablingTableName);
       return true;
-    } 
+    }
     return false;
   }
 
   /**
    * Recover the tables that were not fully moved to DISABLED state. These
    * tables are in DISABLING state when the master restarted/switched.
-   * 
+   *
    * @param disablingTables
    * @return
    * @throws KeeperException
@@ -2536,7 +2535,7 @@ public class AssignmentManager extends Z
   /**
    * Recover the tables that are not fully moved to ENABLED state. These tables
    * are in ENABLING state when the master restarted/switched
-   * 
+   *
    * @param enablingTables
    * @param isWatcherCreated
    * @throws KeeperException
@@ -2583,10 +2582,10 @@ public class AssignmentManager extends Z
    * Processes list of dead servers from result of META scan and regions in RIT
    * <p>
    * This is used for failover to recover the lost regions that belonged to
-   * RegionServers which failed while there was no active master or regions 
+   * RegionServers which failed while there was no active master or regions
    * that were in RIT.
    * <p>
-   * 
+   *
    * @param deadServers
    *          The list of dead servers which failed while there was no active
    *          master. Can be null.
@@ -2612,7 +2611,7 @@ public class AssignmentManager extends Z
     if (deadServers == null) return;
     Set<ServerName> actualDeadServers = this.serverManager.getDeadServers();
     for (Map.Entry<ServerName, List<Pair<HRegionInfo, Result>>> deadServer: deadServers.entrySet()) {
-      // skip regions of dead servers because SSH will process regions during rs expiration. 
+      // skip regions of dead servers because SSH will process regions during rs expiration.
       // see HBASE-5916
       if (actualDeadServers.contains(deadServer.getKey())) {
         for (Pair<HRegionInfo, Result> deadRegion : deadServer.getValue()) {
@@ -2881,17 +2880,17 @@ public class AssignmentManager extends Z
           "expire, send RPC again");
         invokeUnassign(regionInfo);
         break;
-        
+
       case SPLIT:
       case SPLITTING:
         break;
-        
+
       default:
         throw new IllegalStateException("Received event is not valid.");
       }
     }
   }
-  
+
   private void processOpeningState(HRegionInfo regionInfo) {
     LOG.info("Region has been OPENING for too " + "long, reassigning region="
         + regionInfo.getRegionNameAsString());
@@ -2940,7 +2939,7 @@ public class AssignmentManager extends Z
   public boolean isCarryingMeta(ServerName serverName) {
     return isCarryingRegion(serverName, HRegionInfo.FIRST_META_REGIONINFO);
   }
-  
+
   /**
    * Check if the shutdown server carries the specific region.
    * We have a bunch of places that store region location
@@ -3044,10 +3043,10 @@ public class AssignmentManager extends Z
     this.timeoutMonitor.interrupt();
     this.timerUpdater.interrupt();
   }
-  
+
   /**
    * Check whether the RegionServer is online.
-   * @param serverName 
+   * @param serverName
    * @return True if online.
    */
   public boolean isServerOnline(ServerName serverName) {

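A note on the Writables.getHRegionInfos -> HRegionInfo.parseDelimitedFrom change in the AssignmentManager hunk above: the RS_ZK_REGION_SPLIT znode payload is now a sequence of length-delimited protobuf HRegionInfo messages. Below is a minimal consumer-side sketch; the parseDelimitedFrom call is taken verbatim from the diff, while the wrapper class and method names are illustrative only.

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.HRegionInfo;

    public class SplitPayloadReader {
      /**
       * Parses the daughter regions out of an RS_ZK_REGION_SPLIT znode
       * payload. After this commit the payload holds length-delimited
       * protobuf HRegionInfo messages rather than Writables output.
       */
      static List<HRegionInfo> readDaughters(byte[] payload) throws IOException {
        // Replaces Writables.getHRegionInfos(payload, 0, payload.length).
        return HRegionInfo.parseDelimitedFrom(payload, 0, payload.length);
      }
    }

The producer side of the same payload is in the SplitTransaction changes further down.
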
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java Tue Aug 28 03:40:47 2012
@@ -37,7 +37,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Server;
@@ -50,7 +49,7 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.hbase.util.PairOfSameType;
 
 /**
  * A janitor for the catalog tables.  Scans the <code>.META.</code> catalog
@@ -124,7 +123,7 @@ class CatalogJanitor extends Chore {
       public boolean visit(Result r) throws IOException {
         if (r == null || r.isEmpty()) return true;
         count.incrementAndGet();
-        HRegionInfo info = getHRegionInfo(r);
+        HRegionInfo info = HRegionInfo.getHRegionInfo(r);
         if (info == null) return true; // Keep scanning
         if (info.isSplitParent()) splitParents.put(info, r);
         // Returning true means "keep scanning"
@@ -160,11 +159,10 @@ class CatalogJanitor extends Chore {
             cleanParent(e.getKey(), e.getValue())) {
           cleaned++;
         } else {
-          // We could not clean the parent, so its daughters should not be cleaned either(HBASE-6160)
-          parentNotCleaned.add(
-              getDaughterRegionInfo(e.getValue(), HConstants.SPLITA_QUALIFIER).getEncodedName());
-          parentNotCleaned.add(
-              getDaughterRegionInfo(e.getValue(), HConstants.SPLITB_QUALIFIER).getEncodedName());
+          // We could not clean the parent, so its daughters should not be cleaned either (HBASE-6160)
+          PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(e.getValue());
+          parentNotCleaned.add(daughters.getFirst().getEncodedName());
+          parentNotCleaned.add(daughters.getSecond().getEncodedName());
         }
       }
       if (cleaned != 0) {
@@ -216,24 +214,6 @@ class CatalogJanitor extends Chore {
   }
 
   /**
-   * Get HRegionInfo from passed Map of row values.
-   * @param result Map to do lookup in.
-   * @return Null if not found (and logs fact that expected COL_REGIONINFO
-   * was missing) else deserialized {@link HRegionInfo}
-   * @throws IOException
-   */
-  static HRegionInfo getHRegionInfo(final Result result)
-  throws IOException {
-    byte [] bytes =
-      result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
-    if (bytes == null) {
-      LOG.warn("REGIONINFO_QUALIFIER is empty in " + result);
-      return null;
-    }
-    return Writables.getHRegionInfo(bytes);
-  }
-
-  /**
    * If daughters no longer hold reference to the parents, delete the parent.
    * @param parent HRegionInfo of split offlined parent
    * @param rowContent Content of <code>parent</code> row in
@@ -246,12 +226,11 @@ class CatalogJanitor extends Chore {
   throws IOException {
     boolean result = false;
     // Run checks on each daughter split.
-    HRegionInfo a_region = getDaughterRegionInfo(rowContent, HConstants.SPLITA_QUALIFIER);
-    HRegionInfo b_region = getDaughterRegionInfo(rowContent, HConstants.SPLITB_QUALIFIER);
+    PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(rowContent);
     Pair<Boolean, Boolean> a =
-      checkDaughterInFs(parent, a_region, HConstants.SPLITA_QUALIFIER);
+      checkDaughterInFs(parent, daughters.getFirst());
     Pair<Boolean, Boolean> b =
-      checkDaughterInFs(parent, b_region, HConstants.SPLITB_QUALIFIER);
+      checkDaughterInFs(parent, daughters.getSecond());
     if (hasNoReferences(a) && hasNoReferences(b)) {
       LOG.debug("Deleting region " + parent.getRegionNameAsString() +
         " because daughter splits no longer hold references");
@@ -284,21 +263,6 @@ class CatalogJanitor extends Chore {
   }
 
   /**
-   * Get daughter HRegionInfo out of parent info:splitA/info:splitB columns.
-   * @param result
-   * @param which Whether "info:splitA" or "info:splitB" column
-   * @return Deserialized content of the info:splitA or info:splitB as a
-   * HRegionInfo
-   * @throws IOException
-   */
-  private HRegionInfo getDaughterRegionInfo(final Result result,
-    final byte [] which)
-  throws IOException {
-    byte [] bytes = result.getValue(HConstants.CATALOG_FAMILY, which);
-    return Writables.getHRegionInfoOrNull(bytes);
-  }
-
-  /**
    * Remove mention of daughters from parent row.
    * @param parent
    * @throws IOException
@@ -311,27 +275,24 @@ class CatalogJanitor extends Chore {
   /**
    * Checks if a daughter region -- either splitA or splitB -- still holds
    * references to parent.
-   * @param parent Parent region name. 
-   * @param split Which column family.
-   * @param qualifier Which of the daughters to look at, splitA or splitB.
+   * @param parent Parent region
+   * @param daughter Daughter region
    * @return A pair where the first boolean says whether or not the daughter
    * region directory exists in the filesystem and then the second boolean says
    * whether the daughter has references to the parent.
    * @throws IOException
    */
-  Pair<Boolean, Boolean> checkDaughterInFs(final HRegionInfo parent,
-    final HRegionInfo split,
-    final byte [] qualifier)
+  Pair<Boolean, Boolean> checkDaughterInFs(final HRegionInfo parent, final HRegionInfo daughter)
   throws IOException {
     boolean references = false;
     boolean exists = false;
-    if (split == null)  {
+    if (daughter == null)  {
       return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
     }
     FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
     Path rootdir = this.services.getMasterFileSystem().getRootDir();
-    Path tabledir = new Path(rootdir, split.getTableNameAsString());
-    Path regiondir = new Path(tabledir, split.getEncodedName());
+    Path tabledir = new Path(rootdir, daughter.getTableNameAsString());
+    Path regiondir = new Path(tabledir, daughter.getEncodedName());
     exists = fs.exists(regiondir);
     if (!exists) {
       LOG.warn("Daughter regiondir does not exist: " + regiondir.toString());
@@ -340,7 +301,7 @@ class CatalogJanitor extends Chore {
     HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableName());
 
     for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
-      Path p = HStore.getStoreHomedir(tabledir, split.getEncodedName(),
+      Path p = HStore.getStoreHomedir(tabledir, daughter.getEncodedName(),
         family.getName());
       if (!fs.exists(p)) continue;
       // Look for reference files.  Call listStatus with anonymous instance of PathFilter.

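For context on the CatalogJanitor changes above: the two per-qualifier daughter lookups (info:splitA, info:splitB) collapse into a single HRegionInfo.getDaughterRegions call returning a PairOfSameType. A minimal sketch of the new read path, assuming a catalog Result already in hand; the wrapper class is illustrative.

    import java.io.IOException;

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.PairOfSameType;

    public class DaughterLookup {
      /**
       * Reads both daughters of a split parent from its catalog row in one
       * call, as the janitor now does, instead of deserializing the
       * info:splitA and info:splitB cells separately.
       */
      static void printDaughters(Result parentRow) throws IOException {
        PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(parentRow);
        HRegionInfo splitA = daughters.getFirst();   // was info:splitA
        HRegionInfo splitB = daughters.getSecond();  // was info:splitB
        if (splitA != null) System.out.println("splitA=" + splitA.getEncodedName());
        if (splitB != null) System.out.println("splitB=" + splitB.getEncodedName());
      }
    }
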
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java Tue Aug 28 03:40:47 2012
@@ -80,9 +80,6 @@ import org.apache.hadoop.hbase.executor.
 import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType;
 import org.apache.hadoop.hbase.ipc.HBaseRPC;
 import org.apache.hadoop.hbase.ipc.HBaseServer;
-import org.apache.hadoop.hbase.master.metrics.MasterMetricsWrapperImpl;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.ipc.ProtocolSignature;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
@@ -99,9 +96,12 @@ import org.apache.hadoop.hbase.master.ha
 import org.apache.hadoop.hbase.master.handler.TableEventHandler;
 import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
 import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
+import org.apache.hadoop.hbase.master.metrics.MasterMetricsWrapperImpl;
 import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
@@ -252,7 +252,7 @@ Server {
   private CatalogTracker catalogTracker;
   // Cluster status zk tracker and local setter
   private ClusterStatusTracker clusterStatusTracker;
-  
+
   // buffer for "fatal error" notices from region servers
   // in the cluster. This is only used for assisting
   // operations/debugging.
@@ -399,7 +399,7 @@ Server {
         "(Also watching cluster state node)");
       Thread.sleep(c.getInt("zookeeper.session.timeout", 180 * 1000));
     }
-    
+
   }
 
   MasterMetrics getMetrics() {
@@ -441,7 +441,7 @@ Server {
       }
     } catch (Throwable t) {
       // HBASE-5680: Likely hadoop23 vs hadoop 20.x/1.x incompatibility
-      if (t instanceof NoClassDefFoundError && 
+      if (t instanceof NoClassDefFoundError &&
           t.getMessage().contains("org/apache/hadoop/hdfs/protocol/FSConstants$SafeModeAction")) {
           // improved error message for this special case
           abort("HBase is having a problem with its Hadoop jars.  You may need to "
@@ -453,7 +453,7 @@ Server {
       }
     } finally {
       startupStatus.cleanup();
-      
+
       stopChores();
       // Wait for all the remaining region servers to report in IFF we were
       // running a cluster shutdown AND we were NOT aborting.
@@ -475,7 +475,7 @@ Server {
 
   /**
    * Try becoming active master.
-   * @param startupStatus 
+   * @param startupStatus
    * @return True if we could successfully become the active master.
    * @throws InterruptedException
    */
@@ -581,7 +581,7 @@ Server {
 
   /**
    * Finish initialization of HMaster after becoming the primary master.
-   * 
+   *
    * <ol>
    * <li>Initialize master components - file system manager, server manager,
    *     assignment manager, region server tracker, catalog tracker, etc</li>
@@ -593,9 +593,9 @@ Server {
    * <li>Ensure assignment of root and meta regions<li>
    * <li>Handle either fresh cluster start or master failover</li>
    * </ol>
-   * 
+   *
    * @param masterRecovery
-   * 
+   *
    * @throws IOException
    * @throws InterruptedException
    * @throws KeeperException
@@ -636,7 +636,7 @@ Server {
       // initialize master side coprocessors before we start handling requests
       status.setStatus("Initializing master coprocessors");
       this.cpHost = new MasterCoprocessorHost(this, this.conf);
-      
+
       // start up all service threads.
       status.setStatus("Initializing master service threads");
       startServiceThreads();
@@ -665,12 +665,12 @@ Server {
     if (!assignRootAndMeta(status)) return;
     enableServerShutdownHandler();
 
-    // Update meta with new HRI if required. i.e migrate all HRI with HTD to
-    // HRI with out HTD in meta and update the status in ROOT. This must happen
+    // Update meta with new PB serialization if required. i.e migrate all HRI
+    // to PB serialization in meta and update the status in ROOT. This must happen
     // before we assign all user regions or else the assignment will fail.
-    // TODO: Remove this when we do 0.94.
-    org.apache.hadoop.hbase.catalog.MetaMigrationRemovingHTD.
-      updateMetaWithNewHRI(this);
+    // TODO: Remove this after 0.96, when we do 0.98.
+    org.apache.hadoop.hbase.catalog.MetaMigrationConvertingToPB
+      .updateRootAndMetaIfNecessary(this);
 
     this.balancer.setMasterServices(this);
     // Fixup assignment manager status
@@ -683,7 +683,7 @@ Server {
     status.setStatus("Fixing up missing daughters");
     fixupDaughters(status);
 
-    if (!masterRecovery) {      
+    if (!masterRecovery) {
       // Start balancer and meta catalog janitor after meta and regions have
       // been assigned.
       status.setStatus("Starting balancer and catalog janitor");
@@ -699,8 +699,8 @@ Server {
     // removing dead server with same hostname and port of rs which is trying to check in before
     // master initialization. See HBASE-5916.
     this.serverManager.clearDeadServersWithSameHostNameAndPortOfOnlineServer();
-    
-    if (!masterRecovery) {      
+
+    if (!masterRecovery) {
       if (this.cpHost != null) {
         // don't let cp initialization errors kill the master
         try {
@@ -711,7 +711,7 @@ Server {
       }
     }
   }
-  
+
   /**
    * Also useful for testing, where we have
    * master restart scenarios.
@@ -879,8 +879,7 @@ Server {
       public boolean visit(Result r) throws IOException {
         if (r == null || r.isEmpty()) return true;
         HRegionInfo info =
-          MetaReader.parseHRegionInfoFromCatalogResult(
-            r, HConstants.REGIONINFO_QUALIFIER);
+          HRegionInfo.getHRegionInfo(r);
         if (info == null) return true; // Keep scanning
         if (info.isOffline() && info.isSplit()) {
           offlineSplitParents.put(info, r);
@@ -992,7 +991,7 @@ Server {
    *  need to install an unexpected exception handler.
    */
   void startServiceThreads() throws IOException{
- 
+
    // Start the executor service pools
    this.executorService.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
       conf.getInt("hbase.master.executor.openregion.threads", 5));
@@ -1002,7 +1001,7 @@ Server {
       conf.getInt("hbase.master.executor.serverops.threads", 3));
    this.executorService.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
       conf.getInt("hbase.master.executor.serverops.threads", 5));
-   
+
    // We depend on there being only one instance of this executor running
    // at a time.  To do concurrency, would need fencing of enable/disable of
    // tables.
@@ -1033,7 +1032,7 @@ Server {
      this.infoServer.setAttribute(MASTER, this);
      this.infoServer.start();
     }
-   
+
     // Start allowing requests to happen.
     this.rpcServer.openServer();
     this.rpcServerOpen = true;
@@ -1118,7 +1117,7 @@ Server {
 
   /**
    * @return Get remote side's InetAddress
-   * @throws UnknownHostException 
+   * @throws UnknownHostException
    */
   InetAddress getRemoteInetAddress(final int port, final long serverStartCode)
   throws UnknownHostException {
@@ -1330,11 +1329,11 @@ Server {
         newValue = this.cpHost.preBalanceSwitch(newValue);
       }
       if (mode == BalanceSwitchMode.SYNC) {
-        synchronized (this.balancer) {        
+        synchronized (this.balancer) {
           this.balanceSwitch = newValue;
         }
       } else {
-        this.balanceSwitch = newValue;        
+        this.balanceSwitch = newValue;
       }
       LOG.info("BalanceSwitch=" + newValue);
       if (this.cpHost != null) {
@@ -1536,7 +1535,7 @@ Server {
    * @return Pair indicating the number of regions updated: Pair.getFirst is the
    *         number of regions yet to be updated; Pair.getSecond is the total number
    *         of regions of the table
-   * @throws IOException 
+   * @throws IOException
    */
   @Override
   public GetSchemaAlterStatusResponse getSchemaAlterStatus(
@@ -1686,7 +1685,7 @@ Server {
           if (data == null || data.size() <= 0) {
             return true;
           }
-          Pair<HRegionInfo, ServerName> pair = MetaReader.parseCatalogResult(data);
+          Pair<HRegionInfo, ServerName> pair = HRegionInfo.getHRegionInfoAndServerName(data);
           if (pair == null) {
             return false;
           }
@@ -2034,13 +2033,13 @@ Server {
   public boolean isAborted() {
     return this.abort;
   }
-  
+
   void checkInitialized() throws PleaseHoldException {
     if (!this.initialized) {
       throw new PleaseHoldException("Master is initializing");
     }
   }
-  
+
   /**
    * Report whether this master is currently the active master or not.
    * If not active master, we are parked on ZK waiting to become active.

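Side note on the MetaMigrationConvertingToPB call above: the migration needs to tell Writables-era info:regioninfo cells apart from already-converted ones. One plausible check, sketched below under the assumption that the 0.95-era ProtobufUtil magic-prefix helper is on the classpath, is to sniff for the PB magic bytes; the class and method here are illustrative and not the migration's actual internals.

    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;

    public class PBCellSniffer {
      /**
       * Returns true if a catalog cell value already starts with the
       * protobuf magic prefix, i.e. was written with the serialization
       * this commit introduces rather than with Writables.
       */
      static boolean isAlreadyConverted(byte[] cellValue) {
        return cellValue != null && ProtobufUtil.isPBMagicPrefix(cellValue);
      }
    }
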
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java Tue Aug 28 03:40:47 2012
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.master.Ma
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
 import org.apache.zookeeper.KeeperException;
 
@@ -90,12 +91,12 @@ public class ServerShutdownHandler exten
   }
 
   /**
-   * Before assigning the ROOT region, ensure it hasn't 
+   * Before assigning the ROOT region, ensure it hasn't
 *  been assigned elsewhere
 * <p>
 * Under some scenarios, the ROOT region can be opened twice, making it appear
 * online on two regionservers at the same time.
-   * If the ROOT region has already been assigned, the operation can be canceled. 
+   * If the ROOT region has already been assigned, the operation can be canceled.
    * @throws InterruptedException
    * @throws IOException
    * @throws KeeperException
@@ -145,7 +146,7 @@ public class ServerShutdownHandler exten
       }
     }
   }
-  
+
   /**
    * @return True if the server we are processing was carrying <code>-ROOT-</code>
    */
@@ -417,9 +418,10 @@ public class ServerShutdownHandler exten
       final AssignmentManager assignmentManager,
       final CatalogTracker catalogTracker)
   throws IOException {
-    int fixedA = fixupDaughter(result, HConstants.SPLITA_QUALIFIER,
+    PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(result);
+    int fixedA = fixupDaughter(result, daughters.getFirst(),
       assignmentManager, catalogTracker);
-    int fixedB = fixupDaughter(result, HConstants.SPLITB_QUALIFIER,
+    int fixedB = fixupDaughter(result, daughters.getSecond(),
       assignmentManager, catalogTracker);
     return fixedA + fixedB;
   }
@@ -431,12 +433,10 @@ public class ServerShutdownHandler exten
    * @return 1 if the daughter is missing and fixed. Otherwise 0
    * @throws IOException
    */
-  static int fixupDaughter(final Result result, final byte [] qualifier,
+  static int fixupDaughter(final Result result, HRegionInfo daughter,
       final AssignmentManager assignmentManager,
       final CatalogTracker catalogTracker)
   throws IOException {
-    HRegionInfo daughter =
-      MetaReader.parseHRegionInfoFromCatalogResult(result, qualifier);
     if (daughter == null) return 0;
     if (isDaughterMissing(catalogTracker, daughter)) {
       LOG.info("Fixup; missing daughter " + daughter.getRegionNameAsString());
@@ -460,7 +460,7 @@ public class ServerShutdownHandler exten
    * The daughter could have been split on the regionserver before a run of
    * the CatalogJanitor had a chance to clear the reference from the parent.
    * @param daughter Daughter region to search for.
-   * @throws IOException 
+   * @throws IOException
    */
   private static boolean isDaughterMissing(final CatalogTracker catalogTracker,
       final HRegionInfo daughter) throws IOException {
@@ -498,7 +498,7 @@ public class ServerShutdownHandler exten
     @Override
     public boolean visit(Result r) throws IOException {
       HRegionInfo hri =
-        MetaReader.parseHRegionInfoFromCatalogResult(r, HConstants.REGIONINFO_QUALIFIER);
+        HRegionInfo.getHRegionInfo(r);
       if (hri == null) {
         LOG.warn("No serialized HRegionInfo in " + r);
         return true;

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Tue Aug 28 03:40:47 2012
@@ -130,7 +130,6 @@ import org.apache.hadoop.hbase.util.FSUt
 import org.apache.hadoop.hbase.util.HashedBytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.util.StringUtils;
@@ -371,7 +370,7 @@ public class HRegion implements HeapSize
     this.threadWakeFrequency = 0L;
     this.coprocessorHost = null;
     this.scannerReadPoints = new ConcurrentHashMap<RegionScanner, Long>();
-    
+
     this.opMetrics = new OperationMetrics();
   }
 
@@ -434,7 +433,7 @@ public class HRegion implements HeapSize
     setHTableSpecificConf();
     this.regiondir = getRegionDir(this.tableDir, encodedNameStr);
     this.scannerReadPoints = new ConcurrentHashMap<RegionScanner, Long>();
-    
+
     this.opMetrics = new OperationMetrics(conf, this.regionInfo);
 
     /*
@@ -785,7 +784,7 @@ public class HRegion implements HeapSize
   /**
    * @param hri
    * @return Content of the file we write out to the filesystem under a region
-   * @throws IOException 
+   * @throws IOException
    */
   private static byte [] getDotRegionInfoFileContent(final HRegionInfo hri) throws IOException {
     return hri.toDelimitedByteArray();
@@ -2031,7 +2030,7 @@ public class HRegion implements HeapSize
 
       try {
         if (!initialized) {
-          this.writeRequestsCount.increment(); 
+          this.writeRequestsCount.increment();
           doPreMutationHook(batchOp);
           initialized = true;
         }
@@ -2095,9 +2094,9 @@ public class HRegion implements HeapSize
     boolean deletesCfSetConsistent = true;
     //The set of columnFamilies first seen for Delete.
     Set<byte[]> deletesCfSet = null;
-    
+
     WALEdit walEdit = new WALEdit();
-    
+
     long startTimeMs = EnvironmentEdgeManager.currentTimeMillis();
 
 
@@ -2352,7 +2351,7 @@ public class HRegion implements HeapSize
 
       // do after lock
       final long netTimeMs = EnvironmentEdgeManager.currentTimeMillis() - startTimeMs;
-      
+
       // See if the column families were consistent through the whole thing.
       // if they were then keep them. If they were not then pass a null.
       // null will be treated as unknown.
@@ -2627,7 +2626,7 @@ public class HRegion implements HeapSize
     // do after lock
     final long after = EnvironmentEdgeManager.currentTimeMillis();
     this.opMetrics.updatePutMetrics(familyMap.keySet(), after - now);
-    
+
 
     if (flush) {
       // Request a cache flush.  Do it outside update lock.
@@ -3788,7 +3787,7 @@ public class HRegion implements HeapSize
 
   /**
    * Convenience method creating new HRegions. Used by createTable.
-   * The {@link HLog} for the created region needs to be closed 
+   * The {@link HLog} for the created region needs to be closed
    * explicitly, if it is not null.
    * Use {@link HRegion#getLog()} to get access.
    *
@@ -3981,7 +3980,7 @@ public class HRegion implements HeapSize
       final List<KeyValue> edits = new ArrayList<KeyValue>(2);
       edits.add(new KeyValue(row, HConstants.CATALOG_FAMILY,
         HConstants.REGIONINFO_QUALIFIER, now,
-        Writables.getBytes(r.getRegionInfo())));
+        r.getRegionInfo().toByteArray()));
       // Set into the root table the version of the meta table.
       edits.add(new KeyValue(row, HConstants.CATALOG_FAMILY,
         HConstants.META_VERSION_QUALIFIER, now,
@@ -4756,16 +4755,16 @@ public class HRegion implements HeapSize
     } finally {
       closeRegionOperation();
     }
-    
+
     long after = EnvironmentEdgeManager.currentTimeMillis();
-    this.opMetrics.updateAppendMetrics(append.getFamilyMap().keySet(), after - before);   
-    
-    
+    this.opMetrics.updateAppendMetrics(append.getFamilyMap().keySet(), after - before);
+
+
     if (flush) {
       // Request a cache flush. Do it outside update lock.
       requestFlush();
     }
-    
+
 
     return append.isReturnResults() ? new Result(allKVs) : null;
   }
@@ -4878,10 +4877,10 @@ public class HRegion implements HeapSize
     } finally {
       closeRegionOperation();
     }
-    
+
     long after = EnvironmentEdgeManager.currentTimeMillis();
-    this.opMetrics.updateIncrementMetrics(increment.getFamilyMap().keySet(), after - before);   
-    
+    this.opMetrics.updateIncrementMetrics(increment.getFamilyMap().keySet(), after - before);
+
     if (flush) {
       // Request a cache flush.  Do it outside update lock.
       requestFlush();
@@ -4981,7 +4980,7 @@ public class HRegion implements HeapSize
     // do after lock
     long after = EnvironmentEdgeManager.currentTimeMillis();
     this.opMetrics.updateIncrementColumnValueMetrics(family, after - before);
-    
+
     if (flush) {
       // Request a cache flush.  Do it outside update lock.
       requestFlush();
@@ -5017,7 +5016,7 @@ public class HRegion implements HeapSize
   public static final long DEEP_OVERHEAD = FIXED_OVERHEAD +
       ClassSize.OBJECT + // closeLock
       (2 * ClassSize.ATOMIC_BOOLEAN) + // closed, closing
-      (3 * ClassSize.ATOMIC_LONG) + // memStoreSize, numPutsWithoutWAL, dataInMemoryWithoutWAL 
+      (3 * ClassSize.ATOMIC_LONG) + // memStoreSize, numPutsWithoutWAL, dataInMemoryWithoutWAL
       ClassSize.ATOMIC_INTEGER + // lockIdGenerator
       (3 * ClassSize.CONCURRENT_HASHMAP) +  // lockedRows, lockIds, scannerReadPoints
       WriteState.HEAP_SIZE + // writestate
@@ -5371,7 +5370,7 @@ public class HRegion implements HeapSize
    */
   private void recordPutWithoutWal(final Map<byte [], List<KeyValue>> familyMap) {
     if (numPutsWithoutWAL.getAndIncrement() == 0) {
-      LOG.info("writing data to region " + this + 
+      LOG.info("writing data to region " + this +
                " with WAL disabled. Data may be lost in the event of a crash.");
     }
 

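On the HRegion change above that swaps Writables.getBytes(r.getRegionInfo()) for r.getRegionInfo().toByteArray(): the info:regioninfo cell value in the catalog is now the protobuf form of the HRegionInfo. A minimal sketch of building such a cell the same way; the wrapper class is illustrative.

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.KeyValue;

    public class RegionInfoCell {
      /**
       * Builds the info:regioninfo KeyValue for a catalog row as this
       * commit now does: the value is hri.toByteArray() instead of
       * Writables.getBytes(hri).
       */
      static KeyValue regionInfoCell(HRegionInfo hri, long ts) {
        return new KeyValue(hri.getRegionName(), HConstants.CATALOG_FAMILY,
            HConstants.REGIONINFO_QUALIFIER, ts, hri.toByteArray());
      }
    }
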
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java Tue Aug 28 03:40:47 2012
@@ -50,7 +50,6 @@ import org.apache.hadoop.hbase.util.Envi
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.util.PairOfSameType;
-import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -207,7 +206,7 @@ public class SplitTransaction {
 
   private static IOException closedByOtherException = new IOException(
       "Failed to close region: already closed by another thread");
-  
+
   /**
    * Prepare the regions and region files.
    * @param server Hosting server instance.  Can be null when testing (won't try
@@ -241,7 +240,7 @@ public class SplitTransaction {
     this.fileSplitTimeout = testing ? this.fileSplitTimeout :
         server.getConfiguration().getLong("hbase.regionserver.fileSplitTimeout",
           this.fileSplitTimeout);
-    
+
     this.journal.add(JournalEntry.STARTED_SPLITTING);
     // Set ephemeral SPLITTING znode up in zk.  Mocked servers sometimes don't
     // have zookeeper so don't do zk stuff if server or zookeeper is null
@@ -258,7 +257,7 @@ public class SplitTransaction {
 
     createSplitDir(this.parent.getFilesystem(), this.splitdir);
     this.journal.add(JournalEntry.CREATE_SPLIT_DIR);
- 
+
     List<StoreFile> hstoreFilesToSplit = null;
     Exception exceptionToThrow = null;
     try{
@@ -319,7 +318,7 @@ public class SplitTransaction {
     // regions.
     // We should add PONR JournalEntry before offlineParentInMeta,so even if
     // OfflineParentInMeta timeout,this will cause regionserver exit,and then
-    // master ServerShutdownHandler will fix daughter & avoid data loss. (See 
+    // master ServerShutdownHandler will fix daughter & avoid data loss. (See
     // HBase-4562).
     this.journal.add(JournalEntry.PONR);
 
@@ -745,13 +744,13 @@ public class SplitTransaction {
     while (iterator.hasPrevious()) {
       JournalEntry je = iterator.previous();
       switch(je) {
-      
+
       case STARTED_SPLITTING:
         if (server != null && server.getZooKeeper() != null) {
           cleanZK(server, this.parent.getRegionInfo(), false);
         }
         break;
-      
+
       case SET_SPLITTING_IN_ZK:
         if (server != null && server.getZooKeeper() != null) {
           cleanZK(server, this.parent.getRegionInfo(), true);
@@ -859,7 +858,7 @@ public class SplitTransaction {
     } catch (KeeperException.NoNodeException nn) {
       if (abort) {
         server.abort("Failed cleanup of " + hri.getRegionNameAsString(), nn);
-      }      
+      }
     } catch (KeeperException e) {
       server.abort("Failed cleanup of " + hri.getRegionNameAsString(), e);
     }
@@ -876,8 +875,8 @@ public class SplitTransaction {
    * @param region region to be created as offline
    * @param serverName server event originates from
    * @return Version of znode created.
-   * @throws KeeperException 
-   * @throws IOException 
+   * @throws KeeperException
+   * @throws IOException
    */
   int createNodeSplitting(final ZooKeeperWatcher zkw, final HRegionInfo region,
       final ServerName serverName) throws KeeperException, IOException {
@@ -925,20 +924,20 @@ public class SplitTransaction {
    * @param serverName server event originates from
    * @return version of node after transition, -1 if unsuccessful transition
    * @throws KeeperException if unexpected zookeeper exception
-   * @throws IOException 
+   * @throws IOException
    */
   private static int transitionNodeSplit(ZooKeeperWatcher zkw,
       HRegionInfo parent, HRegionInfo a, HRegionInfo b, ServerName serverName,
       final int znodeVersion)
   throws KeeperException, IOException {
-    byte [] payload = Writables.getBytes(a, b);
+    byte [] payload = HRegionInfo.toDelimitedByteArray(a, b);
     return ZKAssign.transitionNode(zkw, parent, serverName,
       EventType.RS_ZK_REGION_SPLITTING, EventType.RS_ZK_REGION_SPLIT,
       znodeVersion, payload);
   }
 
   /**
-   * 
+   *
    * @param zkw zk reference
    * @param parent region to be transitioned to splitting
    * @param serverName server event originates from
@@ -957,7 +956,7 @@ public class SplitTransaction {
       HRegionInfo parent, HRegionInfo a, HRegionInfo b, ServerName serverName,
       final int znodeVersion)
   throws KeeperException, IOException {
-    byte [] payload = Writables.getBytes(a, b);
+    byte [] payload = HRegionInfo.toDelimitedByteArray(a, b);
     return ZKAssign.transitionNode(zkw, parent, serverName,
       EventType.RS_ZK_REGION_SPLIT, EventType.RS_ZK_REGION_SPLIT,
       znodeVersion, payload);

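This is the producer side of the split payload sketched after the AssignmentManager diff earlier: the regionserver now writes both daughters with HRegionInfo.toDelimitedByteArray(a, b), which HRegionInfo.parseDelimitedFrom reads back on the master. The call is verbatim from the hunks above; the wrapper is illustrative.

    import java.io.IOException;

    import org.apache.hadoop.hbase.HRegionInfo;

    public class SplitPayloadWriter {
      /**
       * Serializes the two daughter regions into the RS_ZK_REGION_SPLIT
       * znode payload. Replaces Writables.getBytes(a, b).
       */
      static byte[] writeDaughters(HRegionInfo a, HRegionInfo b) throws IOException {
        return HRegionInfo.toDelimitedByteArray(a, b);
      }
    }
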
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java Tue Aug 28 03:40:47 2012
@@ -84,7 +84,6 @@ import org.apache.hadoop.hbase.thrift.ge
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Strings;
-import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.net.DNS;
 import org.apache.thrift.TException;
 import org.apache.thrift.protocol.TBinaryProtocol;
@@ -1375,14 +1374,12 @@ public class ThriftServerRunner implemen
         }
 
         // find region start and end keys
-        byte[] value = startRowResult.getValue(HConstants.CATALOG_FAMILY,
-                                               HConstants.REGIONINFO_QUALIFIER);
-        if (value == null || value.length == 0) {
+        HRegionInfo regionInfo = HRegionInfo.getHRegionInfo(startRowResult);
+        if (regionInfo == null) {
           throw new IOException("HRegionInfo REGIONINFO was null or " +
                                 " empty in Meta for row="
                                 + Bytes.toStringBinary(row));
         }
-        HRegionInfo regionInfo = Writables.getHRegionInfo(value);
         TRegionInfo region = new TRegionInfo();
         region.setStartKey(regionInfo.getStartKey());
         region.setEndKey(regionInfo.getEndKey());
@@ -1391,13 +1388,10 @@ public class ThriftServerRunner implemen
         region.version = regionInfo.getVersion();
 
         // find region assignment to server
-        value = startRowResult.getValue(HConstants.CATALOG_FAMILY,
-                                        HConstants.SERVER_QUALIFIER);
-        if (value != null && value.length > 0) {
-          String hostAndPort = Bytes.toString(value);
-          region.setServerName(Bytes.toBytes(
-              Addressing.parseHostname(hostAndPort)));
-          region.port = Addressing.parsePort(hostAndPort);
+        ServerName serverName = HRegionInfo.getServerName(startRowResult);
+        if (serverName != null) {
+          region.setServerName(Bytes.toBytes(serverName.getHostname()));
+          region.port = serverName.getPort();
         }
         return region;
       } catch (IOException e) {

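The Thrift change above replaces manual CATALOG_FAMILY cell parsing with the two Result-based helpers; HRegionInfo.getHRegionInfoAndServerName, used elsewhere in this commit, bundles the same two reads into a Pair. A minimal decoding sketch, with the wrapper class being illustrative:

    import java.io.IOException;

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Result;

    public class MetaRowDecoder {
      /**
       * Decodes a catalog row the way the rewritten getRegionInfo above
       * does: one helper for the region descriptor, one for the assigned
       * server (null when the region is unassigned).
       */
      static String describe(Result metaRow) throws IOException {
        HRegionInfo hri = HRegionInfo.getHRegionInfo(metaRow);
        if (hri == null) return "no info:regioninfo in row";
        ServerName sn = HRegionInfo.getServerName(metaRow);
        return hri.getRegionNameAsString()
            + (sn == null ? " (unassigned)" : " on " + sn.getHostname() + ":" + sn.getPort());
      }
    }
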
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java Tue Aug 28 03:40:47 2012
@@ -62,7 +62,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.client.AdminProtocol;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
@@ -724,7 +724,7 @@ public class HBaseFsck {
 
   /**
    * This borrows code from MasterFileSystem.bootstrap()
-   * 
+   *
    * @return an open .META. HRegion
    */
   private HRegion createNewRootAndMeta() throws IOException {
@@ -748,9 +748,9 @@ public class HBaseFsck {
   }
 
   /**
-   * Generate set of puts to add to new meta.  This expects the tables to be 
+   * Generate set of puts to add to new meta.  This expects the tables to be
    * clean with no overlaps or holes.  If there are any problems it returns null.
-   * 
+   *
    * @return An array list of puts to do in bulk, null if tables have problems
    */
   private ArrayList<Put> generatePuts(SortedMap<String, TableInfo> tablesInfo) throws IOException {
@@ -781,9 +781,7 @@ public class HBaseFsck {
         // add the row directly to meta.
         HbckInfo hi = his.iterator().next();
         HRegionInfo hri = hi.getHdfsHRI(); // hi.metaEntry;
-        Put p = new Put(hri.getRegionName());
-        p.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-            Writables.getBytes(hri));
+        Put p = MetaEditor.makePutFromRegionInfo(hri);
         puts.add(p);
       }
     }
@@ -803,7 +801,7 @@ public class HBaseFsck {
   /**
    * Rebuilds meta from information in hdfs/fs.  Depends on configuration
    * settings passed into hbck constructor to point to a particular fs/dir.
-   * 
+   *
    * @param fix flag that determines if method should attempt to fix holes
    * @return true if successful, false if attempt failed.
    */
@@ -989,7 +987,7 @@ public class HBaseFsck {
       Path backupTableDir= new Path(backupHbaseDir, tableName);
       boolean success = fs.rename(tableDir, backupTableDir);
       if (!success) {
-        throw new IOException("Failed to move  " + tableName + " from " 
+        throw new IOException("Failed to move  " + tableName + " from "
             +  tableDir.getName() + " to " + backupTableDir.getName());
       }
     } else {
@@ -1020,7 +1018,7 @@ public class HBaseFsck {
       } catch (IOException ioe) {
         LOG.fatal("... failed to sideline root and meta and failed to restore "
             + "prevoius state.  Currently in inconsistent state.  To restore "
-            + "try to rename -ROOT- in " + backupDir.getName() + " to " 
+            + "try to rename -ROOT- in " + backupDir.getName() + " to "
             + hbaseDir.getName() + ".", ioe);
       }
       throw e; // throw original exception
@@ -1194,7 +1192,7 @@ public class HBaseFsck {
     for (ServerName rsinfo: regionServerList) {
       workItems.add(new WorkItemRegion(this, rsinfo, errors, connection));
     }
-    
+
     workFutures = executor.invokeAll(workItems);
 
     for(int i=0; i<workFutures.size(); i++) {
@@ -1260,12 +1258,10 @@ public class HBaseFsck {
     d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
     mutations.add(d);
 
-    Put p = new Put(hi.metaEntry.getRegionName());
     HRegionInfo hri = new HRegionInfo(hi.metaEntry);
     hri.setOffline(false);
     hri.setSplit(false);
-    p.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-      Writables.getBytes(hri));
+    Put p = MetaEditor.makePutFromRegionInfo(hri);
     mutations.add(p);
 
     meta.mutateRow(mutations);
@@ -1335,7 +1331,6 @@ public class HBaseFsck {
    * the offline ipc call exposed on the master (<0.90.5, <0.92.0) a master
    * restart or failover may be required.
    */
-  @SuppressWarnings("deprecation")
   private void closeRegion(HbckInfo hi) throws IOException, InterruptedException {
     if (hi.metaEntry == null && hi.hdfsEntry == null) {
       undeployRegions(hi);
@@ -1348,19 +1343,15 @@ public class HBaseFsck {
     get.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
     get.addColumn(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
     Result r = meta.get(get);
-    byte[] value = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
-    byte[] startcodeBytes = r.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
-    if (value == null || startcodeBytes == null) {
+    ServerName serverName = HRegionInfo.getServerName(r);
+    if (serverName == null) {
       errors.reportError("Unable to close region "
           + hi.getRegionNameAsString() +  " because meta does not "
           + "have handle to reach it.");
       return;
     }
-    long startcode = Bytes.toLong(startcodeBytes);
 
-    ServerName hsa = new ServerName(Bytes.toString(value), startcode);
-    byte[] hriVal = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
-    HRegionInfo hri= Writables.getHRegionInfoOrNull(hriVal);
+    HRegionInfo hri = HRegionInfo.getHRegionInfo(r);
     if (hri == null) {
       LOG.warn("Unable to close region " + hi.getRegionNameAsString()
           + " because META had invalid or missing "
@@ -1371,7 +1362,7 @@ public class HBaseFsck {
     }
 
     // close the region -- close files and remove assignment
-    HBaseFsckRepair.closeRegionSilentlyAndWait(admin, hsa, hri);
+    HBaseFsckRepair.closeRegionSilentlyAndWait(admin, serverName, hri);
   }
 
   private void tryAssignmentRepair(HbckInfo hbi, String msg) throws IOException,
@@ -1723,7 +1714,7 @@ public class HBaseFsck {
         return;
       }
 
-      // if not the absolute end key, check for cycle 
+      // if not the absolute end key, check for cycle
       if (Bytes.compareTo(hir.getStartKey(), hir.getEndKey()) > 0) {
         errors.reportError(
             ERROR_CODE.REGION_CYCLE,
@@ -1773,7 +1764,7 @@ public class HBaseFsck {
             "Last region should end with an empty key. You need to "
                 + "create a new region and regioninfo in HDFS to plug the hole.", getTableInfo());
       }
-      
+
       @Override
       public void handleDegenerateRegion(HbckInfo hi) throws IOException{
         errors.reportError(ERROR_CODE.DEGENERATE_REGION,
@@ -1872,7 +1863,7 @@ public class HBaseFsck {
             + " " + region);
         fixes++;
       }
-      
+
       /**
        * There is a hole in the hdfs regions that violates the table integrity
        * rules.  Create a new empty region that patches the hole.
@@ -2131,7 +2122,7 @@ public class HBaseFsck {
       if (prevKey != null) {
         handler.handleRegionEndKeyNotEmpty(prevKey);
       }
-      
+
       for (Collection<HbckInfo> overlap : overlapGroups.asMap().values()) {
         handler.handleOverlapGroup(overlap);
       }
@@ -2159,7 +2150,7 @@ public class HBaseFsck {
 
     /**
     * This dumps data in a visually reasonable way for debugging
-     * 
+     *
      * @param splits
      * @param regions
      */
@@ -2348,7 +2339,7 @@ public class HBaseFsck {
 
           // record the latest modification of this META record
           long ts =  Collections.max(result.list(), comp).getTimestamp();
-          Pair<HRegionInfo, ServerName> pair = MetaReader.parseCatalogResult(result);
+          Pair<HRegionInfo, ServerName> pair = HRegionInfo.getHRegionInfoAndServerName(result);
           if (pair == null || pair.getFirst() == null) {
             emptyRegionInfoQualifiers.add(result);
             return true;
@@ -2695,7 +2686,7 @@ public class HBaseFsck {
       errorTables.add(table);
       reportError(errorCode, message);
     }
-    
+
     public synchronized void reportError(ERROR_CODE errorCode, String message, TableInfo table,
                                          HbckInfo info) {
       errorTables.add(table);
@@ -2821,7 +2812,7 @@ public class HBaseFsck {
           HbckInfo hbi = hbck.getOrCreateInfo(r.getEncodedName());
           hbi.addServer(r, rsinfo);
         }
-      } catch (IOException e) {          // unable to connect to the region server. 
+      } catch (IOException e) {          // unable to connect to the region server.
         errors.reportError(ERROR_CODE.RS_CONNECT_FAILURE, "RegionServer: " + rsinfo.getServerName() +
           " Unable to fetch region information. " + e);
         throw e;
@@ -2851,7 +2842,7 @@ public class HBaseFsck {
     private ErrorReporter errors;
     private FileSystem fs;
 
-    WorkItemHdfsDir(HBaseFsck hbck, FileSystem fs, ErrorReporter errors, 
+    WorkItemHdfsDir(HBaseFsck hbck, FileSystem fs, ErrorReporter errors,
                     FileStatus status) {
       this.hbck = hbck;
       this.fs = fs;
@@ -3049,7 +3040,7 @@ public class HBaseFsck {
   public boolean shouldFixVersionFile() {
     return fixVersionFile;
   }
-  
+
   public void setSidelineBigOverlaps(boolean sbo) {
     this.sidelineBigOverlaps = sbo;
   }
@@ -3119,7 +3110,7 @@ public class HBaseFsck {
   }
 
   /**
-   * 
+   *
    * @param sidelineDir - HDFS path to sideline data
    */
   public void setSidelineDir(String sidelineDir) {

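The HBaseFsck hunks above are two sides of one migration: catalog writes go through MetaEditor instead of a hand-assembled Writable Put, and catalog reads go through the new static accessors on HRegionInfo instead of raw info: cell parsing. A minimal sketch of both directions, assuming an HRegionInfo hri and a catalog Result r as in the surrounding code:

    // Write side: MetaEditor builds the protobuf-encoded info:regioninfo Put.
    Put p = MetaEditor.makePutFromRegionInfo(hri);

    // Read side: decode the catalog row instead of fishing out raw byte[]s.
    ServerName sn = HRegionInfo.getServerName(r);     // null if no server cell
    HRegionInfo info = HRegionInfo.getHRegionInfo(r); // null if missing/invalid

As the null checks in closeRegion show, both accessors signal an incomplete or unparseable row by returning null rather than throwing.
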
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java Tue Aug 28 03:40:47 2012
@@ -34,11 +34,11 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.client.AdminProtocol;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -174,11 +174,8 @@ public class HBaseFsckRepair {
    */
   public static void fixMetaHoleOnline(Configuration conf,
       HRegionInfo hri) throws IOException {
-    Put p = new Put(hri.getRegionName());
-    p.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-        Writables.getBytes(hri));
     HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
-    meta.put(p);
+    MetaEditor.addRegionToMeta(meta, hri);
     meta.close();
   }
 

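fixMetaHoleOnline now hands the catalog write entirely to MetaEditor.addRegionToMeta, which folds the old build-a-Put-then-put sequence into one call. A sketch of the usage, with conf and hri as in the method above:

    HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
    // roughly: meta.put(MetaEditor.makePutFromRegionInfo(hri))
    MetaEditor.addRegionToMeta(meta, hri);
    meta.close();
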
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java Tue Aug 28 03:40:47 2012
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.HTableDes
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
@@ -257,14 +258,12 @@ class HMerge {
         if (results == null) {
           return null;
         }
-        byte[] regionInfoValue = results.getValue(HConstants.CATALOG_FAMILY,
-            HConstants.REGIONINFO_QUALIFIER);
-        if (regionInfoValue == null || regionInfoValue.length == 0) {
+        HRegionInfo region = HRegionInfo.getHRegionInfo(results);
+        if (region == null) {
           throw new NoSuchElementException("meta region entry missing " +
               Bytes.toString(HConstants.CATALOG_FAMILY) + ":" +
               Bytes.toString(HConstants.REGIONINFO_QUALIFIER));
         }
-        HRegionInfo region = Writables.getHRegionInfo(regionInfoValue);
         if (!Bytes.equals(region.getTableName(), this.tableName)) {
           return null;
         }
@@ -333,10 +332,7 @@ class HMerge {
       }
       newRegion.getRegionInfo().setOffline(true);
 
-      Put put = new Put(newRegion.getRegionName());
-      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-        Writables.getBytes(newRegion.getRegionInfo()));
-      table.put(put);
+      MetaEditor.addRegionToMeta(table, newRegion.getRegionInfo());
 
       if(LOG.isDebugEnabled()) {
         LOG.debug("updated columns in row: "
@@ -376,7 +372,7 @@ class HMerge {
         do {
           hasMore = rootScanner.next(results);
           for(KeyValue kv: results) {
-            HRegionInfo info = Writables.getHRegionInfoOrNull(kv.getValue());
+            HRegionInfo info = HRegionInfo.parseFromOrNull(kv.getValue());
             if (info != null) {
               metaRegions.add(info);
             }
@@ -428,9 +424,8 @@ class HMerge {
       }
       HRegionInfo newInfo = newRegion.getRegionInfo();
       newInfo.setOffline(true);
-      Put put = new Put(newRegion.getRegionName());
-      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-          Writables.getBytes(newInfo));
+
+      Put put = MetaEditor.makePutFromRegionInfo(newInfo);
       root.put(put);
       if(LOG.isDebugEnabled()) {
         LOG.debug("updated columns in row: " + Bytes.toStringBinary(newRegion.getRegionName()));

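HMerge picks up the read-side helpers in both flavors: HRegionInfo.getHRegionInfo(Result) when a whole catalog row is in hand, and HRegionInfo.parseFromOrNull(byte[]) when iterating raw KeyValues. A sketch of the KeyValue path, assuming a kv from the root scan above:

    HRegionInfo info = HRegionInfo.parseFromOrNull(kv.getValue());
    if (info != null) {
      metaRegions.add(info); // skip cells that are empty or fail to parse
    }

parseFromOrNull mirrors the removed Writables.getHRegionInfoOrNull: null or empty bytes come back as null instead of an exception.
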
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java Tue Aug 28 03:40:47 2012
@@ -20,6 +20,8 @@
 
 package org.apache.hadoop.hbase.util;
 
+import java.io.IOException;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -32,12 +34,12 @@ import org.apache.hadoop.hbase.HBaseConf
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.io.WritableComparator;
@@ -47,9 +49,6 @@ import org.apache.hadoop.util.ToolRunner
 
 import com.google.common.base.Preconditions;
 
-import java.io.IOException;
-import java.util.List;
-
 /**
  * Utility that can merge any two regions in the same table: adjacent,
  * overlapping or disjoint.
@@ -153,15 +152,15 @@ public class Merge extends Configured im
     HRegion rootRegion = utils.getRootRegion();
     Get get = new Get(region1);
     get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
-    List<KeyValue> cells1 =  rootRegion.get(get, null).list();
-    Preconditions.checkState(cells1 != null, "First region cells can not be null");
-    HRegionInfo info1 = Writables.getHRegionInfo(cells1.get(0).getValue());
+    Result result1 = rootRegion.get(get, null);
+    Preconditions.checkState(!result1.isEmpty(), "First region result cannot be empty");
+    HRegionInfo info1 = HRegionInfo.getHRegionInfo(result1);
 
     get = new Get(region2);
     get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
-    List<KeyValue> cells2 =  rootRegion.get(get, null).list();
-    Preconditions.checkState(cells2 != null, "Second region cells can not be null");
-    HRegionInfo info2 = Writables.getHRegionInfo(cells2.get(0).getValue());
+    Result result2 = rootRegion.get(get, null);
+    Preconditions.checkState(!result2.isEmpty(), "Second region result cannot be empty");
+    HRegionInfo info2 = HRegionInfo.getHRegionInfo(result2);
     HRegion merged = merge(HTableDescriptor.META_TABLEDESC, info1, rootRegion, info2, rootRegion);
     LOG.info("Adding " + merged.getRegionInfo() + " to " +
         rootRegion.getRegionInfo());
@@ -224,10 +223,10 @@ public class Merge extends Configured im
     HRegion metaRegion1 = this.utils.getMetaRegion(meta1);
     Get get = new Get(region1);
     get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
-    List<KeyValue> cells1 =  metaRegion1.get(get, null).list();
-    Preconditions.checkState(cells1 != null,
+    Result result1 = metaRegion1.get(get, null);
+    Preconditions.checkState(!result1.isEmpty(),
        "First region result cannot be empty");
-    HRegionInfo info1 = Writables.getHRegionInfo(cells1.get(0).getValue());
+    HRegionInfo info1 = HRegionInfo.getHRegionInfo(result1);
     if (info1 == null) {
       throw new NullPointerException("info1 is null using key " +
           Bytes.toStringBinary(region1) + " in " + meta1);
@@ -241,10 +240,10 @@ public class Merge extends Configured im
     }
     get = new Get(region2);
     get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
-    List<KeyValue> cells2 =  metaRegion2.get(get, null).list();
-    Preconditions.checkState(cells2 != null,
+    Result result2 = metaRegion2.get(get, null);
+    Preconditions.checkState(!result2.isEmpty(),
        "Second region result cannot be empty");
-    HRegionInfo info2 = Writables.getHRegionInfo(cells2.get(0).getValue());
+    HRegionInfo info2 = HRegionInfo.getHRegionInfo(result2);
     if (info2 == null) {
       throw new NullPointerException("info2 is null using key " + meta2);
     }

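In Merge the nullable List<KeyValue> from Result.list() gives way to the Result itself, so the precondition becomes an emptiness check. The pattern, sketched with a stand-in row key (the real callers pass region1/region2):

    Get get = new Get(row);
    get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    Result result = region.get(get, null);
    Preconditions.checkState(!result.isEmpty(), "region row must exist");
    HRegionInfo info = HRegionInfo.getHRegionInfo(result);

The removed null checks were guarding the null that list() returns for an empty Result; isEmpty() states the same condition directly.
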
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java Tue Aug 28 03:40:47 2012
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
@@ -220,7 +221,7 @@ public class MetaUtils {
         hasNext = s.next(results);
         HRegionInfo info = null;
         for (KeyValue kv: results) {
-          info = Writables.getHRegionInfoOrNull(kv.getValue());
+          info = HRegionInfo.parseFromOrNull(kv.getValue());
           if (info == null) {
             LOG.warn("Region info is null for row " +
               Bytes.toStringBinary(kv.getRow()) + " in table " +
@@ -302,16 +303,13 @@ public class MetaUtils {
     if(kvs.length <= 0) {
       throw new IOException("no information for row " + Bytes.toString(row));
     }
-    byte [] value = kvs[0].getValue();
-    if (value == null) {
+    HRegionInfo info = HRegionInfo.getHRegionInfo(res);
+    if (info == null) {
       throw new IOException("no information for row " + Bytes.toString(row));
     }
-    HRegionInfo info = Writables.getHRegionInfo(value);
-    Put put = new Put(row);
+
     info.setOffline(onlineOffline);
-    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-        Writables.getBytes(info));
-    t.put(put);
+    MetaEditor.addRegionToMeta(t, info);
 
     Delete delete = new Delete(row);
     delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
@@ -338,20 +336,17 @@ public class MetaUtils {
       if(kvs.length <= 0) {
         return;
       }
-      byte [] value = kvs[0].getValue();
-      if (value == null) {
+
+      HRegionInfo h = HRegionInfo.getHRegionInfo(res);
+      if (h == null) {
         return;
       }
-      HRegionInfo h = Writables.getHRegionInfoOrNull(value);
-
       LOG.debug("Old " + Bytes.toString(HConstants.CATALOG_FAMILY) + ":" +
           Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " for " +
           hri.toString() + " in " + r.toString() + " is: " + h.toString());
     }
 
-    Put put = new Put(hri.getRegionName());
-    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-        Writables.getBytes(hri));
+    Put put = MetaEditor.makePutFromRegionInfo(hri);
     r.put(put);
 
     if (LOG.isDebugEnabled()) {
@@ -362,12 +357,11 @@ public class MetaUtils {
       if(kvs.length <= 0) {
         return;
       }
-      byte [] value = kvs[0].getValue();
-      if (value == null) {
+      HRegionInfo h = HRegionInfo.getHRegionInfo(res);
+      if (h == null) {
         return;
       }
-      HRegionInfo h = Writables.getHRegionInfoOrNull(value);
-        LOG.debug("New " + Bytes.toString(HConstants.CATALOG_FAMILY) + ":" +
+      LOG.debug("New " + Bytes.toString(HConstants.CATALOG_FAMILY) + ":" +
             Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " for " +
             hri.toString() + " in " + r.toString() + " is: " +  h.toString());
     }

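MetaUtils follows suit: the offline flip reads the HRegionInfo back through the Result accessor, mutates it, and writes it via MetaEditor, and the debug-logging paths decode with the same accessor. A condensed sketch of the flip, with res, t, row and onlineOffline as above:

    HRegionInfo info = HRegionInfo.getHRegionInfo(res);
    if (info == null) {
      throw new IOException("no information for row " + Bytes.toString(row));
    }
    info.setOffline(onlineOffline);
    MetaEditor.addRegionToMeta(t, info); // rewrites info:regioninfo with the new flag
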
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Writables.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Writables.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Writables.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Writables.java Tue Aug 28 03:40:47 2012
@@ -19,11 +19,6 @@
  */
 package org.apache.hadoop.hbase.util;
 
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.io.DataInputBuffer;
-import org.apache.hadoop.io.Writable;
-
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
@@ -32,6 +27,10 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.Writable;
+
 /**
  * Utility class with methods for manipulating Writable objects
  */
@@ -139,58 +138,6 @@ public class Writables {
   }
 
   /**
-   * @param bytes serialized bytes
-   * @return A HRegionInfo instance built out of passed <code>bytes</code>.
-   * @throws IOException e
-   * @deprecated Use {@link HRegionInfo#parseFrom(byte[])}
-   */
-  public static HRegionInfo getHRegionInfo(final byte [] bytes)
-  throws IOException {
-    return (HRegionInfo)getWritable(bytes, new HRegionInfo());
-  }
-
-  /**
-   * @param bytes serialized bytes
-   * @return All the hregioninfos that are in the byte array.  Keeps reading
-   * till we hit the end.
-   * @throws IOException e
-   */
-  public static List<HRegionInfo> getHRegionInfos(final byte [] bytes,
-      final int offset, final int length)
-  throws IOException {
-    if (bytes == null) {
-      throw new IllegalArgumentException("Can't build a writable with empty " +
-        "bytes array");
-    }
-    DataInputBuffer in = new DataInputBuffer();
-    List<HRegionInfo> hris = new ArrayList<HRegionInfo>();
-    try {
-      in.reset(bytes, offset, length);
-      while (in.available() > 0) {
-        HRegionInfo hri = new HRegionInfo();
-        hri.readFields(in);
-        hris.add(hri);
-      }
-    } finally {
-      in.close();
-    }
-    return hris;
-  }
-
-  /**
-   * @param bytes serialized bytes
-   * @return A HRegionInfo instance built out of passed <code>bytes</code>
-   * or <code>null</code> if passed bytes are null or an empty array.
-   * @throws IOException e
-   * @deprecated Use {@link HRegionInfo#parseFromOrNull(byte[])}
-   */
-  public static HRegionInfo getHRegionInfoOrNull(final byte [] bytes)
-  throws IOException {
-    return (bytes == null || bytes.length <= 0)?
-        null : getHRegionInfo(bytes);
-  }
-
-  /**
    * Copy one Writable to another.  Copies bytes using data streams.
    * @param src Source Writable
    * @param tgt Target Writable

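With those methods gone, Writables no longer references HRegionInfo at all, which is what the import shuffle at the top of the file reflects. The replacements live on HRegionInfo itself; a migration summary using only signatures that appear elsewhere in this commit:

    // removed from Writables                    // replacement on HRegionInfo
    Writables.getHRegionInfo(bytes);             HRegionInfo.parseFrom(bytes);
    Writables.getHRegionInfoOrNull(bytes);       HRegionInfo.parseFromOrNull(bytes);
    Writables.getHRegionInfos(bytes, off, len);  HRegionInfo.parseDelimitedFrom(bytes, off, len);

The first two mappings come straight from the @deprecated javadoc above; the third pairs the multi-region reader with the delimited parser exercised in TestSerialization below.
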
Modified: hbase/trunk/hbase-server/src/main/ruby/hbase/table.rb
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/ruby/hbase/table.rb?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/ruby/hbase/table.rb (original)
+++ hbase/trunk/hbase-server/src/main/ruby/hbase/table.rb Tue Aug 28 03:40:47 2012
@@ -459,7 +459,7 @@ EOF
     def to_string(column, kv, maxlength = -1)
       if is_meta_table?
         if column == 'info:regioninfo' or column == 'info:splitA' or column == 'info:splitB'
-          hri = org.apache.hadoop.hbase.util.Writables.getHRegionInfoOrNull(kv.getValue)
+          hri = org.apache.hadoop.hbase.HRegionInfo.parseFromOrNull(kv.getValue)
           return "timestamp=%d, value=%s" % [kv.getTimestamp, hri.toString]
         end
         if column == 'info:serverstartcode'

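The shell's to_string decodes the same cells through the same parser, just via JRuby interop. The equivalent call from Java, given a catalog KeyValue kv:

    HRegionInfo hri = HRegionInfo.parseFromOrNull(kv.getValue());
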
Added: hbase/trunk/hbase-server/src/test/data/TestMetaMigrationConvertToPB.tgz
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/data/TestMetaMigrationConvertToPB.tgz?rev=1377965&view=auto
==============================================================================
Binary file - no diff available.

Propchange: hbase/trunk/hbase-server/src/test/data/TestMetaMigrationConvertToPB.tgz
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java Tue Aug 28 03:40:47 2012
@@ -53,6 +53,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -81,7 +82,6 @@ import org.apache.hadoop.hbase.util.FSUt
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.RegionSplitter;
 import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
@@ -1188,11 +1188,7 @@ public class HBaseTestingUtility {
       int j = (i + 1) % startKeys.length;
       HRegionInfo hri = new HRegionInfo(table.getTableName(),
         startKeys[i], startKeys[j]);
-      Put put = new Put(hri.getRegionName());
-      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-        Writables.getBytes(hri));
-      meta.put(put);
-      LOG.info("createMultiRegions: inserted " + hri.toString());
+      MetaEditor.addRegionToMeta(meta, hri);
       newRegions.add(hri);
       count++;
     }
@@ -1238,11 +1234,7 @@ public class HBaseTestingUtility {
       int j = (i + 1) % startKeys.length;
       HRegionInfo hri = new HRegionInfo(htd.getName(), startKeys[i],
           startKeys[j]);
-      Put put = new Put(hri.getRegionName());
-      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-        Writables.getBytes(hri));
-      meta.put(put);
-      LOG.info("createMultiRegionsInMeta: inserted " + hri.toString());
+      MetaEditor.addRegionToMeta(meta, hri);
       newRegions.add(hri);
     }
 
@@ -1281,13 +1273,13 @@ public class HBaseTestingUtility {
     List<byte[]> rows = new ArrayList<byte[]>();
     ResultScanner s = t.getScanner(new Scan());
     for (Result result : s) {
-      byte[] val = result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
-      if (val == null) {
+      HRegionInfo info = HRegionInfo.getHRegionInfo(result);
+      if (info == null) {
         LOG.error("No region info for row " + Bytes.toString(result.getRow()));
         // TODO figure out what to do for this new hosed case.
         continue;
       }
-      HRegionInfo info = Writables.getHRegionInfo(val);
+
       if (Bytes.compareTo(info.getTableName(), tableName) == 0) {
         LOG.info("getMetaTableRows: row -> " +
             Bytes.toStringBinary(result.getRow()) + info);
@@ -1390,7 +1382,7 @@ public class HBaseTestingUtility {
 
     // Needed for TestImportTsv.
     conf.set("mapred.job.tracker", jobConf.get("mapred.job.tracker"));
-    // this for mrv2 support; mr1 ignores this 
+    // this is for mrv2 support; mr1 ignores this
     conf.set("mapreduce.framework.name", "yarn");
     String rmAdress = jobConf.get("yarn.resourcemanager.address");
     if (rmAdress != null) {

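The test utility seeds .META. through MetaEditor.addRegionToMeta as well (the per-region LOG.info lines go away with the hand-built Puts), and getMetaTableRows filters on the decoded HRegionInfo. A sketch of the seeding loop as it now reads, with htd, startKeys, meta and newRegions as above:

    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(htd.getName(), startKeys[i], startKeys[j]);
      MetaEditor.addRegionToMeta(meta, hri); // one protobuf catalog row per region
      newRegions.add(hri);
    }
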
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java?rev=1377965&r1=1377964&r2=1377965&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java Tue Aug 28 03:40:47 2012
@@ -20,20 +20,18 @@
 package org.apache.hadoop.hbase;
 
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
 import java.io.ByteArrayOutputStream;
 import java.io.DataOutputStream;
-import java.io.IOException;
 import java.util.ArrayList;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
 import java.util.Set;
-import java.util.TreeMap;
 
-import org.apache.hadoop.hbase.HServerLoad092.RegionLoad;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
@@ -41,10 +39,10 @@ import org.apache.hadoop.hbase.client.Re
 import org.apache.hadoop.hbase.client.RowLock;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.PrefixFilter;
 import org.apache.hadoop.hbase.filter.RowFilter;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -115,22 +113,30 @@ public class TestSerialization {
    */
   @Test public void testRegionInfo() throws Exception {
     HRegionInfo hri = createRandomRegion("testRegionInfo");
-    byte [] hrib = Writables.getBytes(hri);
-    HRegionInfo deserializedHri =
-      (HRegionInfo)Writables.getWritable(hrib, new HRegionInfo());
+
+    //test toByteArray()
+    byte [] hrib = hri.toByteArray();
+    HRegionInfo deserializedHri = HRegionInfo.parseFrom(hrib);
     assertEquals(hri.getEncodedName(), deserializedHri.getEncodedName());
-    //assertEquals(hri.getTableDesc().getFamilies().size(),
-    //  deserializedHri.getTableDesc().getFamilies().size());
+    assertEquals(hri, deserializedHri);
+
+    //test toDelimitedByteArray()
+    hrib = hri.toDelimitedByteArray();
+    DataInputBuffer buf = new DataInputBuffer();
+    try {
+      buf.reset(hrib, hrib.length);
+      deserializedHri = HRegionInfo.parseFrom(buf);
+      assertEquals(hri.getEncodedName(), deserializedHri.getEncodedName());
+      assertEquals(hri, deserializedHri);
+    } finally {
+      buf.close();
+    }
   }
 
   @Test public void testRegionInfos() throws Exception {
     HRegionInfo hri = createRandomRegion("testRegionInfos");
-    byte [] hrib = Writables.getBytes(hri);
-    byte [] triple = new byte [3 * hrib.length];
-    System.arraycopy(hrib, 0, triple, 0, hrib.length);
-    System.arraycopy(hrib, 0, triple, hrib.length, hrib.length);
-    System.arraycopy(hrib, 0, triple, hrib.length * 2, hrib.length);
-    List<HRegionInfo> regions = Writables.getHRegionInfos(triple, 0, triple.length);
+    byte[] triple = HRegionInfo.toDelimitedByteArray(hri, hri, hri);
+    List<HRegionInfo> regions = HRegionInfo.parseDelimitedFrom(triple, 0, triple.length);
     assertTrue(regions.size() == 3);
     assertTrue(regions.get(0).equals(regions.get(1)));
     assertTrue(regions.get(0).equals(regions.get(2)));
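
Taken together, testRegionInfo and testRegionInfos pin down both encodings: toByteArray/parseFrom for a single bare message, and the delimited form for a stream of them. The round trip, using only calls that appear in the test:

    byte[] one = hri.toByteArray();               // bare PB message
    HRegionInfo same = HRegionInfo.parseFrom(one);

    byte[] many = HRegionInfo.toDelimitedByteArray(hri, hri, hri);
    List<HRegionInfo> back = HRegionInfo.parseDelimitedFrom(many, 0, many.length);
    assertEquals(3, back.size());

The delimited form replaces the old trick of concatenating Writable encodings and reading until the buffer ran dry.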