Posted to commits@hbase.apache.org by li...@apache.org on 2014/03/12 22:17:20 UTC

svn commit: r1576909 [9/18] - in /hbase/branches/0.89-fb/src: ./ examples/thrift/ main/java/org/apache/hadoop/hbase/ main/java/org/apache/hadoop/hbase/avro/ main/java/org/apache/hadoop/hbase/avro/generated/ main/java/org/apache/hadoop/hbase/client/ mai...

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ChangeTableState.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ChangeTableState.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ChangeTableState.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ChangeTableState.java Wed Mar 12 21:17:13 2014
@@ -34,6 +34,7 @@ import java.io.IOException;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.TreeMap;
+import java.util.concurrent.ExecutionException;
 
 /**
  * Instantiated to enable or disable a table
@@ -65,6 +66,7 @@ class ChangeTableState extends TableOper
     }
   }
 
+  @SuppressWarnings("deprecation")
   @Override
   protected void postProcessMeta(MetaRegion m, HRegionInterface server)
   throws IOException {

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ColumnOperation.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ColumnOperation.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ColumnOperation.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ColumnOperation.java Wed Mar 12 21:17:13 2014
@@ -19,31 +19,25 @@
  */
 package org.apache.hadoop.hbase.master;
 
-import java.util.ArrayList;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Set;
+import java.util.concurrent.ExecutionException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
-import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Writables;
 
 import java.io.IOException;
 
 abstract class ColumnOperation extends TableOperation {
   private final Log LOG = LogFactory.getLog(this.getClass());
-
+  
   protected ColumnOperation(final HMaster master, final byte [] tableName)
   throws IOException {
     super(master, tableName);
@@ -69,7 +63,7 @@ abstract class ColumnOperation extends T
   }
 
   /**
-   * Simply updates the given table descriptor with the relevant changes
+   * Simply updates the given table descriptor with the relevant changes 
    * for the given column operation
    * @param desc  The descriptor that will be updated.
    */
@@ -77,7 +71,7 @@ abstract class ColumnOperation extends T
     throws IOException;
 
   /**
-   * Is run after META has been updated. Defaults to doing nothing, but
+   * Is run after META has been updated. Defaults to doing nothing, but 
    * some operations make use of this for housekeeping operations.
    * @param hri Info for the region that has just been updated.
    */
@@ -86,8 +80,8 @@ abstract class ColumnOperation extends T
   }
 
   /**
-   * Contains all of the logic for updating meta for each type of possible
-   * schema change. By implementing the logic at this level, we are able to
+   * Contains all of the logic for updating meta for each type of possible 
+   * schema change. By implementing the logic at this level, we are able to 
    * easily prevent excess writes to META
    * @param m the region
    */
@@ -103,8 +97,8 @@ abstract class ColumnOperation extends T
       updateRegionInfo(server, m.getRegionName(), i);
       // TODO: queue this to occur after reopening region
       postProcess(i);
-      // Ignore regions that are split or disabled,
-      // as we do not want to reopen them
+      // Ignore regions that are split or disabled, 
+      // as we do not want to reopen them  
       if (!(i.isSplit() || i.isOffline())) {
         regionsToReopen.add(i);
       }

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/DeleteColumn.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/DeleteColumn.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/DeleteColumn.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/DeleteColumn.java Wed Mar 12 21:17:13 2014
@@ -49,7 +49,7 @@ class DeleteColumn extends ColumnOperati
   }
 
   @Override
-  protected void updateTableDescriptor(HTableDescriptor desc)
+  protected void updateTableDescriptor(HTableDescriptor desc) 
   throws IOException {
     if (!desc.hasFamily(columnDeletion)) {
       // we have an error.

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/HMaster.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/HMaster.java Wed Mar 12 21:17:13 2014
@@ -93,6 +93,7 @@ import org.apache.hadoop.hbase.executor.
 import org.apache.hadoop.hbase.executor.HBaseExecutorService;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.ipc.HBaseRPC;
+import org.apache.hadoop.hbase.ipc.HBaseRPCOptions;
 import org.apache.hadoop.hbase.ipc.HBaseRPCProtocolVersion;
 import org.apache.hadoop.hbase.ipc.HBaseServer;
 import org.apache.hadoop.hbase.ipc.HMasterInterface;
@@ -233,14 +234,14 @@ public class HMaster extends HasThread i
   private volatile boolean isActiveMaster = false;
 
   public ThreadPoolExecutor logSplitThreadPool;
-  
+
   public RegionPlacementPolicy regionPlacement;
 
   /** Log directories split on startup for testing master failover */
   private List<String> logDirsSplitOnStartup;
 
   private boolean shouldAssignRegionsWithFavoredNodes = false;
-  
+
   /**
    * The number of dead server log split requests received. This is not
    * incremented during log splitting on startup. This field is never
@@ -252,12 +253,14 @@ public class HMaster extends HasThread i
   private String stopReason = "not stopping";
 
   private ZKClusterStateRecovery clusterStateRecovery;
-  
+
   private AtomicBoolean isLoadBalancerDisabled = new AtomicBoolean(false);
 
   private ConfigurationManager configurationManager =
       new ConfigurationManager();
-  
+
+  protected boolean useThrift = false;
+
   private long schemaChangeTryLockTimeoutMs;
 
   /**
@@ -296,6 +299,14 @@ public class HMaster extends HasThread i
         10 * 1000);
 
     this.sleeper = new Sleeper(this.threadWakeFrequency, getStopper());
+
+    /**
+     * Overriding the useThrift parameter depending upon the configuration.
+     * We set CLIENT_TO_RS_USE_THRIFT to MASTER_TO_RS_USE_THRIFT_STRING
+     */
+    this.useThrift = conf.getBoolean(HConstants.MASTER_TO_RS_USE_THRIFT,
+        HConstants.MASTER_TO_RS_USE_THRIFT_DEFAULT);
+    conf.setBoolean(HConstants.CLIENT_TO_RS_USE_THRIFT, this.useThrift);
     this.connection = ServerConnectionManager.getConnection(conf);
 
     // hack! Maps DFSClient => Master for logs.  HDFS made this
@@ -357,12 +368,13 @@ public class HMaster extends HasThread i
 
     final String masterName = getServerName();
     // initialize the thread pool for non-distributed log splitting.
-    int maxSplitLogThread = 
+    int maxSplitLogThread =
       conf.getInt("hbase.master.splitLogThread.max", 1000);
     logSplitThreadPool = Threads.getBoundedCachedThreadPool(
         maxSplitLogThread, 30L, TimeUnit.SECONDS,
         new ThreadFactory() {
           private int count = 1;
+          @Override
           public Thread newThread(Runnable r) {
             Thread t = new Thread(r, masterName + "-LogSplittingThread" + "-"
                 + count++);
@@ -373,7 +385,7 @@ public class HMaster extends HasThread i
         });
 
     regionPlacement = new RegionPlacement(this.conf);
-    
+
     // Only read favored nodes if using the assignment-based load balancer.
     this.shouldAssignRegionsWithFavoredNodes = conf.getClass(
         HConstants.LOAD_BALANCER_IMPL, Object.class).equals(
@@ -398,17 +410,20 @@ public class HMaster extends HasThread i
       tableLockManager = null;
     }
   }
-  
+
+  @Override
   public void enableLoadBalancer() {
     this.isLoadBalancerDisabled.set(false);
     LOG.info("Enable the load balancer");
   }
-  
+
+  @Override
   public void disableLoadBalancer() {
     this.isLoadBalancerDisabled.set(true);
     LOG.info("Disable the load balancer");
   }
-  
+
+  @Override
   public boolean isLoadBalancerDisabled() {
     return this.isLoadBalancerDisabled.get();
   }
@@ -440,7 +455,7 @@ public class HMaster extends HasThread i
     // directory is complete, they will be queued for further processing.
     unassignedWatcher = new ZKUnassignedWatcher(this);
   }
-  
+
   /**
    * @return true if successfully became primary master
    */
@@ -845,7 +860,7 @@ public class HMaster extends HasThread i
     if (!isStopped()) {
       clusterStateRecovery.backgroundRecoverRegionStateFromZK();
     }
-    
+
     try {
       /* Main processing loop */
       FINISHED: while (!this.closed.get()) {
@@ -1158,7 +1173,7 @@ public class HMaster extends HasThread i
       splitCount += contentSummary.getFileCount();
       splitLogSize += contentSummary.getSpaceConsumed();
     }
-    if (logDirs.size() == 0) {
+    if (logDirs.isEmpty()) {
       LOG.info("No logs to split");
       this.metrics.addSplit(0, 0, 0);
       return;
@@ -1308,7 +1323,7 @@ public class HMaster extends HasThread i
   }
 
   void updateLastFlushedSequenceIds(HServerInfo serverInfo) {
-    SortedMap<byte[], Long> flushedSequenceIds = serverInfo.getFlushedSequenceIdByRegion();
+    SortedMap<byte[], Long> flushedSequenceIds = (SortedMap<byte[], Long>) serverInfo.getFlushedSequenceIdByRegion();
     for (Entry<byte[], Long> entry : flushedSequenceIds.entrySet()) {
       Long existingValue = flushedSequenceIdByRegion.get(entry.getKey());
       if (existingValue != null) {
@@ -1359,6 +1374,7 @@ public class HMaster extends HasThread i
    * Request a shutdown the whole HBase cluster. This only modifies state
    * flags in memory and in ZK, so it is safe to be called multiple times.
    */
+  @Override
   public void requestClusterShutdown() {
     if (!clusterShutdownRequested.compareAndSet(false, true)) {
       // Only request cluster shutdown once.
@@ -1372,7 +1388,7 @@ public class HMaster extends HasThread i
     stopped = true;
     stopReason = "cluster shutdown";
   }
-  
+
   /** Shutdown the cluster quickly, don't quiesce regionservers */
   private void shutdownClusterNow() {
     closed.set(true);
@@ -1462,7 +1478,7 @@ public class HMaster extends HasThread i
     } catch (TableExistsException e) {
       throw e;
     } catch (IOException e) {
-      LOG.error("Cannot create table " + desc.getNameAsString() + 
+      LOG.error("Cannot create table " + desc.getNameAsString() +
         " because of " + e.toString());
       throw RemoteExceptionHandler.checkIOException(e);
     }
@@ -1494,14 +1510,15 @@ public class HMaster extends HasThread i
   }
 
   private static boolean tableExists(HRegionInterface srvr,
-    byte[] metaRegionName, String tableName)
-  throws IOException {
+      byte[] metaRegionName, String tableName)
+      throws IOException {
     byte[] firstRowInTable = Bytes.toBytes(tableName + ",,");
     Scan scan = new Scan(firstRowInTable);
     scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
     long scannerid = srvr.openScanner(metaRegionName, scan);
     try {
-      Result data = srvr.next(scannerid);
+      Result data =
+          BaseScanner.getOneResultFromScanner(srvr, scannerid);
       if (data != null && data.size() > 0) {
         HRegionInfo info = Writables.getHRegionInfo(
           data.getValue(HConstants.CATALOG_FAMILY,
@@ -1557,7 +1574,8 @@ public class HMaster extends HasThread i
     // created. Throw already-exists exception.
     MetaRegion m = regionManager.getFirstMetaRegionForRegion(newRegions[0]);
     byte [] metaRegionName = m.getRegionName();
-    HRegionInterface srvr = this.connection.getHRegionConnection(m.getServer());
+    HRegionInterface srvr = this.connection.getHRegionConnection(m.getServer(),
+        true, new HBaseRPCOptions());
     if (tableExists(srvr, metaRegionName, tableName)) {
       throw new TableExistsException(tableName);
     }
@@ -1598,7 +1616,7 @@ public class HMaster extends HasThread i
    * Get the assignment domain for the table.
    * Currently the domain would be generated by shuffling all the online
    * region servers.
-   * 
+   *
    * It would be easy to extend for the multi-tenancy in the future.
    * @param tableName
    * @return the assignment domain for the table.
@@ -1629,7 +1647,7 @@ public class HMaster extends HasThread i
     domain.addServers(servers);
     return domain;
   }
-  
+
   @Override
   public void deleteTable(final byte [] tableName) throws IOException {
     lockTable(tableName, "delete");
@@ -1682,6 +1700,7 @@ public class HMaster extends HasThread i
     alterTable(tableName, columnAdditions, columnModifications, columnDeletions, waitInterval, maxClosedRegions);
   }
 
+  @Override
   public Pair<Integer, Integer> getAlterStatus(byte [] tableName)
       throws IOException {
     Pair <Integer, Integer> p = new Pair<Integer, Integer>(0,0);
@@ -1838,6 +1857,7 @@ public class HMaster extends HasThread i
     return result.get();
   }
 
+  @SuppressWarnings("deprecation")
   Pair<HRegionInfo,HServerAddress> getTableRegionFromName(
       final byte [] regionName)
   throws IOException {
@@ -1865,6 +1885,7 @@ public class HMaster extends HasThread i
    * @return Result
    * @throws IOException
    */
+  @SuppressWarnings("deprecation")
   protected Result getFromMETA(final byte [] row, final byte [] family)
   throws IOException {
     MetaRegion meta = this.regionManager.getMetaRegionForRow(row);
@@ -2415,6 +2436,7 @@ public class HMaster extends HasThread i
     return numDeadServerLogSplitRequests.get();
   }
 
+  @Override
   public boolean isClusterShutdownRequested() {
     return clusterShutdownRequested.get();
   }
@@ -2422,7 +2444,7 @@ public class HMaster extends HasThread i
   public StoppableMaster getStopper() {
     return this;
   }
-  
+
   public StopStatus getClosedStatus() {
     return new StopStatus() {
       @Override
@@ -2467,5 +2489,14 @@ public class HMaster extends HasThread i
   public ConfigurationManager getConfigurationManager() {
     return configurationManager;
   }
+
+  public static boolean useThriftMasterToRS(Configuration c) {
+    return c.getBoolean(HConstants.MASTER_TO_RS_USE_THRIFT,
+        HConstants.MASTER_TO_RS_USE_THRIFT_DEFAULT);
+  }
+
+  @Override
+  public void close() throws Exception {
+  }
 }
 

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ModifyTableMeta.java Wed Mar 12 21:17:13 2014
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.util.Byte
 import org.apache.hadoop.hbase.util.Writables;
 
 import java.io.IOException;
+import java.util.concurrent.ExecutionException;
 
 /** Instantiated to modify table descriptor metadata */
 class ModifyTableMeta extends TableOperation {
@@ -52,7 +53,8 @@ class ModifyTableMeta extends TableOpera
     HRegionInfo i)
   throws IOException {
     Put put = new Put(i.getRegionName());
-    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(i));
+    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
+        Writables.getBytes(i));
     server.put(regionName, put);
     LOG.debug("updated HTableDescriptor for region " + i.getRegionNameAsString());
   }

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/MultiColumnOperation.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/MultiColumnOperation.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/MultiColumnOperation.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/MultiColumnOperation.java Wed Mar 12 21:17:13 2014
@@ -34,8 +34,8 @@ class MultiColumnOperation extends Colum
   private final List<ColumnOperation> operations;
 
   MultiColumnOperation(final HMaster master, final byte [] tableName,
-      final List<HColumnDescriptor> columnAdditions,
-      final List<Pair<byte [], HColumnDescriptor>> columnModifications,
+      final List<HColumnDescriptor> columnAdditions, 
+      final List<Pair<byte [], HColumnDescriptor>> columnModifications, 
       final List<byte []> columnDeletions) throws IOException {
     super(master, tableName);
     // convert the three separate lists to an internal list of sub-operations
@@ -68,7 +68,7 @@ class MultiColumnOperation extends Colum
   }
 
   @Override
-  protected void updateTableDescriptor(HTableDescriptor desc)
+  protected void updateTableDescriptor(HTableDescriptor desc) 
   throws IOException {
     // just ask all of the sub-operations to update the descriptor
     for (ColumnOperation op : operations) {
@@ -76,3 +76,4 @@ class MultiColumnOperation extends Colum
     }
   }
 }
+

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java Wed Mar 12 21:17:13 2014
@@ -20,6 +20,7 @@
 package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
+import java.util.concurrent.ExecutionException;
 
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java Wed Mar 12 21:17:13 2014
@@ -153,7 +153,7 @@ class ProcessServerShutdown extends Regi
     byte [] regionName)
   throws IOException {
     long scannerId = server.openScanner(regionName, scan);
-
+    
     int rows = scan.getCaching();
     // The default caching if not set for scans is -1. Handle it
     if (rows < 1) rows = 1;
@@ -266,7 +266,7 @@ class ProcessServerShutdown extends Regi
     }
     t2 = System.currentTimeMillis();
     if (LOG.isDebugEnabled())
-      LOG.debug("Took " + this.toString() + " " + (t2 - t0)
+      LOG.debug("Took " + this.toString() + " " + (t2 - t0) 
           + " ms. to update RegionManager. And,"
         + (t1 - t0) + " ms. to get the lock.");
 
@@ -274,8 +274,10 @@ class ProcessServerShutdown extends Regi
     t0 = System.currentTimeMillis();
     for (ToDoEntry e: toDoList) {
       if (e.regionOffline) {
-        LOG.debug(this.toString() + " setting offlineRegionInMETA : " + e.info.toString());
-        HRegion.offlineRegionInMETA(server, regionName, e.info);
+        LOG.debug(this.toString() + " setting offlineRegionInMETA : "
+            + e.info.toString());
+        HRegion.offlineRegionInMETA(server, regionName,
+            e.info);
       }
     }
     t1 = System.currentTimeMillis();
@@ -288,7 +290,7 @@ class ProcessServerShutdown extends Regi
 
         LOG.debug(this.toString() +
             (skip? "skipping set " : "setting ") + " unassigned: " + info.toString());
-        if (skip)
+        if (skip) 
           continue;
 
         this.setRegionUnassigned(info, false);
@@ -300,10 +302,10 @@ class ProcessServerShutdown extends Regi
       }
     }
     t2 = System.currentTimeMillis();
-
+    
     if (LOG.isDebugEnabled())
-      LOG.debug("Took " + this.toString() + " "
-          + (t1 - t0 ) + " ms. to mark regions offlineInMeta"
+      LOG.debug("Took " + this.toString() + " " 
+          + (t1 - t0 ) + " ms. to mark regions offlineInMeta" 
           + (t2 - t1) + " ms. to set " + regions.size() + " regions unassigned");
     return true;
   }

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RackManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RackManager.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RackManager.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RackManager.java Wed Mar 12 21:17:13 2014
@@ -15,7 +15,7 @@ import org.apache.hadoop.net.IPv4Address
 public class RackManager {
   static final Log LOG = LogFactory.getLog(RackManager.class);
   private DNSToSwitchMapping switchMapping;
-
+  
   public RackManager(Configuration conf) {
     Class<DNSToSwitchMapping> clz = (Class<DNSToSwitchMapping>)
         conf.getClass("hbase.util.ip.to.rack.determiner",
@@ -45,7 +45,7 @@ public class RackManager {
       return HConstants.UNKNOWN_RACK;
     return this.getRack(info.getServerAddress());
   }
-
+  
   /**
    * Get the name of the rack containing a server, according to the DNS to
    * switch mapping.
@@ -55,13 +55,13 @@ public class RackManager {
   public String getRack(HServerAddress server) {
     if (server == null)
       return HConstants.UNKNOWN_RACK;
-
+    
     List<String> racks = switchMapping.resolve(Arrays.asList(
         new String[]{server.getBindAddress()}));
     if (racks != null && racks.size() > 0) {
       return racks.get(0);
     }
-
+    
     return HConstants.UNKNOWN_RACK;
   }
 }

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionAssignmentSnapshot.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionAssignmentSnapshot.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionAssignmentSnapshot.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionAssignmentSnapshot.java Wed Mar 12 21:17:13 2014
@@ -53,14 +53,14 @@ public class RegionAssignmentSnapshot {
   private final Map<HRegionInfo, HServerAddress> regionToRegionServerMap;
   /** the region name to region info map */
   private final Map<String, HRegionInfo> regionNameToRegionInfoMap;
-
+  
   /** the regionServer to region map */
   private final Map<HServerAddress, List<HRegionInfo>> regionServerToRegionMap;
   /** the existing assignment plan in the META region */
   private final AssignmentPlan exsitingAssignmentPlan;
   /** The rack view for the current region server */
   private final AssignmentDomain globalAssignmentDomain;
-
+  
   public RegionAssignmentSnapshot(Configuration conf) {
     this.conf = conf;
     tableToRegionMap = new HashMap<String, List<HRegionInfo>>();
@@ -77,8 +77,8 @@ public class RegionAssignmentSnapshot {
    */
   public void initialize() throws IOException {
     LOG.info("Start to scan the META for the current region assignment " +
-		"snappshot");
-
+    		"snappshot");
+    
     // Add all the online region servers
     HBaseAdmin admin  = new HBaseAdmin(conf);
     for (HServerInfo serverInfo : admin.getClusterStatus().getServerInfo()) {
@@ -99,14 +99,14 @@ public class RegionAssignmentSnapshot {
             return true;
           }
           addRegion(regionInfo);
-
+          
           // Process the region server
           if (server == null) return true;
           HServerAddress regionServer = new HServerAddress(Bytes.toString(server));
-
+          
           // Add the current assignment to the snapshot
           addAssignment(regionInfo, regionServer);
-
+          
           // Process the assignment plan
           byte[] favoredNodes = result.getValue(HConstants.CATALOG_FAMILY,
               HConstants.FAVOREDNODES_QUALIFIER);
@@ -130,13 +130,13 @@ public class RegionAssignmentSnapshot {
     LOG.info("Finished to scan the META for the current region assignment" +
       "snapshot");
   }
-
+  
   private void addRegion(HRegionInfo regionInfo) {
     if (regionInfo == null)
       return;
     // Process the region name to region info map
     regionNameToRegionInfoMap.put(regionInfo.getRegionNameAsString(), regionInfo);
-
+    
     // Process the table to region map
     String tableName = regionInfo.getTableDesc().getNameAsString();
     List<HRegionInfo> regionList = tableToRegionMap.get(tableName);
@@ -147,12 +147,12 @@ public class RegionAssignmentSnapshot {
     regionList.add(regionInfo);
     tableToRegionMap.put(tableName, regionList);
   }
-
+  
   private void addAssignment(HRegionInfo regionInfo, HServerAddress server) {
     if (server != null && regionInfo != null) {
       // Process the region to region server map
       regionToRegionServerMap.put(regionInfo, server);
-
+      
       // Process the region server to region map
       List<HRegionInfo> regionList = regionServerToRegionMap.get(server);
       if (regionList == null) {
@@ -160,13 +160,13 @@ public class RegionAssignmentSnapshot {
       }
       regionList.add(regionInfo);
       regionServerToRegionMap.put(server, regionList);
-    }
+    } 
   }
 
   public Map<String, HRegionInfo> getRegionNameToRegionInfoMap() {
     return this.regionNameToRegionInfoMap;
   }
-
+  
   public Map<String, List<HRegionInfo>> getTableToRegionMap() {
     return tableToRegionMap;
   }
@@ -178,7 +178,7 @@ public class RegionAssignmentSnapshot {
   public Map<HServerAddress, List<HRegionInfo>> getRegionServerToRegionMap() {
     return regionServerToRegionMap;
   }
-
+  
   public AssignmentPlan getExistingAssignmentPlan() {
     return this.exsitingAssignmentPlan;
   }
@@ -186,7 +186,7 @@ public class RegionAssignmentSnapshot {
   public AssignmentDomain getGlobalAssignmentDomain() {
     return this.globalAssignmentDomain;
   }
-
+  
   public Set<String> getTableSet() {
     return this.tableToRegionMap.keySet();
   }

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionManager.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionManager.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionManager.java Wed Mar 12 21:17:13 2014
@@ -26,7 +26,6 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -102,7 +101,7 @@ public class RegionManager {
   /**
    * Map key -> tableName, value -> ThrottledRegionReopener
    * An entry is created in the map before an alter operation is performed on the
-   * table. It is cleared when all the regions have reopened.   
+   * table. It is cleared when all the regions have reopened.
    */
   private final Map<String, ThrottledRegionReopener> tablesReopeningRegions =
       new ConcurrentHashMap<String, ThrottledRegionReopener>();
@@ -121,7 +120,7 @@ public class RegionManager {
    */
    final SortedMap<String, RegionState> regionsInTransition =
     Collections.synchronizedSortedMap(new TreeMap<String, RegionState>());
-   
+
    /** Serves as a cache for locating where a particular region is open.
     * Currently being used to detect legitmate duplicate assignments from
     * spurious ones, that may seem to occur if a ZK notification is received
@@ -187,8 +186,8 @@ public class RegionManager {
   private final int zooKeeperNumRetries;
   private final int zooKeeperPause;
 
-  /** 
-   * Set of region servers which send heart beat in the first period of time 
+  /**
+   * Set of region servers which send heart beat in the first period of time
    * during the master boots. Hold the best locality regions for these
    * region servers.
    */
@@ -198,7 +197,7 @@ public class RegionManager {
 
   private LegacyRootZNodeUpdater legacyRootZNodeUpdater;
 
-  
+
   RegionManager(HMaster master) throws IOException {
     Configuration conf = master.getConfiguration();
 
@@ -249,7 +248,7 @@ public class RegionManager {
   void unsetRootRegion() {
     synchronized (regionsInTransition) {
       synchronized (rootRegionLocation) {
-        rootRegionLocation.set(null);        
+        rootRegionLocation.set(null);
         rootRegionLocation.notifyAll();
       }
       regionsInTransition.remove(
@@ -283,7 +282,7 @@ public class RegionManager {
    * Assigns regions to region servers attempting to balance the load across all
    * region servers. Note that no synchronization is necessary as the caller
    * (ServerManager.processMsgs) already owns the monitor for the RegionManager.
-   * 
+   *
    * @param info
    * @param mostLoadedRegions
    * @param returnMsgs
@@ -295,7 +294,7 @@ public class RegionManager {
       // be assigned when the region server reports next
       return;
     }
-    
+
     if (this.master.shouldAssignRegionsWithFavoredNodes()) {
       // assign regions with favored nodes
       assignRegionsWithFavoredNodes(info, mostLoadedRegions, returnMsgs);
@@ -304,7 +303,7 @@ public class RegionManager {
       assignRegionsWithoutFavoredNodes(info, mostLoadedRegions, returnMsgs);
     }
   }
-  
+
   private void assignRegionsWithFavoredNodes(HServerInfo regionServer,
       HRegionInfo[] mostLoadedRegions, ArrayList<HMsg> returnMsgs) {
     // get the regions that are waiting for assignment for that region server
@@ -337,17 +336,17 @@ public class RegionManager {
     boolean isSingleServer = isSingleRegionServer();
     boolean holdRegionForBestRegionServer = false;
     boolean assignmentByLocality = false;
-    
-    // only check assignmentByLocality when the 
+
+    // only check assignmentByLocality when the
     // PreferredRegionToRegionServerMapping is not null;
     if (this.master.getPreferredRegionToRegionServerMapping() != null) {
       long masterRunningTime = System.currentTimeMillis()
-              - this.master.getMasterStartupTime();      
-      holdRegionForBestRegionServer = 
+              - this.master.getMasterStartupTime();
+      holdRegionForBestRegionServer =
         masterRunningTime < this.master.getHoldRegionForBestLocalityPeriod();
-      assignmentByLocality = 
+      assignmentByLocality =
         masterRunningTime < this.master.getApplyPreferredAssignmentPeriod();
-      
+
       // once it has passed the ApplyPreferredAssignmentPeriod, clear up
       // the quickStartRegionServerSet and PreferredRegionToRegionServerMapping
       // and it won't check the assignmentByLocality anymore.
@@ -356,7 +355,7 @@ public class RegionManager {
         this.master.clearPreferredRegionToRegionServerMapping();
       }
     }
-    
+
     if (assignmentByLocality) {
       // have to add . at the end of host name
       String hostName = info.getHostname();
@@ -374,7 +373,7 @@ public class RegionManager {
         isSingleServer, preferredAssignment, assignmentByLocality,
         holdRegionForBestRegionServer,
         quickStartRegionServerSet);
-    
+
     if (regionsToAssign.isEmpty()) {
       // There are no regions waiting to be assigned.
       if (!assignmentByLocality
@@ -384,7 +383,7 @@ public class RegionManager {
         this.loadBalancer.loadBalancing(info, mostLoadedRegions, returnMsgs);
       }
     } else {
-      // if there's only one server or assign the region by locality, 
+      // if there's only one server or assign the region by locality,
       // just give the regions to this server
       if (isSingleServer || assignmentByLocality
           || preferredAssignment.booleanValue()) {
@@ -400,11 +399,11 @@ public class RegionManager {
 
   /*
    * Make region assignments taking into account multiple servers' loads.
-   * 
+   *
    * Note that no synchronization is needed while we iterate over
    * regionsInTransition because this method is only called by assignRegions
    * whose caller owns the monitor for RegionManager
-   * 
+   *
    * TODO: This code is unintelligible. REWRITE. Add TESTS! St.Ack 09/30/2009
    * @param thisServersLoad
    * @param regionsToAssign
@@ -420,7 +419,7 @@ public class RegionManager {
         isMetaAssign = true;
     }
     int nRegionsToAssign = regionsToAssign.size();
-    int otherServersRegionsCount = 
+    int otherServersRegionsCount =
       regionsToGiveOtherServers(nRegionsToAssign, thisServersLoad);
     nRegionsToAssign -= otherServersRegionsCount;
     if (nRegionsToAssign > 0 || isMetaAssign) {
@@ -434,8 +433,8 @@ public class RegionManager {
       int nservers = computeNextHeaviestLoad(thisServersLoad, heavierLoad);
       int nregions = 0;
       // Advance past any less-loaded servers
-      for (HServerLoad load = new HServerLoad(thisServersLoad); 
-      load.compareTo(heavierLoad) <= 0 && nregions < nRegionsToAssign; 
+      for (HServerLoad load = new HServerLoad(thisServersLoad);
+      load.compareTo(heavierLoad) <= 0 && nregions < nRegionsToAssign;
       load.setNumberOfRegions(load.getNumberOfRegions() + 1), nregions++) {
         // continue;
       }
@@ -488,7 +487,7 @@ public class RegionManager {
    * Note that no synchronization is needed on regionsInTransition while
    * iterating on it because the only caller is assignRegions whose caller owns
    * the monitor for RegionManager
-   * 
+   *
    * @param regionsToAssign
    * @param serverName
    * @param returnMsgs
@@ -548,7 +547,7 @@ public class RegionManager {
   private int regionsToGiveOtherServers(final int numUnassignedRegions,
       final HServerLoad thisServersLoad) {
 
-    SortedMap<HServerLoad, Collection<String>> lightServers = 
+    SortedMap<HServerLoad, Collection<String>> lightServers =
         master.getServerManager().getServersToLoad().getLightServers(thisServersLoad);
     // Examine the list of servers that are more lightly loaded than this one.
     // Pretend that we will assign regions to these more lightly loaded servers
@@ -614,7 +613,7 @@ public class RegionManager {
     // for the current region server
     Set<HRegionInfo> preservedRegionsForCurrentRS =
       assignmentManager.getTransientAssignments(addr);
-    
+
     synchronized (this.regionsInTransition) {
       int nonPreferredAssignment = 0;
       for (RegionState regionState : regionsInTransition.values()) {
@@ -657,13 +656,13 @@ public class RegionManager {
         if (!regionState.isUnassigned()) {
           continue;
         }
-        
-        if (preservedRegionsForCurrentRS == null || 
+
+        if (preservedRegionsForCurrentRS == null ||
             !preservedRegionsForCurrentRS.contains(regionInfo)) {
-          if (assignmentManager.hasTransientAssignment(regionInfo) || 
+          if (assignmentManager.hasTransientAssignment(regionInfo) ||
               nonPreferredAssignment > this.maxAssignInOneGo ||
               master.isServerBlackListed(server.getHostnamePort())) {
-            // Hold the region for its favored nodes and limit the number of 
+            // Hold the region for its favored nodes and limit the number of
             // non preferred assignments for each region server.
             continue;
           }
@@ -673,22 +672,22 @@ public class RegionManager {
         } else {
           isPreferredAssignment = true;
         }
-        
+
         // Assign the current region to the region server.
         regionsToAssign.add(regionState);
         LOG.debug("Going to assign user region " +
             regionInfo.getRegionNameAsString() +
             " to server " + server.getHostnamePort() + " in a " +
             (isPreferredAssignment ? "": "non-") + "preferred way");
-      
+
       }
     }
     return regionsToAssign;
   }
-  
+
   /**
    * Get the set of regions that should be assignable in this pass.
-   * 
+   *
    * Note that no synchronization on regionsInTransition is needed because the
    * only caller (assignRegions, whose caller is ServerManager.processMsgs) owns
    * the monitor for RegionManager
@@ -781,13 +780,13 @@ public class RegionManager {
               + " is in transition but not enough servers yet");
           continue;
         }
-        
+
         // if we are holding it, don't give it away to any other server
         if (assignmentManager.hasTransientAssignment(s.getRegionInfo())) {
           continue;
         }
         if (assignmentByLocality && !i.isRootRegion() && !i.isMetaRegion()) {
-          Text preferredHostNameTxt = 
+          Text preferredHostNameTxt =
             (Text)this.master.getPreferredRegionToRegionServerMapping().get(new Text(name));
 
           if (hostName == null) {
@@ -796,18 +795,18 @@ public class RegionManager {
           if (preferredHostNameTxt != null) {
             String preferredHost = preferredHostNameTxt.toString();
             if (hostName.startsWith(preferredHost)) {
-              LOG.debug("Doing Preferred Region Assignment for : " + name + 
+              LOG.debug("Doing Preferred Region Assignment for : " + name +
                   " to the " + hostName);
               // add the region to its preferred region server.
               regionsToAssign.add(s);
               continue;
-            } else if (holdRegionForBestRegionserver || 
+            } else if (holdRegionForBestRegionserver ||
                 quickStartRegionServerSet.contains(preferredHost)) {
               continue;
             }
           }
         }
-        // Only assign a configured number unassigned region at one time in the 
+        // Only assign a configured number unassigned region at one time in the
         // non preferred assignment case.
         if ((nonPreferredAssignmentCount++) < this.maxAssignInOneGo) {
           regionsToAssign.add(s);
@@ -1110,7 +1109,7 @@ public class RegionManager {
   throws IOException {
     createRegion(newRegion, server, metaRegionName, null);
   }
- 
+
   /**
    * Create a new HRegion, put a row for it into META (or ROOT), and mark the
    * new region unassigned so that it will get assigned to a region server.
@@ -1137,7 +1136,7 @@ public class RegionManager {
     // 3.1 Put the region info into meta table.
     put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
         Writables.getBytes(info));
-    
+
     // 3.2 Put the favorite nodes into meta.
     if (favoriteNodeList != null) {
       String favoredNodes = RegionPlacement.getFavoredNodes(favoriteNodeList);
@@ -1146,16 +1145,15 @@ public class RegionManager {
       LOG.info("Create the region " + info.getRegionNameAsString() +
           " with favored nodes " + favoredNodes);
     }
-
     server.put(metaRegionName, put);
 
     // 4. Close the new region to flush it to disk.  Close its log file too.
     region.close();
     region.getLog().closeAndDelete();
-    
+
     // After all regions are created, the caller will schedule
     // the meta scanner to run immediately and assign out the
-    // regions.    
+    // regions.
   }
 
   /**
@@ -1404,7 +1402,7 @@ public class RegionManager {
         }
         if (writeToZK) {
           zkWrapper.createOrUpdateUnassignedRegion(info.getEncodedName(), data);
-          LOG.debug("Created/updated UNASSIGNED zNode " + info.getRegionNameAsString() + 
+          LOG.debug("Created/updated UNASSIGNED zNode " + info.getRegionNameAsString() +
                     " in state " + HBaseEventType.M2ZK_REGION_OFFLINE);
         }
         s = new RegionState(info, RegionState.State.UNASSIGNED);
@@ -1549,7 +1547,7 @@ public class RegionManager {
         // marked offline so that it opens on the preferred server.
         this.assignmentManager.executeAssignmentPlan(regionInfo);
       }
-    }    
+    }
   }
 
   /**
@@ -1885,7 +1883,8 @@ public class RegionManager {
         Iterator<Map.Entry<byte[], Pair<HRegionInfo, HServerAddress>>> it2 =
           cfMap.entrySet().iterator();
         while (it2.hasNext()) {
-          Map.Entry mapPairs = it2.next();
+          Entry<byte[], Pair<HRegionInfo, HServerAddress>> mapPairs =
+              it2.next();
           Pair<HRegionInfo,HServerAddress> pair =
             (Pair<HRegionInfo,HServerAddress>)mapPairs.getValue();
           if (addr.equals(pair.getSecond())) {
@@ -1964,6 +1963,7 @@ public class RegionManager {
      * @param mostLoadedRegions the candidate regions for moving
      * @param returnMsgs region close messages to be passed to the server
      */
+    @Override
     public void loadBalancing(HServerInfo info, HRegionInfo[] mostLoadedRegions,
         ArrayList<HMsg> returnMsgs) {
 
@@ -2003,7 +2003,7 @@ public class RegionManager {
       for (HRegionInfo region : mostLoadedRegions) {
         List<HServerAddress> preferences =
             assignmentManager.getAssignmentFromPlan(region);
-        if (preferences == null || preferences.size() == 0) {
+        if (preferences == null || preferences.isEmpty()) {
           // No prefered assignment, do nothing.
           continue;
         } else if (info.getServerAddress().equals(preferences.get(0))) {
@@ -2051,7 +2051,7 @@ public class RegionManager {
       for (HRegionInfo region : mostLoadedRegions) {
         List<HServerAddress> preferences =
             assignmentManager.getAssignmentFromPlan(region);
-        if (preferences == null || preferences.size() == 0) {
+        if (preferences == null || preferences.isEmpty()) {
           // No preferredAssignment, do nothing.
           continue;
         } else if (preferences.contains(info.getServerAddress())) {
@@ -2116,7 +2116,7 @@ public class RegionManager {
       for (HRegionInfo region : mostLoadedRegions) {
         List<HServerAddress> preferences =
             assignmentManager.getAssignmentFromPlan(region);
-        if (preferences == null || preferences.size() == 0) {
+        if (preferences == null || preferences.isEmpty()) {
           // No preferredAssignment, do nothing.
           continue;
         } else if (info.getServerAddress().equals(preferences.get(0))) {
@@ -2144,7 +2144,7 @@ public class RegionManager {
 
           // Only move the region if the other server is under-loaded and the
           // current server is overloaded.
-          if (serverLoad - regionsUnassigned > avgLoadPlusSlop && 
+          if (serverLoad - regionsUnassigned > avgLoadPlusSlop &&
               otherLoad.getNumberOfRegions() < avgLoadMinusSlop) {
             if (unassignRegion(info, region, returnMsgs)) {
               unassignedFor(preferences.get(i));
@@ -2258,7 +2258,7 @@ public class RegionManager {
     DefaultLoadBalancer() {
       super();
     }
-    
+
     /**
      * Balance server load by unassigning some regions.
      *
@@ -2266,6 +2266,7 @@ public class RegionManager {
      * @param mostLoadedRegions - array of most loaded regions
      * @param returnMsgs - array of return massages
      */
+    @Override
     public void loadBalancing(HServerInfo info, HRegionInfo[] mostLoadedRegions,
         ArrayList<HMsg> returnMsgs) {
       HServerLoad servLoad = info.getLoad();
@@ -2323,7 +2324,7 @@ public class RegionManager {
     private int balanceToLowloaded(String srvName, HServerLoad srvLoad,
         double avgLoad) {
 
-      ServerLoadMap<HServerLoad> serverLoadMap = master.getServerManager().getServersToLoad(); 
+      ServerLoadMap<HServerLoad> serverLoadMap = master.getServerManager().getServersToLoad();
 
       if (!serverLoadMap.isMostLoadedServer(srvName))
         return 0;
@@ -2340,10 +2341,10 @@ public class RegionManager {
       int lowSrvCount = serverLoadMap.numServersByLoad(lowestServerLoad);
       int numSrvRegs = srvLoad.getNumberOfRegions();
       int numMoveToLowLoaded = (avgLoadMinusSlop - lowestLoad) * lowSrvCount;
-      
-      int numRegionsToClose = numSrvRegs - (int)Math.floor(avgLoad);      
+
+      int numRegionsToClose = numSrvRegs - (int)Math.floor(avgLoad);
       numRegionsToClose = Math.min(numRegionsToClose, numMoveToLowLoaded);
-         
+
       if (LOG.isDebugEnabled()) {
         LOG.debug("Server(s) are carrying only " + lowestLoad + " regions. " +
           "Server " + srvName + " is most loaded (" + numSrvRegs +
@@ -2586,7 +2587,7 @@ public class RegionManager {
   /**
    * Method used to do housekeeping for holding regions for a RegionServer going
    * down for a restart
-   * 
+   *
    * @param regionServer
    *          the RegionServer going down for a restart
    * @param regions
@@ -2594,7 +2595,7 @@ public class RegionManager {
    */
   public void addRegionServerForRestart(final HServerInfo regionServer,
       Set<HRegionInfo> regions) {
-    LOG.debug("Holding regions of restartng server: " +  
+    LOG.debug("Holding regions of restartng server: " +
         regionServer.getServerName());
     HServerAddress addr = regionServer.getServerAddress();
 
@@ -2628,7 +2629,7 @@ public class RegionManager {
     }
     return tablesReopeningRegions.get(tableName);
   }
-  
+
   /**
    * Return the throttler for this table
    * @param tableName
@@ -2637,7 +2638,7 @@ public class RegionManager {
   public ThrottledRegionReopener getThrottledReopener(String tableName) {
     return tablesReopeningRegions.get(tableName);
   }
-  
+
   /**
    * Delete the throttler when the operation is complete
    * @param tableName
@@ -2663,10 +2664,10 @@ public class RegionManager {
       LOG.debug("Tried to delete a throttled reopener, but it does not exist.");
     }
   }
-  
+
   /**
-   * When the region is opened, check if it is reopening and notify the throttler 
-   * for further processing.  
+   * When the region is opened, check if it is reopening and notify the throttler
+   * for further processing.
    * @param region
    */
   public void notifyRegionReopened(HRegionInfo region) {
@@ -2693,7 +2694,7 @@ public class RegionManager {
     return m;
   }
 
-  /** 
+  /**
    * Modifies region state in regionsInTransition based on the initial scan of the ZK unassigned
    * directory.
    * @param event event type written by the regionserver to the znode

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionPlacement.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionPlacement.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionPlacement.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionPlacement.java Wed Mar 12 21:17:13 2014
@@ -628,7 +628,7 @@ public class RegionPlacement implements 
         List<HRegionInfo> regionsToMoveFromRack = pickRegionsFromRack(plan,
             mapRackToRegionServers.get(rack), mapServerToRegions, rack,
             moveRegionsPerRack, newRack);
-        if (regionsToMoveFromRack.size() == 0) {
+        if (regionsToMoveFromRack.isEmpty()) {
           System.out
               .println("WARNING: number of regions to be moved from rack "
                   + rack + " is zero!");
@@ -808,7 +808,7 @@ public class RegionPlacement implements 
       // Get the all the region servers
       List<HServerAddress> servers = new ArrayList<HServerAddress>();
       servers.addAll(domain.getAllServers());
-      
+
       LOG.info("Start to generate assignment plan for " + numRegions +
           " regions from table " + tableName + " with " +
           servers.size() + " region servers");
@@ -1164,11 +1164,13 @@ public class RegionPlacement implements 
           // Update the current region server with its updated favored nodes
           HRegionInterface currentRegionServer =
             connection.getHRegionConnection(entry.getKey());
-          int updatedRegionNum =
+          int updatedRegionNum;
+          String hostName =
+            currentRegionServer.getHServerInfo().getHostnamePort();
+          updatedRegionNum =
             currentRegionServer.updateFavoredNodes(singleServerPlan);
-          LOG.info("Region server " +
-              currentRegionServer.getHServerInfo().getHostnamePort() +
-              " has updated " + updatedRegionNum + " / " +
+          LOG.info("Region server " + hostName + " has updated " +
+              updatedRegionNum + " / " +
               singleServerPlan.getAssignmentMap().size() +
               " regions with the assignment plan");
           succeededNum ++;
@@ -1292,7 +1294,7 @@ public class RegionPlacement implements 
    */
   public static InetSocketAddress[] getFavoredInetSocketAddress(
       List<HServerAddress> serverList) {
-    if (serverList == null || serverList.size() == 0)
+    if (serverList == null || serverList.isEmpty())
       return null;
 
     InetSocketAddress[] addresses =
@@ -1347,7 +1349,7 @@ public class RegionPlacement implements 
     // sort the map based on region info
     Map<HRegionInfo, List<HServerAddress>> assignmentMap =
       new TreeMap<HRegionInfo, List<HServerAddress>>(plan.getAssignmentMap());
-    
+
     for (Map.Entry<HRegionInfo, List<HServerAddress>> entry :
       assignmentMap.entrySet()) {
       String serverList = RegionPlacement.getFavoredNodes(entry.getValue());

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementPolicy.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementPolicy.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementPolicy.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementPolicy.java Wed Mar 12 21:17:13 2014
@@ -15,7 +15,7 @@ public interface RegionPlacementPolicy {
  */
   public AssignmentPlan getNewAssignmentPlan(HRegionInfo[] regions,
       AssignmentDomain domain) throws IOException;
-
+  
   /**
    * Get the favored assignment plan for all the regions
    * @return the favored assignment plan for all the regions
@@ -23,7 +23,7 @@ public interface RegionPlacementPolicy {
    */
   public AssignmentPlan getNewAssignmentPlan()
   throws IOException;
-
+  
   /**
    * Get the existing assignment plan for all the regions
    * @return the existing favored assignment plan for all the regions
@@ -31,7 +31,7 @@ public interface RegionPlacementPolicy {
    */
   public AssignmentPlan getExistingAssignmentPlan()
   throws IOException;
-
+  
   /**
    * Update the favored assignment plan
    * @param plan

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionServerOperationQueue.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionServerOperationQueue.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionServerOperationQueue.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionServerOperationQueue.java Wed Mar 12 21:17:13 2014
@@ -217,7 +217,7 @@ public class RegionServerOperationQueue 
     // for the missing meta region(s) to come back online, but since it
     // is waiting, it cannot process the meta region online operation it
     // is waiting for. So put this operation back on the queue for now.
-    if (toDoQueue.size() == 0) {
+    if (toDoQueue.isEmpty()) {
       // The queue is currently empty so wait for a while to see if what
       // we need comes in first
       this.sleeper.sleep();

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java Wed Mar 12 21:17:13 2014
@@ -109,7 +109,7 @@ public class SplitLogManager implements 
   private ConcurrentMap<String, Task> tasks =
     new ConcurrentHashMap<String, Task>();
   private TimeoutMonitor timeoutMonitor;
-
+  
   private Set<String> deadServers = null;
   private Object deadServersLock = new Object();
 
@@ -119,7 +119,7 @@ public class SplitLogManager implements 
    * copying recovered edits to their final destination. The task finisher
    * has to be robust because it can be arbitrarily restarted or called
    * multiple times.
-   *
+   * 
    * @param zkw
    * @param conf
    * @param stopper
@@ -155,13 +155,13 @@ public class SplitLogManager implements 
    * @param conf
    * @param stopper
    * @param serverName
-   * @param tf task finisher
+   * @param tf task finisher 
    */
   public SplitLogManager(ZooKeeperWrapper zkw, Configuration conf,
       Stoppable stopper, String serverName, TaskFinisher tf) {
     this.watcher = zkw;
     this.watcher.createZNodeIfNotExists(this.watcher.splitLogZNode, new byte[0],
-        CreateMode.PERSISTENT, false /* set watch? */);
+        CreateMode.PERSISTENT, false /* set watch? */); 
     this.taskFinisher = tf;
     this.conf = conf;
     this.stopper = stopper;
@@ -702,7 +702,7 @@ public class SplitLogManager implements 
       return oldtask;
     }
   }
-
+  
   Task findOrCreateOrphanTask(String path) {
     Task orphanTask = new Task();
     Task task;
@@ -842,7 +842,7 @@ public class SplitLogManager implements 
     public void heartbeatNoDetails(long time) {
       last_update = time;
     }
-
+    
     public void heartbeat(long time, int version, String server, String worker) {
       last_version = version;
       last_update = time;
@@ -862,7 +862,7 @@ public class SplitLogManager implements 
       last_update = -1;
     }
   }
-
+  
   public void handleDeadServer(String server_name) {
     // resubmit the tasks on the TimeoutMonitor thread. Makes it easier
     // to reason about concurrency. Makes it easier to retry.
@@ -1085,7 +1085,7 @@ public class SplitLogManager implements 
           // Therefore, we just bail out in this case.
           LOG.error("ZK session expired, cannot delete " + path + ". Master must be killed " +
               "in this case.");
-          return;
+          return; 
         }
         if (rc != KeeperException.Code.NONODE.intValue()) {
           tot_mgr_node_delete_err.incrementAndGet();
@@ -1187,7 +1187,7 @@ public class SplitLogManager implements 
     TerminationStatus(String msg) {
       statusMsg = msg;
     }
-
+    
     @Override
     public String toString() {
       return statusMsg;

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/TableDelete.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/TableDelete.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/TableDelete.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/TableDelete.java Wed Mar 12 21:17:13 2014
@@ -60,7 +60,8 @@ class TableDelete extends TableOperation
       }
       // Delete the region
       try {
-        HRegion.removeRegionFromMETA(server, m.getRegionName(), i.getRegionName());
+        HRegion.removeRegionFromMETA(server, m.getRegionName(),
+            i.getRegionName());
         HRegion.deleteRegion(this.master.getFileSystem(),
           this.master.getRootDir(), i);
 
@@ -74,4 +75,4 @@ class TableDelete extends TableOperation
     this.master.getFileSystem().delete(new Path(this.master.getRootDir(),
       Bytes.toString(this.tableName)), true);
   }
-}
\ No newline at end of file
+}

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/TableOperation.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/TableOperation.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/TableOperation.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/TableOperation.java Wed Mar 12 21:17:13 2014
@@ -19,6 +19,7 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -37,6 +38,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
 import java.util.TreeSet;
+import java.util.concurrent.ExecutionException;
 
 /**
  * Abstract base class for operations that need to examine all HRegionInfo
@@ -50,7 +52,7 @@ abstract class TableOperation {
   protected final Set<HRegionInfo> unservedRegions = new TreeSet<HRegionInfo>();
   protected final Set<HRegionInfo> regionsToProcess = new TreeSet<HRegionInfo>();
   protected HMaster master;
-
+  
   protected TableOperation(final HMaster master, final byte [] tableName)
   throws IOException {
     this.master = master;
@@ -80,7 +82,7 @@ abstract class TableOperation {
       tableOp = operation;
     }
 
-    public Boolean call() throws IOException {
+    public Boolean call() throws IOException, InterruptedException, ExecutionException {
       boolean tableExists = false;
 
       // Open a scanner on the meta region
@@ -95,11 +97,13 @@ abstract class TableOperation {
       List<byte []> emptyRows = new ArrayList<byte []>();
       try {
         while (true) {
-          Result values = this.server.next(scannerId);
+          Result values =
+              BaseScanner.getOneResultFromScanner(this.server, scannerId);
           if (values == null || values.isEmpty()) {
             break;
           }
-          HRegionInfo info = this.master.getHRegionInfo(values.getRow(), values);
+          HRegionInfo info =
+              this.master.getHRegionInfo(values.getRow(), values);
           if (info == null) {
             emptyRows.add(values.getRow());
             LOG.error(Bytes.toString(HConstants.CATALOG_FAMILY) + ":"
@@ -120,7 +124,7 @@ abstract class TableOperation {
 
           tableExists = true;
           if(tableOp instanceof AddColumn || tableOp instanceof ModifyColumn ||
-              tableOp instanceof DeleteColumn ||
+              tableOp instanceof DeleteColumn || 
               tableOp instanceof MultiColumnOperation) {
             regionsToProcess.add(info);
           }
@@ -153,7 +157,7 @@ abstract class TableOperation {
       if (!tableExists) {
         throw new TableNotFoundException(Bytes.toString(tableName));
       }
-
+      
       postProcessMeta(m, server);
       unservedRegions.clear();
       return Boolean.TRUE;

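The TableOperation hunk routes the meta scan through BaseScanner.getOneResultFromScanner and widens call() to also throw InterruptedException and ExecutionException. The helper's body is not part of this diff; the sketch below is only a hypothetical stand-in that shows, under the assumption that the helper blocks on an asynchronous read, why those two checked exceptions now appear in the signature.

    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.Future;

    final class AsyncScanReadSketch {
      // Hypothetical stand-in for the new helper: block on an asynchronous scanner
      // read and propagate interruption and wrapped failures to the caller.
      static <R> R getOneResult(Future<R> pending)
          throws InterruptedException, ExecutionException {
        return pending.get();
      }
    }

Any Future-based read of this shape forces callers such as ProcessTableOperation.call() to declare the same exceptions, which matches the signature change shown above.
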
Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ZKUnassignedWatcher.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ZKUnassignedWatcher.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ZKUnassignedWatcher.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ZKUnassignedWatcher.java Wed Mar 12 21:17:13 2014
@@ -116,7 +116,7 @@ public class ZKUnassignedWatcher impleme
 
     LOG.debug("ZK-EVENT-PROCESS: Got zkEvent " + eventType +
               " path:" + event.getPath());
-
+ 
     try
     {
       /*

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java Wed Mar 12 21:17:13 2014
@@ -61,36 +61,36 @@ public class MasterMetrics implements Up
   final MetricsLongValue  splitTime =
     new MetricsLongValue("splitTime", registry);
 
-
-  /*  Number of active region servers. This number is updated
-   *  every time a regionserver joins or leaves.
+  
+  /*  Number of active region servers. This number is updated 
+   *  every time a regionserver joins or leaves.   
    */
   public MetricsIntValue numRegionServers =
 	new MetricsIntValue("numRegionServers", registry);
-
-  /*  This is the number of dead region servers.
+   
+  /*  This is the number of dead region servers. 
   *  This is cumulative across all intervals from startup time.
    */
-  public MetricsIntValue numRSExpired =
+  public MetricsIntValue numRSExpired = 
 	new MetricsIntValue("numRSExpired", registry);
-
-  /** Metrics to keep track of the number and size of logs split.
-   *  This is cumulative across all intervals from startup time.
+  
+  /** Metrics to keep track of the number and size of logs split. 
+   *  This is cumulative across all intervals from startup time. 
    */
-  public MetricsLongValue numLogsSplit =
+  public MetricsLongValue numLogsSplit = 
 	  new MetricsLongValue("numLogsSplit", registry);
-
-  private MetricsLongValue sizeOfLogsSplit =
+  
+  private MetricsLongValue sizeOfLogsSplit = 
 	  new MetricsLongValue("sizeOfLogsSplit", registry);
-
-  /** Track the number of regions opened. Useful for identifying
-   *  open/close of regions due to load balancing.
-   *  This is a cumulative metric.
+  
+  /** Track the number of regions opened. Useful for identifying 
+   *  open/close of regions due to load balancing. 
+   *  This is a cumulative metric.   
    */
-  private MetricsIntValue numRegionsOpened =
+  private MetricsIntValue numRegionsOpened = 
 	  new MetricsIntValue("numRegionsOpened", registry);
-
-  private ServerManager serverManager;
+  
+  private ServerManager serverManager; 
 
   public MasterMetrics(final String name) {
     MetricsContext context = MetricsUtil.getContext("hbase");
@@ -101,23 +101,23 @@ public class MasterMetrics implements Up
     HBaseInfo.init();
     // expose the MBean for metrics
     masterStatistics = new MasterStatistics(this.registry);
-
+  
     // get custom attributes
     try {
-      Object m =
+      Object m = 
         ContextFactory.getFactory().getAttribute("hbase.extendedperiod");
       if (m instanceof String) {
         this.extendedPeriod = Long.parseLong((String) m)*1000;
       }
-    } catch (IOException ioe) {
+    } catch (IOException ioe) { 
       LOG.info("Couldn't load ContextFactory for Metrics config info");
     }
-
+    
     LOG.info("Initialized");
   }
   public MasterMetrics(final String name, ServerManager serverMgr) {
 	  this(name);
-	  serverManager = serverMgr;
+	  serverManager = serverMgr;	 
   }
 
   public void shutdown() {
@@ -131,12 +131,12 @@ public class MasterMetrics implements Up
    * @param unused
    */
   public void doUpdates(MetricsContext unused) {
-
+	  	
     synchronized (this) {
       this.lastUpdate = System.currentTimeMillis();
       this.numRegionServers.set(this.serverManager.numServers());
       // has the extended period for long-living stats elapsed?
-      if (this.extendedPeriod > 0 &&
+      if (this.extendedPeriod > 0 && 
           this.lastUpdate - this.lastExtUpdate >= this.extendedPeriod) {
         this.lastExtUpdate = this.lastUpdate;
         this.resetAllMinMax();
@@ -156,7 +156,7 @@ public class MasterMetrics implements Up
   public void resetAllMinMax() {
     // Nothing to do
   }
-
+  
   /**
    * Record a single instance of a split
    * @param time time that the split took
@@ -181,19 +181,19 @@ public class MasterMetrics implements Up
   public void incrementRequests(final int inc) {
     this.cluster_requests.inc(inc);
   }
-
+ 
   public synchronized void incRegionsOpened() {
 	  numRegionsOpened.set(numRegionsOpened.get() + 1);
   }
-
+  
   public synchronized int getRegionsOpened() {
     return numRegionsOpened.get();
   }
-
+   
   public synchronized void incRegionServerExpired() {
 	  numRSExpired.set(numRSExpired.get() + 1);
   }
-
+  
   public synchronized int getRegionServerExpired() {
     return numRSExpired.get();
   }

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/metrics/HBaseInfo.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/metrics/HBaseInfo.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/metrics/HBaseInfo.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/metrics/HBaseInfo.java Wed Mar 12 21:17:13 2014
@@ -34,13 +34,13 @@ import javax.management.ObjectName;
 public class HBaseInfo {
   protected static class HBaseInfoMBean extends MetricsMBeanBase {
     private final ObjectName mbeanName;
-
+  
     public HBaseInfoMBean(MetricsRegistry registry, String rsName) {
       super(registry, "HBaseInfo");
       mbeanName = MBeanUtil.registerMBean("HBase",
           "Info", this);
     }
-
+  
     public void shutdown() {
       if (mbeanName != null)
         MBeanUtil.unregisterMBean(mbeanName);
@@ -58,11 +58,11 @@ public class HBaseInfo {
       }
       return theInstance;
   }
-
+  
   // HBase jar info
   private MetricsString date = new MetricsString("date", registry,
       org.apache.hadoop.hbase.util.VersionInfo.getDate());
-  private MetricsString revision = new MetricsString("revision", registry,
+  private MetricsString revision = new MetricsString("revision", registry, 
       org.apache.hadoop.hbase.util.VersionInfo.getRevision());
   private MetricsString url = new MetricsString("url", registry,
       org.apache.hadoop.hbase.util.VersionInfo.getUrl());

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/metrics/MetricsString.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/metrics/MetricsString.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/metrics/MetricsString.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/metrics/MetricsString.java Wed Mar 12 21:17:13 2014
@@ -31,19 +31,19 @@ public class MetricsString extends Metri
 
   private String value;
 
-  public MetricsString(final String name, final MetricsRegistry registry,
+  public MetricsString(final String name, final MetricsRegistry registry, 
       final String value) {
     super(name, NO_DESCRIPTION);
     this.value = value;
     registry.add(name, this);
   }
-  public MetricsString(final String name, final String description,
+  public MetricsString(final String name, final String description, 
       final MetricsRegistry registry, final String value) {
     super(name, description);
     this.value = value;
     registry.add(name, this);
   }
-
+  
   public String getValue() {
     return this.value;
   }

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/metrics/PersistentMetricsTimeVaryingRate.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/metrics/PersistentMetricsTimeVaryingRate.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/metrics/PersistentMetricsTimeVaryingRate.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/metrics/PersistentMetricsTimeVaryingRate.java Wed Mar 12 21:17:13 2014
@@ -42,8 +42,8 @@ public class PersistentMetricsTimeVaryin
    * @param registry - where the metrics object will be registered
    * @param description metrics description
    */
-  public PersistentMetricsTimeVaryingRate(final String nam,
-      final MetricsRegistry registry,
+  public PersistentMetricsTimeVaryingRate(final String nam, 
+      final MetricsRegistry registry, 
       final String description) {
     super(nam, registry, description);
   }
@@ -53,14 +53,14 @@ public class PersistentMetricsTimeVaryin
    * @param nam the name of the metrics to be used to publish the metric
    * @param registry - where the metrics object will be registered
    */
-  public PersistentMetricsTimeVaryingRate(final String nam,
+  public PersistentMetricsTimeVaryingRate(final String nam, 
       MetricsRegistry registry) {
     this(nam, registry, NO_DESCRIPTION);
   }
 
   /**
    * Push updated metrics to the mr.
-   *
+   * 
    * Note this does NOT push to JMX
    * (JMX gets the info via {@link #getPreviousIntervalAverageTime()} and
    * {@link #getPreviousIntervalNumOps()}
@@ -88,7 +88,7 @@ public class PersistentMetricsTimeVaryin
       maintainStats();
     }
   }
-
+  
   /**
    * Increment the metrics for numOps operations
    * @param numOps - number of operations
@@ -99,7 +99,7 @@ public class PersistentMetricsTimeVaryin
     super.inc(numOps, time);
     totalOps += numOps;
   }
-
+  
   /**
    * Increment the metrics for numOps operations
    * @param time - time for numOps operations
@@ -109,7 +109,7 @@ public class PersistentMetricsTimeVaryin
     super.inc(time);
     ++totalOps;
   }
-
+  
   /**
    * Rollover to a new interval
    * NOTE: does not reset numOps.  this is an absolute value
@@ -119,7 +119,7 @@ public class PersistentMetricsTimeVaryin
   }
 
   /* MetricsTimeVaryingRate will reset every time pushMetric() is called
-   * This is annoying for long-running stats that might not get a single
+   * This is annoying for long-running stats that might not get a single 
    * operation in the polling period.  This function ensures that values
    * for those stat entries don't get reset.
    */

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/metrics/RequestMetrics.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/metrics/RequestMetrics.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/metrics/RequestMetrics.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/metrics/RequestMetrics.java Wed Mar 12 21:17:13 2014
@@ -30,7 +30,7 @@ public class RequestMetrics {
   private long lastUpdateTimeStamp = 0;
   private int requestPerSecond = 0;
   private static final long INTERVAL = 1000;
-
+  
   public RequestMetrics() {
     this.lastUpdateTimeStamp = EnvironmentEdgeManager.currentTimeMillis();
   }
@@ -38,11 +38,11 @@ public class RequestMetrics {
   public synchronized long getTotalRequestCount() {
     return totalRequestCount;
   }
-
+  
   public synchronized void incrTotalRequestCount(long incr) {
     this.totalRequestCount += incr;
   }
-
+  
   public synchronized void incrTotalRequestCount() {
     this.totalRequestCount ++;
   }
@@ -53,7 +53,7 @@ public class RequestMetrics {
   public synchronized int getRequestPerSecond() {
     long interval = EnvironmentEdgeManager.currentTimeMillis()
       - lastUpdateTimeStamp;
-    if (interval == 0)
+    if (interval == 0) 
       interval = 1;
 
     if (interval >= INTERVAL) {
@@ -61,12 +61,12 @@ public class RequestMetrics {
       int sec = (int) (interval / INTERVAL);
       long requsts = this.totalRequestCount - this.lastTotalRequestCount;
       requestPerSecond =  (int) (requsts / sec);
-
+      
       //update the last updated time stamp and last total request count
       this.lastTotalRequestCount = this.totalRequestCount;
       this.lastUpdateTimeStamp = EnvironmentEdgeManager.currentTimeMillis();
-    }
-
+    } 
+    
     return requestPerSecond;
   }
 

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandler.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandler.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandler.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandler.java Wed Mar 12 21:17:13 2014
@@ -25,8 +25,8 @@ import org.apache.hadoop.io.Writable;
 import java.lang.reflect.Method;
 
 /**
- * A MonitoredTask implementation optimized for use with RPC Handlers
- * handling frequent, short duration tasks. String concatenations and object
+ * A MonitoredTask implementation optimized for use with RPC Handlers 
+ * handling frequent, short duration tasks. String concatenations and object 
  * allocations are avoided in methods that will be hit by every RPC call.
  */
 public interface MonitoredRPCHandler extends MonitoredTask {
@@ -38,7 +38,7 @@ public interface MonitoredRPCHandler ext
   public abstract long getRPCQueueTime();
   public abstract boolean isRPCRunning();
   public abstract boolean isOperationRunning();
-
+  
   public abstract void setRPC(String methodName, Object [] params,
       long queueTime, Method realMethod);
   public abstract void setRPCPacket(Writable param);

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java Wed Mar 12 21:17:13 2014
@@ -33,11 +33,11 @@ import java.util.HashMap;
 import java.util.Map;
 
 /**
- * A MonitoredTask implementation designed for use with RPC Handlers
- * handling frequent, short duration tasks. String concatenations and object
+ * A MonitoredTask implementation designed for use with RPC Handlers 
+ * handling frequent, short duration tasks. String concatenations and object 
  * allocations are avoided in methods that will be hit by every RPC call.
  */
-public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl
+public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl 
   implements MonitoredRPCHandler {
   private String clientAddress;
   private int remotePort;
@@ -50,7 +50,7 @@ public class MonitoredRPCHandlerImpl ext
 
   public MonitoredRPCHandlerImpl() {
     super();
-    // in this implementation, WAITING indicates that the handler is not
+    // in this implementation, WAITING indicates that the handler is not 
     // actively servicing an RPC call.
     setState(State.WAITING);
   }
@@ -61,7 +61,7 @@ public class MonitoredRPCHandlerImpl ext
   }
 
   /**
-   * Gets the status of this handler; if it is currently servicing an RPC,
+   * Gets the status of this handler; if it is currently servicing an RPC, 
    * this status will include the RPC information.
    * @return a String describing the current status.
    */
@@ -74,7 +74,7 @@ public class MonitoredRPCHandlerImpl ext
   }
 
   /**
-   * Accesses the queue time for the currently running RPC on the
+   * Accesses the queue time for the currently running RPC on the 
    * monitored Handler.
    * @return the queue timestamp or -1 if there is no RPC currently running.
    */
@@ -86,7 +86,7 @@ public class MonitoredRPCHandlerImpl ext
   }
 
   /**
-   * Accesses the start time for the currently running RPC on the
+   * Accesses the start time for the currently running RPC on the 
    * monitored Handler.
    * @return the start timestamp or -1 if there is no RPC currently running.
    */
@@ -149,9 +149,9 @@ public class MonitoredRPCHandlerImpl ext
   }
 
   /**
-   * If an RPC call is currently running, produces a String representation of
+   * If an RPC call is currently running, produces a String representation of 
    * the connection from which it was received.
-   * @return A human-readable string representation of the address and port
+   * @return A human-readable string representation of the address and port 
    *  of the client.
    */
   public String getClient() {
@@ -159,7 +159,7 @@ public class MonitoredRPCHandlerImpl ext
   }
 
   /**
-   * Indicates to the client whether this task is monitoring a currently active
+   * Indicates to the client whether this task is monitoring a currently active 
    * RPC call.
    * @return true if the monitored handler is currently servicing an RPC call.
    */
@@ -168,8 +168,8 @@ public class MonitoredRPCHandlerImpl ext
   }
 
   /**
-   * Indicates to the client whether this task is monitoring a currently active
-   * RPC call to a database command. (as defined by
+   * Indicates to the client whether this task is monitoring a currently active 
+   * RPC call to a database command. (as defined by 
    * o.a.h.h.client.Operation)
    * @return true if the monitored handler is currently servicing an RPC call
    * to a database command.
@@ -191,7 +191,7 @@ public class MonitoredRPCHandlerImpl ext
    * @param methodName The name of the method that will be called by the RPC.
    * @param params The parameters that will be passed to the indicated method.
    */
-  public synchronized void setRPC(String methodName, Object [] params,
+  public synchronized void setRPC(String methodName, Object [] params, 
       long queueTime, Method realMethod) {
     this.realMethod = realMethod;
     this.methodName = methodName;
@@ -202,7 +202,7 @@ public class MonitoredRPCHandlerImpl ext
   }
 
   /**
-   * Gives this instance a reference to the Writable received by the RPC, so
+   * Gives this instance a reference to the Writable received by the RPC, so 
    * that it can later compute its size if asked for it.
    * @param param The Writable received by the RPC for this call
    */
@@ -215,7 +215,7 @@ public class MonitoredRPCHandlerImpl ext
    * @param clientAddress the address of the current client
    * @param remotePort the port from which the client connected
    */
-  public synchronized void setConnection(String clientAddress,
+  public synchronized void setConnection(String clientAddress, 
       int remotePort) {
     this.clientAddress = clientAddress;
     this.remotePort = remotePort;

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java Wed Mar 12 21:17:13 2014
@@ -57,14 +57,14 @@ public interface MonitoredTask extends C
   public abstract void cleanup();
 
   /**
-   * Public exposure of Object.clone() in order to allow clients to easily
+   * Public exposure of Object.clone() in order to allow clients to easily 
    * capture current state.
    * @returns a copy of the object whose references will not change
    */
   public abstract MonitoredTask clone();
 
   /**
-   * Creates a string map of internal details for extensible exposure of
+   * Creates a string map of internal details for extensible exposure of 
    * monitored tasks.
    * @return A Map containing information for this task.
    */

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java Wed Mar 12 21:17:13 2014
@@ -31,10 +31,10 @@ class MonitoredTaskImpl implements Monit
   private long startTime;
   private long statusTime;
   private long stateTime;
-
+  
   private volatile String status;
   private volatile String description;
-
+  
   protected volatile State state = State.RUNNING;
 
   HBaseServer processingServer;
@@ -58,7 +58,7 @@ class MonitoredTaskImpl implements Monit
   public long getStartTime() {
     return startTime;
   }
-
+  
   @Override
   public String getDescription() {
     return description;
@@ -73,17 +73,17 @@ class MonitoredTaskImpl implements Monit
   public long getStatusTime() {
     return statusTime;
   }
-
+  
   @Override
   public State getState() {
     return state;
   }
-
+  
   @Override
   public long getStateTime() {
     return stateTime;
   }
-
+  
   @Override
   public long getCompletionTimestamp() {
     if (state == State.COMPLETE || state == State.ABORTED) {
@@ -115,7 +115,7 @@ class MonitoredTaskImpl implements Monit
     setStatus(msg);
     setState(State.ABORTED);
   }
-
+  
   @Override
   public void setStatus(String status) {
     this.status = status;

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java Wed Mar 12 21:17:13 2014
@@ -47,7 +47,7 @@ public class TaskMonitor {
 
   @VisibleForTesting
   static final int MAX_TASKS = 1000;
-
+  
   private static TaskMonitor instance;
   private List<TaskAndWeakRefPair> tasks =
     Lists.newArrayList();
@@ -62,7 +62,7 @@ public class TaskMonitor {
     }
     return instance;
   }
-
+  
   public synchronized MonitoredTask createStatus(String description) {
     MonitoredTask stat = new MonitoredTaskImpl();
     stat.setDescription(description);
@@ -89,12 +89,12 @@ public class TaskMonitor {
 
   private synchronized void purgeExpiredTasks() {
     int size = 0;
-
+    
     for (Iterator<TaskAndWeakRefPair> it = tasks.iterator();
          it.hasNext();) {
       TaskAndWeakRefPair pair = it.next();
       MonitoredTask stat = pair.get();
-
+      
       if (pair.isDead()) {
         // The class who constructed this leaked it. So we can
         // assume it's done.
@@ -104,14 +104,14 @@ public class TaskMonitor {
           stat.cleanup();
         }
       }
-
+      
       if (canPurge(stat)) {
         it.remove();
       } else {
         size++;
       }
     }
-
+    
     if (size > MAX_TASKS) {
       LOG.warn("Too many actions in action monitor! Purging some.");
       tasks = tasks.subList(size - MAX_TASKS, size);
@@ -119,7 +119,7 @@ public class TaskMonitor {
   }
 
   /**
-   * Produces a list containing copies of the current state of all non-expired
+   * Produces a list containing copies of the current state of all non-expired 
    * MonitoredTasks handled by this TaskMonitor.
    * @return A complete list of MonitoredTasks.
    */
@@ -137,7 +137,7 @@ public class TaskMonitor {
     long cts = stat.getCompletionTimestamp();
     return (cts > 0 && System.currentTimeMillis() - cts > EXPIRATION_TIME);
   }
-
+  
   /**
    * This class encapsulates an object as well as a weak reference to a proxy
    * that passes through calls to that object. In art form:
@@ -147,7 +147,7 @@ public class TaskMonitor {
    *       v                        \
    * PassthroughInvocationHandler   |  weak reference
    *       |                       /
-   * MonitoredTaskImpl            /
+   * MonitoredTaskImpl            / 
    *       |                     /
    * StatAndWeakRefProxy  ------/
    *
@@ -159,29 +159,29 @@ public class TaskMonitor {
   private static class TaskAndWeakRefPair {
     private MonitoredTask impl;
     private WeakReference<MonitoredTask> weakProxy;
-
+    
     public TaskAndWeakRefPair(MonitoredTask stat,
         MonitoredTask proxy) {
       this.impl = stat;
       this.weakProxy = new WeakReference<MonitoredTask>(proxy);
     }
-
+    
     public MonitoredTask get() {
       return impl;
     }
-
+    
     public boolean isDead() {
       return weakProxy.get() == null;
     }
   }
-
+  
   /**
-   * An InvocationHandler that simply passes through calls to the original
+   * An InvocationHandler that simply passes through calls to the original 
    * object.
    */
   private static class PassthroughInvocationHandler<T> implements InvocationHandler {
     private T delegatee;
-
+    
     public PassthroughInvocationHandler(T delegatee) {
       this.delegatee = delegatee;
     }
@@ -190,6 +190,6 @@ public class TaskMonitor {
     public Object invoke(Object proxy, Method method, Object[] args)
         throws Throwable {
       return method.invoke(delegatee, args);
-    }
+    }    
   }
 }

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java?rev=1576909&r1=1576908&r2=1576909&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java Wed Mar 12 21:17:13 2014
@@ -136,7 +136,7 @@ public class CompactSplitThread implemen
       requestCompaction(r, s, why, p);
     }
   }
-
+  
   /**
    * @param r HRegion store belongs to
    * @param s Store to request compaction on