Posted to commits@hbase.apache.org by en...@apache.org on 2014/04/08 17:45:10 UTC

svn commit: r1585765 [1/2] - in /hbase/branches/hbase-10070: hbase-client/src/main/java/org/apache/hadoop/hbase/ hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ hbase-client/src/tes...

Author: enis
Date: Tue Apr  8 15:45:09 2014
New Revision: 1585765

URL: http://svn.apache.org/r1585765
Log:
HBASE-10701 Cache invalidation improvements from client side (REVERTED commit which includes HBASE-10701 and HBASE-10859)

Modified:
    hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
    hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java
    hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
    hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
    hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
    hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java
    hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
    hbase/branches/hbase-10070/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java
    hbase/branches/hbase-10070/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedCompletionService.java
    hbase/branches/hbase-10070/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java
    hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java
    hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
    hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
    hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
    hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
    hbase/branches/hbase-10070/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
    hbase/branches/hbase-10070/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
    hbase/branches/hbase-10070/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
    hbase/branches/hbase-10070/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
    hbase/branches/hbase-10070/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
    hbase/branches/hbase-10070/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java
    hbase/branches/hbase-10070/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java

Modified: hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java?rev=1585765&r1=1585764&r2=1585765&view=diff
==============================================================================
--- hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java (original)
+++ hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java Tue Apr  8 15:45:09 2014
@@ -561,9 +561,7 @@ public class HRegionInfo implements Comp
         break;
       }
     }
-    if (offset == -1) {
-      throw new IOException("Invalid regionName format: " + Bytes.toStringBinary(regionName));
-    }
+    if(offset == -1) throw new IOException("Invalid regionName format");
     byte[] tableName = new byte[offset];
     System.arraycopy(regionName, 0, tableName, 0, offset);
     offset = -1;
@@ -592,9 +590,7 @@ public class HRegionInfo implements Comp
         break;
       }
     }
-    if (offset == -1) {
-      throw new IOException("Invalid regionName format: " + Bytes.toStringBinary(regionName));
-    }
+    if(offset == -1) throw new IOException("Invalid regionName format");
     byte [] startKey = HConstants.EMPTY_BYTE_ARRAY;
     if(offset != tableName.length + 1) {
       startKey = new byte[offset - tableName.length - 1];
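
The two hunks above revert HBASE-10701's richer parse error, dropping the
offending bytes from the message. For context, a minimal standalone sketch of
the delimiter-scan pattern the method uses, with a simplified stand-in for
Bytes.toStringBinary (names here are illustrative, not the HRegionInfo
internals):

    import java.io.IOException;

    public class RegionNameParser {
      private static final byte DELIMITER = ',';

      // Scans for the first ',' delimiter; the bytes before it are the table name.
      static byte[] extractTableName(byte[] regionName) throws IOException {
        int offset = -1;
        for (int i = 0; i < regionName.length; i++) {
          if (regionName[i] == DELIMITER) {
            offset = i;
            break;
          }
        }
        if (offset == -1) {
          // The pre-revert message included the binary-escaped name, which
          // makes malformed region names much easier to debug.
          throw new IOException(
              "Invalid regionName format: " + toStringBinary(regionName));
        }
        byte[] tableName = new byte[offset];
        System.arraycopy(regionName, 0, tableName, 0, offset);
        return tableName;
      }

      // Simplified stand-in for org.apache.hadoop.hbase.util.Bytes.toStringBinary.
      static String toStringBinary(byte[] b) {
        StringBuilder sb = new StringBuilder();
        for (byte x : b) {
          int ch = x & 0xFF;
          if (ch >= ' ' && ch <= '~') sb.append((char) ch);
          else sb.append(String.format("\\x%02X", ch));
        }
        return sb.toString();
      }
    }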

Modified: hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java?rev=1585765&r1=1585764&r2=1585765&view=diff
==============================================================================
--- hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java (original)
+++ hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java Tue Apr  8 15:45:09 2014
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase;
 import java.util.Collection;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -33,42 +34,27 @@ import org.apache.hadoop.hbase.util.Byte
 public class RegionLocations {
 
   private final int numNonNullElements;
-
-  // locations array contains the HRL objects for known region replicas indexes by the replicaId.
-  // elements can be null if the region replica is not known at all. A null value indicates
-  // that there is a region replica with the index as replicaId, but the location is not known
-  // in the cache.
   private final HRegionLocation[] locations; // replicaId -> HRegionLocation.
 
   /**
    * Constructs the region location list. The locations array should
    * contain all the locations for known replicas for the region, and should be
-   * sorted in replicaId ascending order, although it can contain nulls indicating replicaIds
-   * that the locations of which are not known.
+   * sorted in replicaId ascending order.
    * @param locations an array of HRegionLocations for the same region range
    */
   public RegionLocations(HRegionLocation... locations) {
     int numNonNullElements = 0;
     int maxReplicaId = -1;
-    int maxReplicaIdIndex = -1;
-    int index = 0;
     for (HRegionLocation loc : locations) {
       if (loc != null) {
-        if (loc.getServerName() != null) {
-          numNonNullElements++;
-        }
-        if (loc.getRegionInfo().getReplicaId() >= maxReplicaId) {
+        numNonNullElements++;
+        if (loc.getRegionInfo().getReplicaId() > maxReplicaId) {
           maxReplicaId = loc.getRegionInfo().getReplicaId();
-          maxReplicaIdIndex = index;
         }
       }
-      index++;
     }
     this.numNonNullElements = numNonNullElements;
 
-    // account for the null elements in the array after maxReplicaIdIndex
-    maxReplicaId = maxReplicaId + (locations.length - (maxReplicaIdIndex + 1) );
-
     if (maxReplicaId + 1 == locations.length) {
       this.locations = locations;
     } else {
@@ -111,10 +97,10 @@ public class RegionLocations {
   }
 
   /**
-   * Returns a new RegionLocations with the locations removed (set to null)
+   * Returns a new HRegionLocationList with the locations removed (set to null)
    * which have the destination server as given.
    * @param serverName the serverName to remove locations of
-   * @return an RegionLocations object with removed locations or the same object
+   * @return an HRegionLocationList object with removed locations or the same object
    * if nothing is removed
    */
   public RegionLocations removeByServer(ServerName serverName) {
@@ -137,58 +123,36 @@ public class RegionLocations {
   /**
    * Removes the given location from the list
    * @param location the location to remove
-   * @return an RegionLocations object with removed locations or the same object
+   * @return an HRegionLocationList object with removed locations or the same object
    * if nothing is removed
    */
   public RegionLocations remove(HRegionLocation location) {
-    if (location == null) return this;
-    if (location.getRegionInfo() == null) return this;
-    int replicaId = location.getRegionInfo().getReplicaId();
-    if (replicaId >= locations.length) return this;
-
-    // check whether something to remove. HRL.compareTo() compares ONLY the
-    // serverName. We want to compare the HRI's as well.
-    if (locations[replicaId] == null
-        || !location.getRegionInfo().equals(locations[replicaId].getRegionInfo())
-        || !location.equals(locations[replicaId])) {
-      return this;
-    }
-
-    HRegionLocation[] newLocations = new HRegionLocation[locations.length];
-    System.arraycopy(locations, 0, newLocations, 0, locations.length);
-    newLocations[replicaId] = null;
-
-    return new RegionLocations(newLocations);
-  }
-
-  /**
-   * Removes location of the given replicaId from the list
-   * @param replicaId the replicaId of the location to remove
-   * @return an RegionLocations object with removed locations or the same object
-   * if nothing is removed
-   */
-  public RegionLocations remove(int replicaId) {
-    if (getRegionLocation(replicaId) == null) {
-      return this;
-    }
-
-    HRegionLocation[] newLocations = new HRegionLocation[locations.length];
-
-    System.arraycopy(locations, 0, newLocations, 0, locations.length);
-    if (replicaId < newLocations.length) {
-      newLocations[replicaId] = null;
+    HRegionLocation[] newLocations = null;
+    for (int i = 0; i < locations.length; i++) {
+      // check whether something to remove. HRL.compareTo() compares ONLY the
+      // serverName. We want to compare the HRI's as well.
+      if (locations[i] != null
+          && location.getRegionInfo().equals(locations[i].getRegionInfo())
+          && location.equals(locations[i])) {
+        if (newLocations == null) { //first time
+          newLocations = new HRegionLocation[locations.length];
+          System.arraycopy(locations, 0, newLocations, 0, i);
+        }
+        newLocations[i] = null;
+      } else if (newLocations != null) {
+        newLocations[i] = locations[i];
+      }
     }
-
-    return new RegionLocations(newLocations);
+    return newLocations == null ? this : new RegionLocations(newLocations);
   }
 
   /**
-   * Merges this RegionLocations list with the given list assuming
+   * Merges this HRegionLocation list with the given list assuming
    * same range, and keeping the most up to date version of the
    * HRegionLocation entries from either list according to seqNum. If seqNums
    * are equal, the location from the argument (other) is taken.
    * @param other the locations to merge with
-   * @return an RegionLocations object with merged locations or the same object
+   * @return an HRegionLocationList object with merged locations or the same object
    * if nothing is merged
    */
   public RegionLocations mergeLocations(RegionLocations other) {
@@ -196,9 +160,7 @@ public class RegionLocations {
 
     HRegionLocation[] newLocations = null;
 
-    // Use the length from other, since it is coming from meta. Otherwise,
-    // in case of region replication going down, we might have a leak here.
-    int max = other.locations.length;
+    int max = Math.max(this.locations.length, other.locations.length);
 
     for (int i = 0; i < max; i++) {
       HRegionLocation thisLoc = this.getRegionLocation(i);
@@ -245,7 +207,7 @@ public class RegionLocations {
    * @param checkForEquals whether to update the location if seqNums for the
    * HRegionLocations for the old and new location are the same
    * @param force whether to force update
-   * @return an RegionLocations object with updated locations or the same object
+   * @return an HRegionLocationList object with updated locations or the same object
    * if nothing is updated
    */
   public RegionLocations updateLocation(HRegionLocation location,
@@ -320,10 +282,12 @@ public class RegionLocations {
   public String toString() {
     StringBuilder builder = new StringBuilder("[");
     for (HRegionLocation loc : locations) {
-      if (builder.length() > 1) {
-        builder.append(", ");
+      if (loc != null) {
+        if (builder.length() > 1) {
+          builder.append(", ");
+        }
+        builder.append(loc);
       }
-      builder.append(loc == null ? "null" : loc);
     }
     builder.append("]");
     return builder.toString();
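
The reinstated remove(HRegionLocation) above uses a lazily-allocated
copy-on-write array: no replacement array is created until the first matching
element is found, so the common no-op case allocates nothing and returns the
same instance, which callers use as a "nothing changed" signal. A generic
sketch of the same idiom, under that assumption (not the HBase types):

    import java.util.Arrays;

    public final class CowArray {
      // Returns 'src' itself if 'victim' is absent; otherwise a copy with the
      // matching slots nulled out. Allocation happens only on the first match.
      static <T> T[] removeAll(T[] src, T victim) {
        T[] copy = null;
        for (int i = 0; i < src.length; i++) {
          if (victim.equals(src[i])) {
            if (copy == null) {                   // first match: copy now
              copy = Arrays.copyOf(src, src.length);
            }
            copy[i] = null;
          }
        }
        return copy == null ? src : copy;         // identity == "no change"
      }

      public static void main(String[] args) {
        Integer[] a = {1, 2, 3, 2};
        System.out.println(Arrays.toString(removeAll(a, 2))); // [1, null, 3, null]
        System.out.println(removeAll(a, 9) == a);             // true: untouched
      }
    }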

Modified: hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java?rev=1585765&r1=1585764&r2=1585765&view=diff
==============================================================================
--- hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java (original)
+++ hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java Tue Apr  8 15:45:09 2014
@@ -237,7 +237,7 @@ public class MetaReader {
       parsedInfo = parseRegionInfoFromRegionName(regionName);
       row = getMetaKeyForRegion(parsedInfo);
     } catch (Exception parseEx) {
-      // Ignore. This is used with tableName passed as regionName.
+      LOG.warn("Received parse exception:" + parseEx);
     }
     Get get = new Get(row);
     get.addFamily(HConstants.CATALOG_FAMILY);
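
The pre-revert comment documents intent: callers sometimes pass a bare table
name where a region name is expected, so the parse failure is the normal path
and was deliberately swallowed; the reinstated LOG.warn turns that expected
case into log noise. A hedged sketch of the try-parse-with-fallback idiom,
with illustrative names (not the MetaReader internals):

    import java.io.IOException;

    public class MetaKeyResolver {
      // Accepts either a full region name or a bare table name.
      static byte[] resolveMetaKey(byte[] regionOrTableName) {
        try {
          return metaKeyFromRegionName(regionOrTableName);  // full region name
        } catch (IOException expected) {
          // Normal when a bare table name was passed; no warning needed.
          return regionOrTableName;
        }
      }

      static byte[] metaKeyFromRegionName(byte[] regionName) throws IOException {
        if (indexOf(regionName, (byte) ',') < 0) {
          throw new IOException("Invalid regionName format");
        }
        return regionName;  // the real code derives the key from parsed parts
      }

      private static int indexOf(byte[] a, byte b) {
        for (int i = 0; i < a.length; i++) {
          if (a[i] == b) return i;
        }
        return -1;
      }
    }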

Modified: hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java?rev=1585765&r1=1585764&r2=1585765&view=diff
==============================================================================
--- hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java (original)
+++ hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java Tue Apr  8 15:45:09 2014
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.protobuf.
 interface ClusterConnection extends HConnection {
 
   /** @return - true if the master server is running */
-  @Override
   boolean isMasterRunning()
       throws MasterNotRunningException, ZooKeeperConnectionException;
 
@@ -54,10 +53,9 @@ interface ClusterConnection extends HCon
    * @throws IOException
    *           if a remote or network exception occurs
    */
-  @Override
   boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws
       IOException;
-
+  
   /**
    * Find the location of the region of <i>tableName</i> that <i>row</i>
    * lives in.
@@ -67,14 +65,12 @@ interface ClusterConnection extends HCon
    * question
    * @throws IOException if a remote or network exception occurs
    */
-  @Override
   public HRegionLocation locateRegion(final TableName tableName,
       final byte [] row) throws IOException;
 
   /**
    * Allows flushing the region cache.
    */
-  @Override
   void clearRegionCache();
 
   /**
@@ -83,14 +79,12 @@ interface ClusterConnection extends HCon
    * @param tableName Name of the table whose regions we are to remove from
    * cache.
    */
-  @Override
   void clearRegionCache(final TableName tableName);
 
   /**
    * Deletes cached locations for the specific region.
    * @param location The location object for the region, to be purged from cache.
    */
-  @Override
   void deleteCachedRegionLocation(final HRegionLocation location);
 
   /**
@@ -102,24 +96,10 @@ interface ClusterConnection extends HCon
    * question
    * @throws IOException if a remote or network exception occurs
    */
-  @Override
   HRegionLocation relocateRegion(final TableName tableName,
       final byte [] row) throws IOException;
 
   /**
-   * Find the location of the region of <i>tableName</i> that <i>row</i>
-   * lives in, ignoring any value that might be in the cache.
-   * @param tableName name of the table <i>row</i> is in
-   * @param row row key you're trying to find the region of
-   * @param replicaId the replicaId of the region
-   * @return HRegionLocation that describes where to find the region in
-   * question
-   * @throws IOException if a remote or network exception occurs
-   */
-  HRegionLocation relocateRegion(final TableName tableName,
-      final byte [] row, int replicaId) throws IOException;
-
-  /**
    * Update the location cache. This is used internally by HBase, in most cases it should not be
    *  used by the client application.
    * @param tableName the table name
@@ -139,7 +119,6 @@ interface ClusterConnection extends HCon
    * question
    * @throws IOException if a remote or network exception occurs
    */
-  @Override
   HRegionLocation locateRegion(final byte[] regionName)
   throws IOException;
 
@@ -149,7 +128,6 @@ interface ClusterConnection extends HCon
    * @return list of region locations for all regions of table
    * @throws IOException
    */
-  @Override
   List<HRegionLocation> locateRegions(final TableName tableName) throws IOException;
 
   /**
@@ -161,7 +139,6 @@ interface ClusterConnection extends HCon
    * @return list of region locations for all regions of table
    * @throws IOException
    */
-  @Override
   List<HRegionLocation> locateRegions(final TableName tableName,
       final boolean useCache,
       final boolean offlined) throws IOException;
@@ -177,24 +154,9 @@ interface ClusterConnection extends HCon
    */
   RegionLocations locateRegion(TableName tableName,
                                byte[] row, boolean useCache, boolean retry) throws IOException;
-
-  /**
-  *
-  * @param tableName table to get regions of
-  * @param row the row
-  * @param useCache Should we use the cache to retrieve the region information.
-  * @param retry do we retry
-  * @param replicaId the replicaId for the region
-  * @return region locations for this row.
-  * @throws IOException
-  */
- RegionLocations locateRegion(TableName tableName,
-                              byte[] row, boolean useCache, boolean retry, int replicaId) throws IOException;
-
   /**
    * Returns a {@link MasterKeepAliveConnection} to the active master
    */
-  @Override
   MasterService.BlockingInterface getMaster() throws IOException;
 
 
@@ -204,7 +166,6 @@ interface ClusterConnection extends HCon
    * @return proxy for HRegionServer
    * @throws IOException if a remote or network exception occurs
    */
-  @Override
   AdminService.BlockingInterface getAdmin(final ServerName serverName) throws IOException;
 
   /**
@@ -216,7 +177,6 @@ interface ClusterConnection extends HCon
    * @throws IOException if a remote or network exception occurs
    *
    */
-  @Override
   ClientService.BlockingInterface getClient(final ServerName serverName) throws IOException;
 
   /**
@@ -227,7 +187,6 @@ interface ClusterConnection extends HCon
    * @return Location of row.
    * @throws IOException if a remote or network exception occurs
    */
-  @Override
   HRegionLocation getRegionLocation(TableName tableName, byte [] row,
     boolean reload)
   throws IOException;
@@ -236,7 +195,6 @@ interface ClusterConnection extends HCon
    * Clear any caches that pertain to server name <code>sn</code>.
    * @param sn A server name
    */
-  @Override
   void clearCaches(final ServerName sn);
 
   /**
@@ -245,7 +203,6 @@ interface ClusterConnection extends HCon
    * @return The shared instance. Never returns null.
    * @throws MasterNotRunningException
    */
-  @Override
   @Deprecated
   MasterKeepAliveConnection getKeepAliveMasterService()
   throws MasterNotRunningException;
@@ -254,14 +211,12 @@ interface ClusterConnection extends HCon
    * @param serverName
    * @return true if the server is known as dead, false otherwise.
    * @deprecated internal method, do not use thru HConnection */
-  @Override
   @Deprecated
   boolean isDeadServer(ServerName serverName);
 
   /**
    * @return Nonce generator for this HConnection; may be null if disabled in configuration.
    */
-  @Override
   public NonceGenerator getNonceGenerator();
 
   /**
@@ -273,5 +228,4 @@ interface ClusterConnection extends HCon
    * @return All locations for a particular region.
    */
   RegionLocations locateRegionAll(TableName tableName, byte[] row) throws IOException;
-
 }
\ No newline at end of file
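
The @Override annotations stripped throughout this file are pure compile-time
guards: on a method that a sub-interface redeclares from its parent, the
annotation makes the compiler reject accidental signature drift. A tiny
illustration with hypothetical interfaces (not the HBase ones):

    interface Conn {
      void clearRegionCache();
    }

    interface ClusterConn extends Conn {
      @Override
      void clearRegionCache();      // compiles: matches the inherited method

      // With @Override, a typo such as clearRegionCach() fails to compile;
      // without it, the typo silently declares a brand-new method.
    }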

Modified: hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java?rev=1585765&r1=1585764&r2=1585765&view=diff
==============================================================================
--- hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java (original)
+++ hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java Tue Apr  8 15:45:09 2014
@@ -35,6 +35,9 @@ import java.util.NavigableMap;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.ConcurrentSkipListSet;
+import java.util.concurrent.CopyOnWriteArraySet;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
@@ -45,6 +48,7 @@ import java.util.concurrent.atomic.Atomi
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -75,87 +79,7 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.*;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
@@ -430,7 +354,6 @@ class ConnectionManager {
    * @param conf configuration whose identity is used to find {@link HConnection} instance.
    * @deprecated
    */
-  @Deprecated
   public static void deleteConnection(Configuration conf) {
     deleteConnection(new HConnectionKey(conf), false);
   }
@@ -442,7 +365,6 @@ class ConnectionManager {
    * @param connection
    * @deprecated
    */
-  @Deprecated
   public static void deleteStaleConnection(HConnection connection) {
     deleteConnection(connection, true);
   }
@@ -453,7 +375,6 @@ class ConnectionManager {
    *  staleConnection to true.
    * @deprecated
    */
-  @Deprecated
   public static void deleteAllConnections(boolean staleConnection) {
     synchronized (CONNECTION_INSTANCES) {
       Set<HConnectionKey> connectionKeys = new HashSet<HConnectionKey>();
@@ -1082,12 +1003,6 @@ class ConnectionManager {
     @Override
     public HRegionLocation relocateRegion(final TableName tableName,
         final byte [] row) throws IOException{
-      return relocateRegion(tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID);
-    }
-
-    @Override
-    public HRegionLocation relocateRegion(final TableName tableName,
-        final byte [] row, int replicaId) throws IOException{
       // Since this is an explicit request not to use any caching, finding
       // disabled tables should not be desirable.  This will ensure that an exception is thrown when
       // the first time a disabled table is interacted with.
@@ -1095,8 +1010,8 @@ class ConnectionManager {
         throw new TableNotEnabledException(tableName.getNameAsString() + " is disabled.");
       }
 
-      RegionLocations locations = locateRegion(tableName, row, false, true, replicaId);
-      return locations == null ? null : locations.getRegionLocation(replicaId);
+      RegionLocations locations = locateRegion(tableName, row, false, true);
+      return locations == null ? null : locations.getRegionLocation();
     }
 
     @Override
@@ -1105,16 +1020,10 @@ class ConnectionManager {
       return relocateRegion(TableName.valueOf(tableName), row);
     }
 
-    @Override
-    public RegionLocations locateRegion(final TableName tableName,
-      final byte [] row, boolean useCache, boolean retry)
-    throws IOException {
-      return locateRegion(tableName, row, useCache, retry, RegionReplicaUtil.DEFAULT_REPLICA_ID);
-    }
 
     @Override
     public RegionLocations locateRegion(final TableName tableName,
-      final byte [] row, boolean useCache, boolean retry, int replicaId)
+      final byte [] row, boolean useCache, boolean retry)
     throws IOException {
       if (this.closed) throw new IOException(toString() + " closed");
       if (tableName== null || tableName.getName().length == 0) {
@@ -1127,7 +1036,7 @@ class ConnectionManager {
       } else {
         // Region not in the cache - have to go to the meta RS
         return locateRegionInMeta(TableName.META_TABLE_NAME, tableName, row,
-          useCache, userRegionLock, retry, replicaId);
+          useCache, userRegionLock, retry);
       }
     }
 
@@ -1191,15 +1100,15 @@ class ConnectionManager {
       */
     private RegionLocations locateRegionInMeta(final TableName parentTable,
       final TableName tableName, final byte [] row, boolean useCache,
-      Object regionLockObject, boolean retry, int replicaId)
+      Object regionLockObject, boolean retry)
     throws IOException {
-      RegionLocations locations;
+      RegionLocations location;
       // If we are supposed to be using the cache, look in the cache to see if
       // we already have the region.
       if (useCache) {
-        locations = getCachedLocation(tableName, row);
-        if (locations != null && locations.getRegionLocation(replicaId) != null) {
-          return locations;
+        location = getCachedLocation(tableName, row);
+        if (location != null) {
+          return location;
         }
       }
       int localNumRetries = retry ? numTries : 1;
@@ -1218,7 +1127,7 @@ class ConnectionManager {
         try {
           // locate the meta region
           RegionLocations metaLocations = locateRegion(parentTable, metaKey, true, false);
-          metaLocation = metaLocations == null ? null : metaLocations.getDefaultRegionLocation();
+          metaLocation = metaLocations == null ? null : metaLocations.getRegionLocation();
           // If null still, go around again.
           if (metaLocation == null) continue;
           ClientService.BlockingInterface service = getClient(metaLocation.getServerName());
@@ -1233,23 +1142,23 @@ class ConnectionManager {
               synchronized (regionLockObject) {
                 // Check the cache again for a hit in case some other thread made the
                 // same query while we were waiting on the lock.
-                locations = getCachedLocation(tableName, row);
-                if (locations != null && locations.getRegionLocation(replicaId) != null) {
-                  return locations;
+                location = getCachedLocation(tableName, row);
+                if (location != null) {
+                  return location;
                 }
                 // If the parent table is META, we may want to pre-fetch some
                 // region info into the global region cache for this table.
                 prefetchRegionCache(tableName, row);
               }
             }
-            locations = getCachedLocation(tableName, row);
-            if (locations != null && locations.getRegionLocation(replicaId) != null) {
-              return locations;
+            location = getCachedLocation(tableName, row);
+            if (location != null) {
+              return location;
             }
           } else {
             // If we are not supposed to be using the cache, delete any existing cached location
             // so it won't interfere.
-            metaCache.clearCache(tableName, row, replicaId);
+            metaCache.clearCache(tableName, row);
           }
 
           // Query the meta region for the location of the meta region
@@ -1262,12 +1171,12 @@ class ConnectionManager {
           }
 
           // convert the row result into the HRegionLocation we need!
-          locations = MetaReader.getRegionLocations(regionInfoRow);
-          if (locations == null || locations.getRegionLocation(replicaId) == null) {
+          location = MetaReader.getRegionLocations(regionInfoRow);
+          if (location == null || location.getRegionLocation() == null) {
             throw new IOException("HRegionInfo was null in " +
               parentTable + ", row=" + regionInfoRow);
           }
-          HRegionInfo regionInfo = locations.getRegionLocation(replicaId).getRegionInfo();
+          HRegionInfo regionInfo = location.getRegionLocation().getRegionInfo();
           if (regionInfo == null) {
             throw new IOException("HRegionInfo was null or empty in " +
               parentTable + ", row=" + regionInfoRow);
@@ -1291,7 +1200,7 @@ class ConnectionManager {
               regionInfo.getRegionNameAsString());
           }
 
-          ServerName serverName = locations.getRegionLocation(replicaId).getServerName();
+          ServerName serverName = location.getRegionLocation().getServerName();
           if (serverName == null) {
             throw new NoServerForRegionException("No server address listed " +
               "in " + parentTable + " for region " +
@@ -1305,8 +1214,8 @@ class ConnectionManager {
                 ", but it is dead.");
           }
 
-          cacheLocation(tableName, locations);
-          return locations;
+          cacheLocation(tableName, location);
+          return location;
         } catch (TableNotFoundException e) {
           // if we got this error, probably means the table just plain doesn't
           // exist. rethrow the error immediately. this should always be coming
@@ -1333,7 +1242,7 @@ class ConnectionManager {
           // Only relocate the parent region if necessary
           if(!(e instanceof RegionOfflineException ||
               e instanceof NoServerForRegionException)) {
-            relocateRegion(parentTable, metaKey, replicaId);
+            relocateRegion(parentTable, metaKey);
           }
         }
         try{
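
locateRegionInMeta above follows a check-lock-recheck shape: probe the cache
lock-free, and only on a miss take userRegionLock, probe again (another thread
may have filled the entry while we waited), and let a single thread pay for
the meta scan. A minimal modern-Java sketch of that pattern, assuming a
caller-supplied loader in place of the meta-table RPC:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.function.Function;

    public class LocationCache<K, V> {
      private final ConcurrentMap<K, V> cache = new ConcurrentHashMap<>();
      private final Object lock = new Object();
      private final Function<K, V> loader;   // stands in for the meta scan

      public LocationCache(Function<K, V> loader) {
        this.loader = loader;
      }

      public V locate(K key) {
        V v = cache.get(key);                // fast path: lock-free on a hit
        if (v != null) {
          return v;
        }
        synchronized (lock) {
          v = cache.get(key);                // recheck under the lock
          if (v != null) {
            return v;                        // another caller already filled it
          }
          v = loader.apply(key);             // expensive lookup, done once
          cache.put(key, v);
          return v;
        }
      }
    }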

Modified: hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java?rev=1585765&r1=1585764&r2=1585765&view=diff
==============================================================================
--- hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java (original)
+++ hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java Tue Apr  8 15:45:09 2014
@@ -114,9 +114,6 @@ public class MetaCache {
     RegionLocations oldLocations = tableLocations.putIfAbsent(startKey, locations);
     boolean isNewCacheEntry = (oldLocations == null);
     if (isNewCacheEntry) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Cached location: " + location);
-      }
       addToCachedServers(locations);
       return;
     }
@@ -134,10 +131,7 @@ public class MetaCache {
     // an additional counter on top of seqNum would be necessary to handle them all.
     RegionLocations updatedLocations = oldLocations.updateLocation(location, false, force);
     if (oldLocations != updatedLocations) {
-      boolean replaced = tableLocations.replace(startKey, oldLocations, updatedLocations);
-      if (replaced && LOG.isTraceEnabled()) {
-        LOG.trace("Changed cached location to: " + location);
-      }
+      tableLocations.replace(startKey, oldLocations, updatedLocations);
       addToCachedServers(updatedLocations);
     }
   }
@@ -145,30 +139,24 @@ public class MetaCache {
   /**
    * Put a newly discovered HRegionLocation into the cache.
    * @param tableName The table name.
-   * @param locations the new locations
+   * @param location the new location
    */
-  public void cacheLocation(final TableName tableName, final RegionLocations locations) {
-    byte [] startKey = locations.getRegionLocation().getRegionInfo().getStartKey();
+  public void cacheLocation(final TableName tableName, final RegionLocations location) {
+    byte [] startKey = location.getRegionLocation().getRegionInfo().getStartKey();
     ConcurrentMap<byte[], RegionLocations> tableLocations = getTableLocations(tableName);
-    RegionLocations oldLocation = tableLocations.putIfAbsent(startKey, locations);
+    RegionLocations oldLocation = tableLocations.putIfAbsent(startKey, location);
     boolean isNewCacheEntry = (oldLocation == null);
     if (isNewCacheEntry) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Cached location: " + locations);
-      }
-      addToCachedServers(locations);
+      addToCachedServers(location);
       return;
     }
 
     // merge old and new locations and add it to the cache
     // Meta record might be stale - some (probably the same) server has closed the region
     // with later seqNum and told us about the new location.
-    RegionLocations mergedLocation = oldLocation.mergeLocations(locations);
-    boolean replaced = tableLocations.replace(startKey, oldLocation, mergedLocation);
-    if (replaced && LOG.isTraceEnabled()) {
-      LOG.trace("Merged cached locations: " + mergedLocation);
-    }
-    addToCachedServers(locations);
+    RegionLocations mergedLocation = oldLocation.mergeLocations(location);
+    tableLocations.replace(startKey, oldLocation, mergedLocation);
+    addToCachedServers(location);
   }
 
   private void addToCachedServers(RegionLocations locations) {
@@ -257,11 +245,12 @@ public class MetaCache {
           RegionLocations regionLocations = e.getValue();
           if (regionLocations != null) {
             RegionLocations updatedLocations = regionLocations.removeByServer(serverName);
+            deletedSomething |= regionLocations == updatedLocations;
             if (updatedLocations != regionLocations) {
               if (updatedLocations.isEmpty()) {
-                deletedSomething |= tableLocations.remove(e.getKey(), regionLocations);
+                tableLocations.remove(e.getKey(), regionLocations);
               } else {
-                deletedSomething |= tableLocations.replace(e.getKey(), regionLocations, updatedLocations);
+                tableLocations.replace(e.getKey(), regionLocations, updatedLocations);
               }
             }
           }
@@ -269,8 +258,8 @@ public class MetaCache {
       }
       this.cachedServers.remove(serverName);
     }
-    if (deletedSomething && LOG.isTraceEnabled()) {
-      LOG.trace("Removed all cached region locations that map to " + serverName);
+    if (deletedSomething && LOG.isDebugEnabled()) {
+      LOG.debug("Removed all cached region locations that map to " + serverName);
     }
   }
 
@@ -278,9 +267,6 @@ public class MetaCache {
    * Delete all cached entries of a table.
    */
   public void clearCache(final TableName tableName) {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Removed all cached region locations for table " + tableName);
-    }
     this.cachedRegionLocations.remove(tableName);
   }
 
@@ -289,34 +275,6 @@ public class MetaCache {
    * @param tableName tableName
    * @param row
    */
-  public void clearCache(final TableName tableName, final byte [] row, int replicaId) {
-    ConcurrentMap<byte[], RegionLocations> tableLocations = getTableLocations(tableName);
-
-    boolean removed = false;
-    RegionLocations regionLocations = getCachedLocation(tableName, row);
-    if (regionLocations != null) {
-      HRegionLocation toBeRemoved = regionLocations.getRegionLocation(replicaId);
-      RegionLocations updatedLocations = regionLocations.remove(replicaId);
-      if (updatedLocations != regionLocations) {
-        byte[] startKey = regionLocations.getRegionLocation().getRegionInfo().getStartKey();
-        if (updatedLocations.isEmpty()) {
-          removed = tableLocations.remove(startKey, regionLocations);
-        } else {
-          removed = tableLocations.replace(startKey, regionLocations, updatedLocations);
-        }
-      }
-
-      if (removed && LOG.isTraceEnabled() && toBeRemoved != null) {
-        LOG.trace("Removed " + toBeRemoved + " from cache");
-      }
-    }
-  }
-
-  /**
-   * Delete a cached location, no matter what it is. Called when we were told to not use cache.
-   * @param tableName tableName
-   * @param row
-   */
   public void clearCache(final TableName tableName, final byte [] row) {
     ConcurrentMap<byte[], RegionLocations> tableLocations = getTableLocations(tableName);
 
@@ -324,8 +282,8 @@ public class MetaCache {
     if (regionLocations != null) {
       byte[] startKey = regionLocations.getRegionLocation().getRegionInfo().getStartKey();
       boolean removed = tableLocations.remove(startKey, regionLocations);
-      if (removed && LOG.isTraceEnabled()) {
-        LOG.trace("Removed " + regionLocations + " from cache");
+      if (removed && LOG.isDebugEnabled()) {
+        LOG.debug("Removed " + regionLocations + " from cache");
       }
     }
   }
@@ -341,15 +299,10 @@ public class MetaCache {
       RegionLocations updatedLocations = regionLocations.removeByServer(serverName);
       if (updatedLocations != regionLocations) {
         byte[] startKey = regionLocations.getRegionLocation().getRegionInfo().getStartKey();
-        boolean removed = false;
         if (updatedLocations.isEmpty()) {
-          removed = tableLocations.remove(startKey, regionLocations);
+          tableLocations.remove(startKey, regionLocations);
         } else {
-          removed = tableLocations.replace(startKey, regionLocations, updatedLocations);
-        }
-        if (removed && LOG.isTraceEnabled()) {
-          LOG.trace("Removed locations of table: " + tableName + " ,row: " + Bytes.toString(row)
-            + " mapping to server: " + serverName + " from cache");
+          tableLocations.replace(startKey, regionLocations, updatedLocations);
         }
       }
     }
@@ -364,17 +317,12 @@ public class MetaCache {
     RegionLocations regionLocations = tableLocations.get(hri.getStartKey());
     if (regionLocations != null) {
       HRegionLocation oldLocation = regionLocations.getRegionLocation(hri.getReplicaId());
-      if (oldLocation == null) return;
       RegionLocations updatedLocations = regionLocations.remove(oldLocation);
-      boolean removed = false;
       if (updatedLocations != regionLocations) {
         if (updatedLocations.isEmpty()) {
-          removed = tableLocations.remove(hri.getStartKey(), regionLocations);
+          tableLocations.remove(hri.getStartKey(), regionLocations);
         } else {
-          removed = tableLocations.replace(hri.getStartKey(), regionLocations, updatedLocations);
-        }
-        if (removed && LOG.isTraceEnabled()) {
-          LOG.trace("Removed " + oldLocation + " from cache");
+          tableLocations.replace(hri.getStartKey(), regionLocations, updatedLocations);
         }
       }
     }
@@ -384,22 +332,22 @@ public class MetaCache {
     if (location == null) {
       return;
     }
+
     TableName tableName = location.getRegionInfo().getTable();
     ConcurrentMap<byte[], RegionLocations> tableLocations = getTableLocations(tableName);
-    RegionLocations regionLocations = tableLocations.get(location.getRegionInfo().getStartKey());
-    if (regionLocations != null) {
-      RegionLocations updatedLocations = regionLocations.remove(location);
-      boolean removed = false;
-      if (updatedLocations != regionLocations) {
-        if (updatedLocations.isEmpty()) {
-          removed = tableLocations.remove(location.getRegionInfo().getStartKey(), regionLocations);
-        } else {
-          removed = tableLocations.replace(location.getRegionInfo().getStartKey(), regionLocations, updatedLocations);
-        }
-        if (removed && LOG.isTraceEnabled()) {
-          LOG.trace("Removed " + location + " from cache");
-        }
-      }
+    RegionLocations rll = tableLocations.get(location.getRegionInfo().getStartKey());
+    if (rll == null) {
+      return;
+    }
+    RegionLocations updatedLocations = rll.remove(location);
+    if (updatedLocations.isEmpty()) {
+      tableLocations.remove(location.getRegionInfo().getStartKey(), rll);
+    }
+    if (LOG.isDebugEnabled() && (rll == updatedLocations)) {
+      LOG.debug("Removed " +
+          location.getRegionInfo().getRegionNameAsString() +
+          " for tableName=" + tableName +
+          " from cache");
     }
   }
 

Modified: hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java?rev=1585765&r1=1585764&r2=1585765&view=diff
==============================================================================
--- hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java (original)
+++ hbase/branches/hbase-10070/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java Tue Apr  8 15:45:09 2014
@@ -21,18 +21,6 @@
 package org.apache.hadoop.hbase.client;
 
 
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -46,6 +34,17 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.util.BoundedCompletionService;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
 /**
  * Caller that goes to replica if the primary region does no answer within a configurable
  * timeout. If the timeout is reached, it calls all the secondary replicas, and returns
@@ -105,11 +104,11 @@ public class RpcRetryingCallerWithReadRe
       }
 
       if (reload || location == null) {
-        RegionLocations rl = getRegionLocations(false, id);
+        RegionLocations rl = getRegionLocations(false);
         location = id < rl.size() ? rl.getRegionLocation(id) : null;
       }
 
-      if (location == null || location.getServerName() == null) {
+      if (location == null) {
         // With this exception, there will be a retry. The location can be null for a replica
         //  when the table is created or after a split.
         throw new HBaseIOException("There is no location for replica id #" + id);
@@ -171,61 +170,30 @@ public class RpcRetryingCallerWithReadRe
    */
   public synchronized Result call()
       throws DoNotRetryIOException, InterruptedIOException, RetriesExhaustedException {
-    RegionLocations rl = getRegionLocations(true, RegionReplicaUtil.DEFAULT_REPLICA_ID);
+    RegionLocations rl = getRegionLocations(true);
     BoundedCompletionService<Result> cs = new BoundedCompletionService<Result>(pool, rl.size());
 
-    List<ExecutionException> exceptions = null;
-    int submitted = 0, completed = 0;
-    // submit call for the primary replica.
-    submitted += addCallsForReplica(cs, rl, 0, 0);
+    addCallsForReplica(cs, rl, 0, 0); // primary.
+
     try {
-      // wait for the timeout to see whether the primary responds back
       Future<Result> f = cs.poll(timeBeforeReplicas, TimeUnit.MICROSECONDS); // Yes, microseconds
-      if (f != null) {
-        return f.get(); //great we got a response
+      if (f == null) {
+        addCallsForReplica(cs, rl, 1, rl.size() - 1);  // secondaries
+        f = cs.take();
       }
+      return f.get();
     } catch (ExecutionException e) {
-      // the primary call failed with RetriesExhaustedException or DoNotRetryIOException
-      // but the secondaries might still succeed. Continue on the replica RPCs.
-      exceptions = new ArrayList<ExecutionException>(rl.size());
-      exceptions.add(e);
-      completed++;
-    } catch (CancellationException e) {
-      throw new InterruptedIOException();
-    } catch (InterruptedException e) {
-      throw new InterruptedIOException();
-    }
-
-    // submit call for the all of the secondaries at once
-    // TODO: this may be an overkill for large region replication
-    submitted += addCallsForReplica(cs, rl, 1, rl.size() - 1);
-    try {
-      while (completed < submitted) {
-        try {
-          Future<Result> f = cs.take();
-          return f.get(); // great we got an answer
-        } catch (ExecutionException e) {
-          // if not cancel or interrupt, wait until all RPC's are done
-          // one of the tasks failed. Save the exception for later.
-          if (exceptions == null) exceptions = new ArrayList<ExecutionException>(rl.size());
-          exceptions.add(e);
-          completed++;
-        }
-      }
+      throwEnrichedException(e);
+      return null; // unreachable
     } catch (CancellationException e) {
       throw new InterruptedIOException();
     } catch (InterruptedException e) {
       throw new InterruptedIOException();
     } finally {
       // We get there because we were interrupted or because one or more of the
-      // calls succeeded or failed. In all case, we stop all our tasks.
+      //  calls succeeded or failed. In all case, we stop all our tasks.
       cs.cancelAll(true);
     }
-
-    if (exceptions != null && !exceptions.isEmpty()) {
-      throwEnrichedException(exceptions.get(0)); // just rethrow the first exception for now.
-    }
-    return null; // unreachable
   }
 
   /**
@@ -262,9 +230,8 @@ public class RpcRetryingCallerWithReadRe
    * @param rl         - the region locations
    * @param min        - the id of the first replica, inclusive
    * @param max        - the id of the last replica, inclusive.
-   * @return the number of submitted calls
    */
-  private int addCallsForReplica(BoundedCompletionService<Result> cs,
+  private void addCallsForReplica(BoundedCompletionService<Result> cs,
                                   RegionLocations rl, int min, int max) {
     for (int id = min; id <= max; id++) {
       HRegionLocation hrl = rl.getRegionLocation(id);
@@ -272,22 +239,21 @@ public class RpcRetryingCallerWithReadRe
       RetryingRPC retryingOnReplica = new RetryingRPC(callOnReplica);
       cs.submit(retryingOnReplica);
     }
-    return max - min + 1;
   }
 
-  private RegionLocations getRegionLocations(boolean useCache, int replicaId)
-      throws RetriesExhaustedException, DoNotRetryIOException, InterruptedIOException {
+  private RegionLocations getRegionLocations(boolean useCache)
+      throws RetriesExhaustedException, DoNotRetryIOException {
     RegionLocations rl;
     try {
-      rl = cConnection.locateRegion(tableName, get.getRow(), useCache, true, replicaId);
-    } catch (DoNotRetryIOException e) {
-      throw e;
-    } catch (RetriesExhaustedException e) {
-      throw e;
-    } catch (InterruptedIOException e) {
-      throw e;
+      rl = cConnection.locateRegion(tableName, get.getRow(), useCache, true);
     } catch (IOException e) {
-      throw new RetriesExhaustedException("Can't get the location", e);
+      if (e instanceof DoNotRetryIOException) {
+        throw (DoNotRetryIOException) e;
+      } else if (e instanceof RetriesExhaustedException) {
+        throw (RetriesExhaustedException) e;
+      } else {
+        throw new RetriesExhaustedException("Can't get the location", e);
+      }
     }
     if (rl == null) {
       throw new RetriesExhaustedException("Can't get the locations");
@@ -295,4 +261,4 @@ public class RpcRetryingCallerWithReadRe
 
     return rl;
   }
-}
+}
\ No newline at end of file
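
The reverted call() above is a hedged read: submit the primary, poll the
completion service for a short window, and only if the primary has not
answered fan the request out to the secondary replicas; the first task to
finish wins and everything else is cancelled. A minimal sketch of the same
flow using the JDK's ExecutorCompletionService in place of HBase's
BoundedCompletionService (the Callable bodies are placeholders):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.CompletionService;
    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;

    public class HedgedRead {
      static String read(ExecutorService pool, long primaryTimeoutMicros,
                         List<Callable<String>> replicas) throws Exception {
        CompletionService<String> cs = new ExecutorCompletionService<>(pool);
        List<Future<String>> futures = new ArrayList<>();
        futures.add(cs.submit(replicas.get(0)));            // primary first
        try {
          Future<String> f =
              cs.poll(primaryTimeoutMicros, TimeUnit.MICROSECONDS);
          if (f == null) {                                  // primary too slow
            for (int i = 1; i < replicas.size(); i++) {     // hedge: fan out
              futures.add(cs.submit(replicas.get(i)));
            }
            f = cs.take();                                  // first to finish
          }
          return f.get();
        } finally {
          for (Future<String> pending : futures) {
            pending.cancel(true);                           // stop the losers
          }
        }
      }

      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(3);
        List<Callable<String>> replicas = List.of(
            () -> { Thread.sleep(200); return "primary"; },  // slow primary
            () -> { Thread.sleep(20);  return "replica-1"; },
            () -> { Thread.sleep(50);  return "replica-2"; });
        System.out.println(read(pool, 10_000, replicas));    // likely replica-1
        pool.shutdown();
      }
    }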

Modified: hbase/branches/hbase-10070/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-10070/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java?rev=1585765&r1=1585764&r2=1585765&view=diff
==============================================================================
--- hbase/branches/hbase-10070/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java (original)
+++ hbase/branches/hbase-10070/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java Tue Apr  8 15:45:09 2014
@@ -46,25 +46,25 @@ public class TestRegionLocations {
 
     list = hrll((HRegionLocation)null);
     assertTrue(list.isEmpty());
-    assertEquals(1, list.size());
+    assertEquals(0, list.size());
     assertEquals(0, list.numNonNullElements());
 
     HRegionInfo info0 = hri(0);
     list = hrll(hrl(info0, null));
-    assertTrue(list.isEmpty());
+    assertFalse(list.isEmpty());
     assertEquals(1, list.size());
-    assertEquals(0, list.numNonNullElements());
+    assertEquals(1, list.numNonNullElements());
 
     HRegionInfo info9 = hri(9);
     list = hrll(hrl(info9, null));
-    assertTrue(list.isEmpty());
+    assertFalse(list.isEmpty());
     assertEquals(10, list.size());
-    assertEquals(0, list.numNonNullElements());
+    assertEquals(1, list.numNonNullElements());
 
     list = hrll(hrl(info0, null), hrl(info9, null));
-    assertTrue(list.isEmpty());
+    assertFalse(list.isEmpty());
     assertEquals(10, list.size());
-    assertEquals(0, list.numNonNullElements());
+    assertEquals(2, list.numNonNullElements());
   }
 
   private HRegionInfo hri(int replicaId) {
@@ -100,7 +100,7 @@ public class TestRegionLocations {
     list = hrll(hrl(info0, sn0));
     assertTrue(list == list.removeByServer(sn1));
     list = list.removeByServer(sn0);
-    assertEquals(0, list.numNonNullElements());
+    assertTrue(list.isEmpty());
 
     // test remove from multi element list
     list = hrll(hrl(info0, sn0), hrl(info1, sn1), hrl(info2, sn2), hrl(info9, sn2));
@@ -226,7 +226,7 @@ public class TestRegionLocations {
     list1 = list2.mergeLocations(list1);
     assertEquals(sn0, list1.getRegionLocation(0).getServerName());
     assertEquals(sn1, list1.getRegionLocation(1).getServerName());
-    assertEquals(2, list1.size()); // the size is taken from the argument list to merge
+    assertEquals(sn2, list1.getRegionLocation(2).getServerName());
 
     // do the other way merge as well
     list1 = hrll(hrl(info0, sn0), hrl(info1, sn1));
@@ -240,9 +240,10 @@ public class TestRegionLocations {
     list1 = hrll(hrl(info0, sn0), hrl(info1, sn1));
     list2 = hrll(hrl(info0, sn2), hrl(info1, sn2), hrl(info9, sn3));
     list1 = list2.mergeLocations(list1); // list1 should override
-    assertEquals(2, list1.size());
+    assertEquals(10, list1.size());
     assertEquals(sn0, list1.getRegionLocation(0).getServerName());
     assertEquals(sn1, list1.getRegionLocation(1).getServerName());
+    assertEquals(sn3, list1.getRegionLocation(9).getServerName());
 
     // do the other way
     list1 = hrll(hrl(info0, sn0), hrl(info1, sn1));
@@ -271,35 +272,4 @@ public class TestRegionLocations {
     assertEquals(sn2, list1.getRegionLocation(1).getServerName());
     assertEquals(sn3, list1.getRegionLocation(9).getServerName());
   }
-
-  @Test
-  public void testConstructWithNullElements() {
-    // RegionLocations can contain null elements as well. These null elements can
-
-    RegionLocations list = new RegionLocations((HRegionLocation)null);
-    assertTrue(list.isEmpty());
-    assertEquals(1, list.size());
-    assertEquals(0, list.numNonNullElements());
-
-    list = new RegionLocations(null, hrl(info1, sn0));
-    assertFalse(list.isEmpty());
-    assertEquals(2, list.size());
-    assertEquals(1, list.numNonNullElements());
-
-    list = new RegionLocations(hrl(info0, sn0), null);
-    assertEquals(2, list.size());
-    assertEquals(1, list.numNonNullElements());
-
-    list = new RegionLocations(null, hrl(info2, sn0), null, hrl(info9, sn0));
-    assertEquals(10, list.size());
-    assertEquals(2, list.numNonNullElements());
-
-    list = new RegionLocations(null, hrl(info2, sn0), null, hrl(info9, sn0), null);
-    assertEquals(11, list.size());
-    assertEquals(2, list.numNonNullElements());
-
-    list = new RegionLocations(null, hrl(info2, sn0), null, hrl(info9, sn0), null, null);
-    assertEquals(12, list.size());
-    assertEquals(2, list.numNonNullElements());
-  }
 }
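
The updated assertions encode the RegionLocations layout: entries live in an
array indexed by replica id, so a single location for replica 9 yields a list
of size 10 with nine null slots, and numNonNullElements() counts only the
filled slots. A toy model of that indexing (Loc is a stand-in for
HRegionLocation, not the real class):

    class Loc {
      final int replicaId;
      Loc(int replicaId) { this.replicaId = replicaId; }
    }

    class ReplicaIndexSketch {
      // Slots are addressed by replica id, so the array length is
      // (max replica id + 1) and unset slots stay null.
      static Loc[] indexByReplicaId(Loc... locs) {
        int max = 0;
        for (Loc l : locs) {
          if (l != null) max = Math.max(max, l.replicaId);
        }
        Loc[] byId = new Loc[max + 1];
        for (Loc l : locs) {
          if (l != null) byId[l.replicaId] = l;
        }
        return byId;
      }

      public static void main(String[] args) {
        Loc[] list = indexByReplicaId(new Loc(9));
        System.out.println(list.length);   // 10, matching the test above
      }
    }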

Modified: hbase/branches/hbase-10070/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedCompletionService.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-10070/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedCompletionService.java?rev=1585765&r1=1585764&r2=1585765&view=diff
==============================================================================
--- hbase/branches/hbase-10070/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedCompletionService.java (original)
+++ hbase/branches/hbase-10070/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedCompletionService.java Tue Apr  8 15:45:09 2014
@@ -34,12 +34,11 @@ import java.util.concurrent.TimeUnit;
 * A completion service, close to the one available in JDK 1.7.
 * However, this one keeps the list of the futures, and allows cancelling them all.
 * This also means that it should be used for a small set of tasks only.
- * <br>Implementation is not thread safe.
  */
 public class BoundedCompletionService<V> {
   private final Executor executor;
-  private final List<Future<V>> tasks; // all the tasks
-  private final BlockingQueue<Future<V>> completed; // all the tasks that are completed
+  private final List<Future<V>> sent; // all the calls we sent
+  private final BlockingQueue<Future<V>> completed; // all the results we got so far.
 
   class QueueingFuture extends FutureTask<V> {
 
@@ -47,7 +46,6 @@ public class BoundedCompletionService<V>
       super(callable);
     }
 
-    @Override
     protected void done() {
       completed.add(QueueingFuture.this);
     }
@@ -55,7 +53,7 @@ public class BoundedCompletionService<V>
 
   public BoundedCompletionService(Executor executor, int maxTasks) {
     this.executor = executor;
-    this.tasks = new ArrayList<Future<V>>(maxTasks);
+    this.sent = new ArrayList<Future<V>>(maxTasks);
     this.completed = new ArrayBlockingQueue<Future<V>>(maxTasks);
   }
 
@@ -63,7 +61,7 @@ public class BoundedCompletionService<V>
   public Future<V> submit(Callable<V> task) {
     QueueingFuture newFuture = new QueueingFuture(task);
     executor.execute(newFuture);
-    tasks.add(newFuture);
+    sent.add(newFuture);
     return newFuture;
   }
 
@@ -76,7 +74,7 @@ public class BoundedCompletionService<V>
   }
 
   public void cancelAll(boolean interrupt) {
-    for (Future<V> future : tasks) {
+    for (Future<V> future : sent) {
       future.cancel(interrupt);
     }
   }
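
BoundedCompletionService differs from the JDK's ExecutorCompletionService in
one way that matters here: it remembers every submitted future, so a caller
can abort the whole batch with a single cancelAll(). A minimal usage sketch
against the API shown above (pool sizing and the Callable bodies are
placeholders):

    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class BcsExample {
      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(3);
        BoundedCompletionService<String> cs =
            new BoundedCompletionService<String>(pool, 3);
        for (int i = 0; i < 3; i++) {
          final int id = i;
          cs.submit(new Callable<String>() {
            public String call() throws Exception {
              Thread.sleep(100L * id);            // placeholder work
              return "answer from replica " + id;
            }
          });
        }
        try {
          Future<String> first = cs.take();       // first completed task
          System.out.println(first.get());
        } finally {
          cs.cancelAll(true);                     // interrupt the stragglers
          pool.shutdown();
        }
      }
    }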

Modified: hbase/branches/hbase-10070/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-10070/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java?rev=1585765&r1=1585764&r2=1585765&view=diff
==============================================================================
--- hbase/branches/hbase-10070/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java (original)
+++ hbase/branches/hbase-10070/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java Tue Apr  8 15:45:09 2014
@@ -30,7 +30,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.StringUtils;
 
 /**
  * Thread Utility
@@ -39,16 +38,6 @@ import org.apache.hadoop.util.StringUtil
 public class Threads {
   protected static final Log LOG = LogFactory.getLog(Threads.class);
   private static final AtomicInteger poolNumber = new AtomicInteger(1);
-
-  private static UncaughtExceptionHandler LOGGING_EXCEPTION_HANDLER =
-    new UncaughtExceptionHandler() {
-    @Override
-    public void uncaughtException(Thread t, Throwable e) {
-      LOG.warn("Thread:" + t + " exited with Exception:"
-          + StringUtils.stringifyException(e));
-    }
-  };
-
   /**
    * Utility method that sets name, daemon status and starts passed thread.
    * @param t thread to run
@@ -171,15 +160,15 @@ public class Threads {
   }
 
   /**
-   * Create a new CachedThreadPool with a bounded number as the maximum
+   * Create a new CachedThreadPool with a bounded number as the maximum 
    * thread size in the pool.
-   *
+   * 
   * @param maxCachedThread the maximum number of threads that can be created in the pool
    * @param timeout the maximum time to wait
    * @param unit the time unit of the timeout argument
    * @param threadFactory the factory to use when creating new threads
-   * @return threadPoolExecutor the cachedThreadPool with a bounded number
-   * as the maximum thread size in the pool.
+   * @return threadPoolExecutor the cachedThreadPool with a bounded number 
+   * as the maximum thread size in the pool. 
    */
   public static ThreadPoolExecutor getBoundedCachedThreadPool(
       int maxCachedThread, long timeout, TimeUnit unit,
@@ -191,8 +180,8 @@ public class Threads {
     boundedCachedThreadPool.allowCoreThreadTimeOut(true);
     return boundedCachedThreadPool;
   }
-
-
+  
+  
   /**
    * Returns a {@link java.util.concurrent.ThreadFactory} that names each created thread uniquely,
    * with a common prefix.
@@ -241,8 +230,6 @@ public class Threads {
         Thread t = namedFactory.newThread(r);
         if (handler != null) {
           t.setUncaughtExceptionHandler(handler);
-        } else {
-          t.setUncaughtExceptionHandler(LOGGING_EXCEPTION_HANDLER);
         }
         if (!t.isDaemon()) {
           t.setDaemon(true);
@@ -255,11 +242,4 @@ public class Threads {
 
     };
   }
-
-  /** Sets an UncaughtExceptionHandler for the thread which logs the
-   * Exception stack if the thread dies.
-   */
-  public static void setLoggingUncaughtExceptionHandler(Thread t) {
-    t.setUncaughtExceptionHandler(LOGGING_EXCEPTION_HANDLER);
-  }
 }
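
The handler deleted above is worth noting as a pattern: by default an
exception escaping run() is only printed to stderr, so routing uncaught
exceptions through the logging framework keeps them in the server logs, which
is why the thread factory installed a logging handler when none was given. A
standalone sketch of the same idea using only the JDK:

    import java.lang.Thread.UncaughtExceptionHandler;

    public class LoggingHandlerSketch {
      static final UncaughtExceptionHandler LOGGING =
          new UncaughtExceptionHandler() {
            @Override
            public void uncaughtException(Thread t, Throwable e) {
              // Route to the log sink instead of the default stderr printout.
              System.err.println("Thread " + t.getName() + " exited with:");
              e.printStackTrace();
            }
          };

      public static void main(String[] args) {
        Thread t = new Thread(new Runnable() {
          @Override
          public void run() {
            throw new IllegalStateException("boom");  // escapes run()
          }
        });
        t.setUncaughtExceptionHandler(LOGGING);
        t.start();
      }
    }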

Modified: hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java?rev=1585765&r1=1585764&r2=1585765&view=diff
==============================================================================
--- hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java (original)
+++ hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java Tue Apr  8 15:45:09 2014
@@ -274,12 +274,6 @@ class CoprocessorHConnection implements 
   }
 
   @Override
-  public HRegionLocation relocateRegion(TableName tableName, byte[] row, int replicaId)
-      throws IOException {
-    return delegate.relocateRegion(tableName, row, replicaId);
-  }
-
-  @Override
   public HRegionLocation relocateRegion(byte[] tableName, byte[] row) throws IOException {
     return delegate.relocateRegion(tableName, row);
   }
@@ -337,12 +331,6 @@ class CoprocessorHConnection implements 
   }
 
   @Override
-  public RegionLocations locateRegion(TableName tableName, byte[] row, boolean useCache,
-      boolean retry, int replicaId) throws IOException {
-    return delegate.locateRegion(tableName, row, useCache, retry, replicaId);
-  }
-
-  @Override
   public List<HRegionLocation> locateRegions(byte[] tableName, boolean useCache, boolean offlined)
       throws IOException {
     return delegate.locateRegions(tableName, useCache, offlined);

Modified: hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1585765&r1=1585764&r2=1585765&view=diff
==============================================================================
--- hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Tue Apr  8 15:45:09 2014
@@ -637,29 +637,25 @@ public class HRegion implements HeapSize
     status.setStatus("Writing region info on filesystem");
     fs.checkRegionInfoOnFilesystem();
 
+    // Remove temporary data left over from old regions
+    status.setStatus("Cleaning up temporary data from old regions");
+    fs.cleanupTempDir();
+
     // Initialize all the HStores
     status.setStatus("Initializing all the Stores");
     long maxSeqId = initializeRegionStores(reporter, status);
 
+    status.setStatus("Cleaning up detritus from prior splits");
+    // Get rid of any splits or merges that were lost in-progress.  Clean out
+    // these directories here on open.  We may be opening a region that was
+    // being split but we crashed in the middle of it all.
+    fs.cleanupAnySplitDetritus();
+    fs.cleanupMergesDir();
+
     this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
     this.writestate.flushRequested = false;
     this.writestate.compacting = 0;
 
-    if (this.writestate.writesEnabled) {
-      // Remove temporary data left over from old regions
-      status.setStatus("Cleaning up temporary data from old regions");
-      fs.cleanupTempDir();
-    }
-
-    if (this.writestate.writesEnabled) {
-      status.setStatus("Cleaning up detritus from prior splits");
-      // Get rid of any splits or merges that were lost in-progress.  Clean out
-      // these directories here on open.  We may be opening a region that was
-      // being split but we crashed in the middle of it all.
-      fs.cleanupAnySplitDetritus();
-      fs.cleanupMergesDir();
-    }
-
     // Initialize split policy
     this.splitPolicy = RegionSplitPolicy.create(this, conf);
 
@@ -757,12 +753,9 @@ public class HRegion implements HeapSize
       }
     }
     mvcc.initialize(maxMemstoreTS + 1);
-
-    if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-      // Recover any edits if available.
-      maxSeqId = Math.max(maxSeqId, replayRecoveredEditsIfAny(
-          this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
-    }
+    // Recover any edits if available.
+    maxSeqId = Math.max(maxSeqId, replayRecoveredEditsIfAny(
+        this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
     return maxSeqId;
   }
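
The code removed above gated the filesystem-mutating steps of region open on
write permission, and gated the replay of recovered edits on being the primary
replica, because replaying forces flushes and a secondary must never write
into the primary's file space. The shape of that guard, reduced to a
compilable sketch (all names and stub bodies are placeholders for the real
HRegion methods):

    class RegionOpenSketch {
      boolean writesEnabled;  // false for read-only secondary replicas
      boolean isPrimary;      // replica id == 0

      long open() {
        if (writesEnabled) {
          cleanupTempDir();          // mutates the region directory
          cleanupSplitDetritus();    // mutates the region directory
        }
        long maxSeqId = initializeStores();
        if (isPrimary) {
          // Secondaries skip the replay: it would trigger flushes into
          // file space owned by the primary.
          maxSeqId = Math.max(maxSeqId, replayRecoveredEdits());
        }
        return maxSeqId;
      }

      void cleanupTempDir() { }
      void cleanupSplitDetritus() { }
      long initializeStores() { return 0L; }
      long replayRecoveredEdits() { return 0L; }
    }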
 

Modified: hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java?rev=1585765&r1=1585764&r2=1585765&view=diff
==============================================================================
--- hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java (original)
+++ hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java Tue Apr  8 15:45:09 2014
@@ -192,9 +192,8 @@ public class HRegionFileSystem {
     ArrayList<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>(files.length);
     for (FileStatus status: files) {
       if (!StoreFileInfo.isValid(status)) continue;
-      StoreFileInfo info = ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo,
-        regionInfoForFs, familyName, status);
-      storeFiles.add(info);
+
+      storeFiles.add(new StoreFileInfo(this.conf, this.fs, status));
     }
     return storeFiles;
   }

Modified: hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java?rev=1585765&r1=1585764&r2=1585765&view=diff
==============================================================================
--- hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java (original)
+++ hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java Tue Apr  8 15:45:09 2014
@@ -497,7 +497,7 @@ public class HStore implements Store {
       completionService.submit(new Callable<StoreFile>() {
         @Override
         public StoreFile call() throws IOException {
-          StoreFile storeFile = createStoreFileAndReader(storeFileInfo);
+          StoreFile storeFile = createStoreFileAndReader(storeFileInfo.getPath());
           return storeFile;
         }
       });
@@ -592,10 +592,6 @@ public class HStore implements Store {
 
   private StoreFile createStoreFileAndReader(final Path p) throws IOException {
     StoreFileInfo info = new StoreFileInfo(conf, this.getFileSystem(), p);
-    return createStoreFileAndReader(info);
-  }
-
-  private StoreFile createStoreFileAndReader(final StoreFileInfo info) throws IOException {
     info.setRegionCoprocessorHost(this.region.getCoprocessorHost());
     StoreFile storeFile = new StoreFile(this.getFileSystem(), info, this.conf, this.cacheConf,
       this.family.getBloomFilterType());
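
The reverted refactor in createStoreFileAndReader is a small funneling
pattern: the Path overload only resolves a StoreFileInfo and forwards it, so
callers that already hold a resolved StoreFileInfo (for example one built
around a link) can enter the single real implementation directly. Sketched
generically (the types below are placeholders, not HBase classes):

    class FileInfo {
      final String path;
      FileInfo(String path) { this.path = path; }
    }

    class Reader {
      final FileInfo info;
      Reader(FileInfo info) { this.info = info; }
    }

    class ReaderFactory {
      // Convenience overload: resolve, then funnel into the real one.
      Reader open(String path) {
        return open(new FileInfo(path));
      }

      // Single implementation point shared by both entry paths.
      Reader open(FileInfo info) {
        return new Reader(info);
      }
    }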

Modified: hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java?rev=1585765&r1=1585764&r2=1585765&view=diff
==============================================================================
--- hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java (original)
+++ hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java Tue Apr  8 15:45:09 2014
@@ -19,7 +19,6 @@
 
 package org.apache.hadoop.hbase.regionserver;
 
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -36,6 +35,7 @@ import org.apache.hadoop.hbase.io.FSData
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.HalfStoreFileReader;
 import org.apache.hadoop.hbase.io.Reference;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.util.FSUtils;
 
@@ -133,22 +133,6 @@ public class StoreFileInfo implements Co
   }
 
   /**
-   * Create a Store File Info from an HFileLink
-   * @param conf the {@link Configuration} to use
-   * @param fs The current file system to use.
-   * @param fileStatus The {@link FileStatus} of the file
-   */
-  public StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus,
-      final HFileLink link)
-      throws IOException {
-    this.conf = conf;
-    this.fileStatus = fileStatus;
-      // HFileLink
-    this.reference = null;
-    this.link = link;
-  }
-
-  /**
    * Sets the region coprocessor env.
    * @param coprocessorHost
    */
@@ -211,8 +195,6 @@ public class StoreFileInfo implements Co
     long length = status.getLen();
     if (this.reference != null) {
       hdfsBlocksDistribution = computeRefFileHDFSBlockDistribution(fs, reference, status);
-    } else if (this.link != null) {
-      hdfsBlocksDistribution = computeHDFSBlocksDistribution(fs);
     } else {
       hdfsBlocksDistribution = FSUtils.computeHDFSBlocksDistribution(fs, status, 0, length);
     }
@@ -244,18 +226,8 @@ public class StoreFileInfo implements Co
     FileStatus status;
     if (this.reference != null) {
       if (this.link != null) {
-        FileNotFoundException exToThrow = null;
-        for (int i = 0; i < this.link.getLocations().length; i++) {
-          // HFileLink Reference
-          try {
-            status = link.getFileStatus(fs);
-            return computeRefFileHDFSBlockDistribution(fs, reference, status);
-          } catch (FileNotFoundException ex) {
-            // try the other location
-            exToThrow = ex;
-          }
-        }
-        throw exToThrow;
+        // HFileLink Reference
+        status = link.getFileStatus(fs);
       } else {
         // HFile Reference
         Path referencePath = getReferredToFile(this.getPath());
@@ -264,18 +236,8 @@ public class StoreFileInfo implements Co
       return computeRefFileHDFSBlockDistribution(fs, reference, status);
     } else {
       if (this.link != null) {
-        FileNotFoundException exToThrow = null;
-        for (int i = 0; i < this.link.getLocations().length; i++) {
-          // HFileLink
-          try {
-            status = link.getFileStatus(fs);
-            return FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
-          } catch (FileNotFoundException ex) {
-            // try the other location
-            exToThrow = ex;
-          }
-        }
-        throw exToThrow;
+        // HFileLink
+        status = link.getFileStatus(fs);
       } else {
         status = this.fileStatus;
       }
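
The loops deleted above implement failover across the candidate locations of a
link: a linked file can live either in the data directory or in the archive,
so a FileNotFoundException at one location means "try the next", and the
exception is rethrown only once every location has failed. A generic sketch of
that pattern (resolve() and stat() are hypothetical stand-ins for the
HFileLink/FileSystem calls):

    import java.io.FileNotFoundException;
    import java.util.Arrays;
    import java.util.List;

    public class LinkResolverSketch {
      // Try each candidate path in order; rethrow only if all fail.
      // Assumes at least one candidate location.
      static String resolve(List<String> locations) throws FileNotFoundException {
        FileNotFoundException last = null;
        for (String candidate : locations) {
          try {
            return stat(candidate);          // first hit wins
          } catch (FileNotFoundException e) {
            last = e;                        // maybe archived; try the next one
          }
        }
        throw last;
      }

      // Stub standing in for FileSystem#getFileStatus.
      static String stat(String path) throws FileNotFoundException {
        if (!path.startsWith("/archive")) {
          throw new FileNotFoundException(path);
        }
        return path;
      }

      public static void main(String[] args) throws FileNotFoundException {
        System.out.println(resolve(Arrays.asList("/data/f1", "/archive/f1")));
      }
    }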

Modified: hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java?rev=1585765&r1=1585764&r2=1585765&view=diff
==============================================================================
--- hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java (original)
+++ hbase/branches/hbase-10070/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java Tue Apr  8 15:45:09 2014
@@ -18,16 +18,9 @@
 
 package org.apache.hadoop.hbase.util;
 
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 
 /**
  * Similar to {@link RegionReplicaUtil} but for the server side
@@ -55,39 +48,5 @@ public class ServerRegionReplicaUtil ext
       || !isDefaultReplica(region.getRegionInfo());
   }
 
-  /**
-   * Returns whether the recovered edits should be replayed (replaying them flushes
-   * the results). Currently secondary region replicas do not replay the edits, since
-   * that would cause flushes which might affect the primary region. Primary regions,
-   * even when opened in read-only mode, should replay the edits.
-   * @param region the HRegion object
-   * @return whether recovered edits should be replayed.
-   */
-  public static boolean shouldReplayRecoveredEdits(HRegion region) {
-    return isDefaultReplica(region.getRegionInfo());
-  }
-
-  /**
-   * Returns a StoreFileInfo from the given FileStatus. Secondary replicas refer to the
-   * files of the primary region, so an HFileLink is used to construct the StoreFileInfo.
-   * This ensures that the secondary will be able to continue reading the store files even
-   * if they are moved to the archive after a compaction.
-   * @throws IOException
-   */
-  public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs,
-      HRegionInfo regionInfo, HRegionInfo regionInfoForFs, String familyName, FileStatus status)
-      throws IOException {
-
-    // if this is a primary region, just return the StoreFileInfo constructed from path
-    if (regionInfo.equals(regionInfoForFs)) {
-      return new StoreFileInfo(conf, fs, status);
-    }
-
-    // else create a store file link. The link file does not exist on the filesystem though.
-    HFileLink link = new HFileLink(conf,
-      HFileLink.createPath(regionInfoForFs.getTable(), regionInfoForFs.getEncodedName()
-        , familyName, status.getPath().getName()));
-    return new StoreFileInfo(conf, fs, status, link);
-  }
 
 }
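
The removed getStoreFileInfo helper makes a per-replica decision: the primary
reads its own store file directly, while a secondary wraps the same file in an
HFileLink so the read keeps working after the primary compacts and archives
it. The decision itself, as a toy model (FileRef and friends are placeholders,
not HBase types):

    interface FileRef { }

    class DirectRef implements FileRef {     // primary: read own file
      final String path;
      DirectRef(String path) { this.path = path; }
    }

    class LinkRef implements FileRef {       // secondary: follow the link, which
      final String target;                   // stays valid after archiving
      LinkRef(String target) { this.target = target; }
    }

    class ReplicaFileRefs {
      static FileRef refFor(boolean isPrimaryReplica, String path) {
        return isPrimaryReplica ? new DirectRef(path) : new LinkRef(path);
      }
    }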

Modified: hbase/branches/hbase-10070/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-10070/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java?rev=1585765&r1=1585764&r2=1585765&view=diff
==============================================================================
--- hbase/branches/hbase-10070/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java (original)
+++ hbase/branches/hbase-10070/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java Tue Apr  8 15:45:09 2014
@@ -3928,7 +3928,6 @@ public class TestHRegion {
     // create a primary region, load some data and flush
     // create a secondary region, and do a get against that
     Path rootDir = new Path(DIR + "testRegionReplicaSecondary");
-    TEST_UTIL.getConfiguration().set(HConstants.HBASE_DIR, rootDir.toString());
 
     byte[][] families = new byte[][] {
         Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3")
@@ -3978,7 +3977,6 @@ public class TestHRegion {
     // create a primary region, load some data and flush
     // create a secondary region, and do a put against that
     Path rootDir = new Path(DIR + "testRegionReplicaSecondary");
-    TEST_UTIL.getConfiguration().set(HConstants.HBASE_DIR, rootDir.toString());
 
     byte[][] families = new byte[][] {
         Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3")
@@ -4026,60 +4024,7 @@ public class TestHRegion {
         HRegion.closeHRegion(secondaryRegion);
       }
     }
-  }
-
-  @Test
-  public void testCompactionFromPrimary() throws IOException {
-    Path rootDir = new Path(DIR + "testRegionReplicaSecondary");
-    TEST_UTIL.getConfiguration().set(HConstants.HBASE_DIR, rootDir.toString());
-
-    byte[][] families = new byte[][] {
-        Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3")
-    };
-    byte[] cq = Bytes.toBytes("cq");
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testRegionReplicaSecondary"));
-    for (byte[] family : families) {
-      htd.addFamily(new HColumnDescriptor(family));
-    }
-
-    long time = System.currentTimeMillis();
-    HRegionInfo primaryHri = new HRegionInfo(htd.getTableName(),
-      HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
-      false, time, 0);
-    HRegionInfo secondaryHri = new HRegionInfo(htd.getTableName(),
-      HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
-      false, time, 1);
-
-    HRegion primaryRegion = null, secondaryRegion = null;
 
-    try {
-      primaryRegion = HRegion.createHRegion(primaryHri,
-        rootDir, TEST_UTIL.getConfiguration(), htd);
-
-      // load some data
-      putData(primaryRegion, 0, 1000, cq, families);
-
-      // flush region
-      primaryRegion.flushcache();
-
-      // open secondary region
-      secondaryRegion = HRegion.openHRegion(rootDir, secondaryHri, htd, null, conf);
-
-      // move the file of the primary region to the archive, simulating a compaction
-      Collection<StoreFile> storeFiles = primaryRegion.getStore(families[0]).getStorefiles();
-      primaryRegion.getRegionFileSystem().removeStoreFiles(Bytes.toString(families[0]), storeFiles);
-      Collection<StoreFileInfo> storeFileInfos = primaryRegion.getRegionFileSystem().getStoreFiles(families[0]);
-      Assert.assertTrue(storeFileInfos == null || storeFileInfos.size() == 0);
-
-      verifyData(secondaryRegion, 0, 1000, cq, families);
-    } finally {
-      if (primaryRegion != null) {
-        HRegion.closeHRegion(primaryRegion);
-      }
-      if (secondaryRegion != null) {
-        HRegion.closeHRegion(secondaryRegion);
-      }
-    }
   }
 
   private void putData(int startRow, int numRows, byte[] qf, byte[]... families) throws IOException {