Posted to hdfs-commits@hadoop.apache.org by ar...@apache.org on 2013/12/12 08:01:25 UTC

svn commit: r1550357 - in /hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ src/main/java/org/apache/hadoop/hdfs/server/protocol/

Author: arp
Date: Thu Dec 12 07:01:24 2013
New Revision: 1550357

URL: http://svn.apache.org/r1550357
Log:
HDFS-5648. Get rid of FsDatasetImpl#perVolumeReplicaMap.
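
The removed perVolumeReplicaMap duplicated every entry already held in volumeMap; after this change a per-storage view is computed on demand by grouping the single volumeMap by storage ID. A minimal sketch of that grouping pattern is below, using simplified placeholder types rather than the real ReplicaInfo/FsVolumeImpl classes; in the actual patch, getBlockReports(bpid) pre-creates one finalized list and one under-construction list per volume and fills them in a single synchronized pass over volumeMap.replicas(bpid).

import java.util.*;

// Sketch only (placeholder types): group one flat replica collection by the
// storage that holds each replica, mirroring the structure of the new
// FsDatasetImpl#getBlockReports.
class PerStorageGroupingSketch {
  static class Replica {
    final long blockId;
    final String storageId;
    Replica(long blockId, String storageId) {
      this.blockId = blockId;
      this.storageId = storageId;
    }
  }

  static Map<String, List<Replica>> groupByStorage(Collection<Replica> replicas) {
    Map<String, List<Replica>> byStorage = new HashMap<String, List<Replica>>();
    for (Replica r : replicas) {
      List<Replica> list = byStorage.get(r.storageId);
      if (list == null) {
        list = new ArrayList<Replica>();
        byStorage.put(r.storageId, list);
      }
      list.add(r);
    }
    return byStorage;
  }

  public static void main(String[] args) {
    List<Replica> all = Arrays.asList(
        new Replica(1L, "DS-a"), new Replica(2L, "DS-b"), new Replica(3L, "DS-a"));
    System.out.println(groupByStorage(all).get("DS-a").size()); // prints 2
  }
}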

Modified:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt?rev=1550357&r1=1550356&r2=1550357&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt Thu Dec 12 07:01:24 2013
@@ -123,3 +123,5 @@ IMPROVEMENTS:
     HDFS-5484. StorageType and State in DatanodeStorageInfo in NameNode is
     not accurate. (Eric Sirianni via Arpit Agarwal)
 
+    HDFS-5648. Get rid of FsDatasetImpl#perVolumeReplicaMap. (Arpit Agarwal)
+

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java?rev=1550357&r1=1550356&r2=1550357&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java Thu Dec 12 07:01:24 2013
@@ -38,6 +38,7 @@ import javax.management.NotCompliantMBea
 import javax.management.ObjectName;
 import javax.management.StandardMBean;
 
+import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -197,15 +198,8 @@ class FsDatasetImpl implements FsDataset
   final FsDatasetCache cacheManager;
   private final int validVolsRequired;
 
-  // TODO HDFS-2832: Consider removing duplicated block info from these
-  // two maps and move the perVolumeReplicaMap to FsVolumeImpl.
-  // This might require some refactoring.
   final ReplicaMap volumeMap;
 
-  // Map from StorageID to ReplicaMap.
-  final Map<String, ReplicaMap> perVolumeReplicaMap;
-
-
   // Used for synchronizing access to usage stats
   private final Object statsLock = new Object();
 
@@ -254,7 +248,6 @@ class FsDatasetImpl implements FsDataset
       LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
     }
     volumeMap = new ReplicaMap(this);
-    perVolumeReplicaMap = new HashMap<String, ReplicaMap>();
 
     @SuppressWarnings("unchecked")
     final VolumeChoosingPolicy<FsVolumeImpl> blockChooserImpl =
@@ -263,7 +256,7 @@ class FsDatasetImpl implements FsDataset
             RoundRobinVolumeChoosingPolicy.class,
             VolumeChoosingPolicy.class), conf);
     volumes = new FsVolumeList(volArray, volsFailed, blockChooserImpl);
-    volumes.initializeReplicaMaps(perVolumeReplicaMap, volumeMap, this);
+    volumes.initializeReplicaMaps(volumeMap);
 
     File[] roots = new File[storage.getNumStorageDirs()];
     for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
@@ -661,7 +654,6 @@ class FsDatasetImpl implements FsDataset
     
     // Replace finalized replica by a RBW replica in replicas map
     volumeMap.add(bpid, newReplicaInfo);
-    perVolumeReplicaMap.get(v.getStorageID()).add(bpid, newReplicaInfo);
     
     return newReplicaInfo;
   }
@@ -792,7 +784,6 @@ class FsDatasetImpl implements FsDataset
     ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(b.getBlockId(), 
         b.getGenerationStamp(), v, f.getParentFile());
     volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
-    perVolumeReplicaMap.get(v.getStorageID()).add(b.getBlockPoolId(), newReplicaInfo);
     return newReplicaInfo;
   }
   
@@ -911,7 +902,6 @@ class FsDatasetImpl implements FsDataset
     rbw.setBytesAcked(visible);
     // overwrite the RBW in the volume map
     volumeMap.add(b.getBlockPoolId(), rbw);
-    perVolumeReplicaMap.get(v.getStorageID()).add(b.getBlockPoolId(), rbw);
     return rbw;
   }
 
@@ -931,7 +921,6 @@ class FsDatasetImpl implements FsDataset
     ReplicaInPipeline newReplicaInfo = new ReplicaInPipeline(b.getBlockId(), 
         b.getGenerationStamp(), v, f.getParentFile());
     volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
-    perVolumeReplicaMap.get(v.getStorageID()).add(b.getBlockPoolId(), newReplicaInfo);
     
     return newReplicaInfo;
   }
@@ -1000,8 +989,6 @@ class FsDatasetImpl implements FsDataset
       newReplicaInfo = new FinalizedReplica(replicaInfo, v, dest.getParentFile());
     }
     volumeMap.add(bpid, newReplicaInfo);
-    perVolumeReplicaMap.get(newReplicaInfo.getVolume().getStorageID())
-        .add(bpid, newReplicaInfo);
     return newReplicaInfo;
   }
 
@@ -1015,8 +1002,6 @@ class FsDatasetImpl implements FsDataset
     if (replicaInfo != null && replicaInfo.getState() == ReplicaState.TEMPORARY) {
       // remove from volumeMap
       volumeMap.remove(b.getBlockPoolId(), b.getLocalBlock());
-      perVolumeReplicaMap.get(replicaInfo.getVolume().getStorageID())
-          .remove(b.getBlockPoolId(), b.getLocalBlock());
       
       // delete the on-disk temp file
       if (delBlockFromDisk(replicaInfo.getBlockFile(), 
@@ -1051,39 +1036,6 @@ class FsDatasetImpl implements FsDataset
     return true;
   }
 
-  private BlockListAsLongs getBlockReportWithReplicaMap(
-      String bpid, ReplicaMap rMap) {
-    int size =  rMap.size(bpid);
-    ArrayList<ReplicaInfo> finalized = new ArrayList<ReplicaInfo>(size);
-    ArrayList<ReplicaInfo> uc = new ArrayList<ReplicaInfo>();
-    if (size == 0) {
-      return new BlockListAsLongs(finalized, uc);
-    }
-    
-    synchronized(this) {
-      for (ReplicaInfo b : rMap.replicas(bpid)) {
-        switch(b.getState()) {
-        case FINALIZED:
-          finalized.add(b);
-          break;
-        case RBW:
-        case RWR:
-          uc.add(b);
-          break;
-        case RUR:
-          ReplicaUnderRecovery rur = (ReplicaUnderRecovery)b;
-          uc.add(rur.getOriginalReplica());
-          break;
-        case TEMPORARY:
-          break;
-        default:
-          assert false : "Illegal ReplicaInfo state.";
-        }
-      }
-      return new BlockListAsLongs(finalized, uc);
-    }
-  }
-
   @Override // FsDatasetSpi
   public List<Long> getCacheReport(String bpid) {
     return cacheManager.getCachedBlocks(bpid);
@@ -1091,16 +1043,49 @@ class FsDatasetImpl implements FsDataset
 
   @Override
   public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid) {
-    Map<DatanodeStorage, BlockListAsLongs> blockReportMap =
+    Map<DatanodeStorage, BlockListAsLongs> blockReportsMap =
         new HashMap<DatanodeStorage, BlockListAsLongs>();
 
-    for (FsVolumeImpl v : getVolumes()) {
-      ReplicaMap rMap = perVolumeReplicaMap.get(v.getStorageID());
-      BlockListAsLongs blockList = getBlockReportWithReplicaMap(bpid, rMap);
-      blockReportMap.put(v.toDatanodeStorage(), blockList);
+    Map<String, ArrayList<ReplicaInfo>> finalized =
+        new HashMap<String, ArrayList<ReplicaInfo>>();
+    Map<String, ArrayList<ReplicaInfo>> uc =
+        new HashMap<String, ArrayList<ReplicaInfo>>();
+
+    for (FsVolumeSpi v : volumes.volumes) {
+      finalized.put(v.getStorageID(), new ArrayList<ReplicaInfo>());
+      uc.put(v.getStorageID(), new ArrayList<ReplicaInfo>());
+    }
+
+    synchronized(this) {
+      for (ReplicaInfo b : volumeMap.replicas(bpid)) {
+        switch(b.getState()) {
+          case FINALIZED:
+            finalized.get(b.getVolume().getStorageID()).add(b);
+            break;
+          case RBW:
+          case RWR:
+            uc.get(b.getVolume().getStorageID()).add(b);
+            break;
+          case RUR:
+            ReplicaUnderRecovery rur = (ReplicaUnderRecovery)b;
+            uc.get(rur.getVolume().getStorageID()).add(rur.getOriginalReplica());
+            break;
+          case TEMPORARY:
+            break;
+          default:
+            assert false : "Illegal ReplicaInfo state.";
+        }
+      }
+    }
+
+    for (FsVolumeImpl v : volumes.volumes) {
+      ArrayList<ReplicaInfo> finalizedList = finalized.get(v.getStorageID());
+      ArrayList<ReplicaInfo> ucList = uc.get(v.getStorageID());
+      blockReportsMap.put(v.toDatanodeStorage(),
+                          new BlockListAsLongs(finalizedList, ucList));
     }
 
-    return blockReportMap;
+    return blockReportsMap;
   }
 
   /**
@@ -1244,7 +1229,6 @@ class FsDatasetImpl implements FsDataset
           v.clearPath(bpid, parent);
         }
         volumeMap.remove(bpid, invalidBlks[i]);
-        perVolumeReplicaMap.get(v.getStorageID()).remove(bpid, invalidBlks[i]);
       }
       // If the block is cached, start uncaching it.
       cacheManager.uncacheBlock(bpid, invalidBlks[i].getBlockId());
@@ -1374,8 +1358,6 @@ class FsDatasetImpl implements FsDataset
               LOG.warn("Removing replica " + bpid + ":" + b.getBlockId()
                   + " on failed volume " + fv.getCurrentDir().getAbsolutePath());
               ib.remove();
-              perVolumeReplicaMap.get(fv.getStorageID())
-                  .remove(bpid, b.getBlockId());
               removedBlocks++;
             }
           }
@@ -1492,7 +1474,6 @@ class FsDatasetImpl implements FsDataset
           // Block is in memory and not on the disk
           // Remove the block from volumeMap
           volumeMap.remove(bpid, blockId);
-          perVolumeReplicaMap.get(vol.getStorageID()).remove(bpid, blockId);
           final DataBlockScanner blockScanner = datanode.getBlockScanner();
           if (blockScanner != null) {
             blockScanner.deleteBlock(bpid, new Block(blockId));
@@ -1516,8 +1497,6 @@ class FsDatasetImpl implements FsDataset
         ReplicaInfo diskBlockInfo = new FinalizedReplica(blockId, 
             diskFile.length(), diskGS, vol, diskFile.getParentFile());
         volumeMap.add(bpid, diskBlockInfo);
-        perVolumeReplicaMap.get(vol.getStorageID())
-            .remove(bpid, diskBlockInfo);
         final DataBlockScanner blockScanner = datanode.getBlockScanner();
         if (blockScanner != null) {
           blockScanner.addBlock(new ExtendedBlock(bpid, diskBlockInfo));
@@ -1792,13 +1771,6 @@ class FsDatasetImpl implements FsDataset
     volumes.addBlockPool(bpid, conf);
     volumeMap.initBlockPool(bpid);
     volumes.getAllVolumesMap(bpid, volumeMap);
-
-    // TODO: Avoid the double scan.
-    for (FsVolumeImpl v : getVolumes()) {
-      ReplicaMap rMap = perVolumeReplicaMap.get(v.getStorageID());
-      rMap.initBlockPool(bpid);
-      volumes.getVolumeMap(bpid, v, rMap);
-    }
   }
 
   @Override

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java?rev=1550357&r1=1550356&r2=1550357&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java Thu Dec 12 07:01:24 2013
@@ -90,14 +90,9 @@ class FsVolumeList {
     return remaining;
   }
     
-  void initializeReplicaMaps(Map<String, ReplicaMap> perVolumeReplicaMap,
-                             ReplicaMap globalReplicaMap,
-                             Object mutex) throws IOException {
+  void initializeReplicaMaps(ReplicaMap globalReplicaMap) throws IOException {
     for (FsVolumeImpl v : volumes) {
-      ReplicaMap rMap = new ReplicaMap(mutex);
-      v.getVolumeMap(rMap);
-      perVolumeReplicaMap.put(v.getStorageID(), rMap);
-      globalReplicaMap.addAll(rMap);
+      v.getVolumeMap(globalReplicaMap);
     }
   }
   

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java?rev=1550357&r1=1550356&r2=1550357&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java Thu Dec 12 07:01:24 2013
@@ -71,4 +71,23 @@ public class DatanodeStorage {
   public static String generateUuid() {
     return "DS-" + UUID.randomUUID();
   }
+
+  @Override
+  public boolean equals(Object other){
+    if (other == this) {
+      return true;
+    }
+
+    if ((other == null) ||
+        !(other instanceof DatanodeStorage)) {
+      return false;
+    }
+    DatanodeStorage otherStorage = (DatanodeStorage) other;
+    return otherStorage.getStorageID().compareTo(getStorageID()) == 0;
+  }
+
+  @Override
+  public int hashCode() {
+    return getStorageID().hashCode();
+  }
 }
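
The equals()/hashCode() pair added above keys DatanodeStorage on its storage ID, which is what allows DatanodeStorage objects to serve as hash map keys in the Map<DatanodeStorage, BlockListAsLongs> returned by getBlockReports(). A minimal illustration of that property, using a simplified placeholder class rather than the real DatanodeStorage:

import java.util.HashMap;
import java.util.Map;

// Sketch only: two distinct objects describing the same storage must collapse
// to a single map key, otherwise the block-report map would hold duplicates.
class StorageKeySketch {
  static class Storage {
    final String storageId;
    Storage(String storageId) { this.storageId = storageId; }
    @Override public boolean equals(Object o) {
      return o instanceof Storage && ((Storage) o).storageId.equals(storageId);
    }
    @Override public int hashCode() { return storageId.hashCode(); }
  }

  public static void main(String[] args) {
    Map<Storage, String> reports = new HashMap<Storage, String>();
    reports.put(new Storage("DS-1"), "report-a");
    // Same storage ID, different object: replaces the entry instead of adding one.
    reports.put(new Storage("DS-1"), "report-b");
    System.out.println(reports.size()); // prints 1
  }
}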