Posted to common-commits@hadoop.apache.org by zj...@apache.org on 2015/03/30 21:37:24 UTC

[18/20] hadoop git commit: HDFS-7261. storageMap is accessed without synchronization in DatanodeDescriptor#updateHeartbeatState() (Brahma Reddy Battula via Colin P. McCabe)

HDFS-7261. storageMap is accessed without synchronization in DatanodeDescriptor#updateHeartbeatState() (Brahma Reddy Battula via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/afb05c84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/afb05c84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/afb05c84

Branch: refs/heads/YARN-2928
Commit: afb05c84e625d85fd12287968ee6124470016ad7
Parents: 5c42a67
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Mon Mar 30 10:46:21 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Mon Mar 30 12:10:49 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  4 +++
 .../blockmanagement/DatanodeDescriptor.java     | 29 ++++++++++++--------
 2 files changed, 21 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
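
The fix applies one simple rule throughout: every read that copies, iterates, or sizes the shared storageMap must hold the same monitor that writers hold, because HashMap is not thread-safe and a concurrent read during a write can throw ConcurrentModificationException or observe an inconsistent view. A minimal standalone sketch of that pattern follows (illustrative names only, not the Hadoop classes):

  import java.util.HashMap;
  import java.util.HashSet;
  import java.util.Map;
  import java.util.Set;

  // Illustrative sketch only -- not the Hadoop code. Readers and writers
  // of a shared HashMap must synchronize on the same monitor.
  class StorageTracker {
    private final Map<String, String> storageMap =
        new HashMap<String, String>();

    void addStorage(String id, String info) {
      synchronized (storageMap) {      // writers take the map's monitor
        storageMap.put(id, info);
      }
    }

    Set<String> snapshotStorages() {
      synchronized (storageMap) {      // copying values() iterates the map
        return new HashSet<String>(storageMap.values());
      }
    }

    int storageCount() {
      synchronized (storageMap) {      // even size() needs the lock for a
        return storageMap.size();      // consistent answer
      }
    }
  }

The patch below applies exactly this pattern to the two reads in DatanodeDescriptor#updateHeartbeatState() and widens the existing synchronized block in pruneStorageMap().
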


http://git-wip-us.apache.org/repos/asf/hadoop/blob/afb05c84/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index efba80e..79a81c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -379,6 +379,10 @@ Release 2.8.0 - UNRELEASED
     HDFS-8002. Website refers to /trash directory. (Brahma Reddy Battula via
     aajisaka)
 
+    HDFS-7261. storageMap is accessed without synchronization in
+    DatanodeDescriptor#updateHeartbeatState() (Brahma Reddy Battula via Colin
+    P. McCabe)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/afb05c84/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index d0d7a72..4731ad4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -447,8 +447,10 @@ public class DatanodeDescriptor extends DatanodeInfo {
     if (checkFailedStorages) {
       LOG.info("Number of failed storage changes from "
           + this.volumeFailures + " to " + volFailures);
-      failedStorageInfos = new HashSet<DatanodeStorageInfo>(
-          storageMap.values());
+      synchronized (storageMap) {
+        failedStorageInfos =
+            new HashSet<DatanodeStorageInfo>(storageMap.values());
+      }
     }
 
     setCacheCapacity(cacheCapacity);
@@ -480,8 +482,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
     if (checkFailedStorages) {
       updateFailedStorage(failedStorageInfos);
     }
-
-    if (storageMap.size() != reports.length) {
+    long storageMapSize;
+    synchronized (storageMap) {
+      storageMapSize = storageMap.size();
+    }
+    if (storageMapSize != reports.length) {
       pruneStorageMap(reports);
     }
   }
@@ -491,14 +496,14 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * as long as they have associated block replicas.
    */
   private void pruneStorageMap(final StorageReport[] reports) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Number of storages reported in heartbeat=" + reports.length +
-                    "; Number of storages in storageMap=" + storageMap.size());
-    }
+    synchronized (storageMap) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Number of storages reported in heartbeat=" + reports.length
+            + "; Number of storages in storageMap=" + storageMap.size());
+      }
 
-    HashMap<String, DatanodeStorageInfo> excessStorages;
+      HashMap<String, DatanodeStorageInfo> excessStorages;
 
-    synchronized (storageMap) {
       // Init excessStorages with all known storages.
       excessStorages = new HashMap<String, DatanodeStorageInfo>(storageMap);
 
@@ -515,8 +520,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
           LOG.info("Removed storage " + storageInfo + " from DataNode" + this);
         } else if (LOG.isDebugEnabled()) {
           // This can occur until all block reports are received.
-          LOG.debug("Deferring removal of stale storage " + storageInfo +
-                        " with " + storageInfo.numBlocks() + " blocks");
+          LOG.debug("Deferring removal of stale storage " + storageInfo
+              + " with " + storageInfo.numBlocks() + " blocks");
         }
       }
     }
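
In pruneStorageMap(), the patch widens the existing synchronized block to also cover the debug logging and the excessStorages declaration, so the snapshot, the diff against the heartbeat reports, and the removals all run against one consistent view of storageMap. A hypothetical sketch of that prune pattern (the class name and the plain int block counter are stand-ins, not the Hadoop types):

  import java.util.HashMap;
  import java.util.Map;

  // Hypothetical sketch of the prune pattern, not the Hadoop code.
  class StoragePruner {
    private final Map<String, Integer> storageMap =
        new HashMap<String, Integer>();

    void pruneStorageMap(String[] reportedIds) {
      synchronized (storageMap) {
        // Start from all known storages ...
        Map<String, Integer> excess =
            new HashMap<String, Integer>(storageMap);
        // ... and drop every storage named in the heartbeat reports.
        for (String id : reportedIds) {
          excess.remove(id);
        }
        // What remains was not reported; remove it only once it holds no
        // block replicas, otherwise defer (block reports may still be
        // pending).
        for (Map.Entry<String, Integer> e : excess.entrySet()) {
          if (e.getValue() == 0) {
            storageMap.remove(e.getKey());
          }
        }
      }
    }
  }
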