Posted to common-commits@hadoop.apache.org by be...@apache.org on 2015/12/23 00:28:39 UTC

hadoop git commit: HDFS-9034. StorageTypeStats Metric should not count failed storage. Contributed by Surendra Singh Lilhore.

Repository: hadoop
Updated Branches:
  refs/heads/trunk 867048c3e -> df8323094


HDFS-9034. StorageTypeStats Metric should not count failed storage. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df832309
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df832309
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df832309

Branch: refs/heads/trunk
Commit: df83230948204ee2d2b06ecc66ce0163e2df27ef
Parents: 867048c
Author: Benoy Antony <be...@apache.org>
Authored: Tue Dec 22 15:28:17 2015 -0800
Committer: Benoy Antony <be...@apache.org>
Committed: Tue Dec 22 15:28:17 2015 -0800

----------------------------------------------------------------------
 .../server/blockmanagement/DatanodeManager.java |  1 +
 .../server/blockmanagement/DatanodeStats.java   | 19 ++++--
 .../blockmanagement/HeartbeatManager.java       |  6 +-
 .../blockmanagement/TestBlockStatsMXBean.java   | 62 ++++++++++++++++++++
 4 files changed, 81 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df832309/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 0c0c01a..53f7043 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -963,6 +963,7 @@ public class DatanodeManager {
         // no need to update its timestamp
        // because it is done when the descriptor is created
         heartbeatManager.addDatanode(nodeDescr);
+        heartbeatManager.updateDnStat(nodeDescr);
         incrementVersionCount(nodeReg.getSoftwareVersion());
         startDecommissioningIfExcluded(nodeDescr);
         success = true;
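
This is the caller side of the fix: registration still adds the node to the
heartbeat manager first, then folds it into the per-storage-type statistics
as an explicit second step through the new updateDnStat() hook, rather than
having addDatanode() do both implicitly. A self-contained sketch of the
resulting split follows the HeartbeatManager.java hunk below.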

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df832309/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
index 4c39c41..bcc9bba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 
 import java.util.EnumMap;
 import java.util.HashSet;
@@ -61,8 +62,10 @@ class DatanodeStats {
     }
     Set<StorageType> storageTypes = new HashSet<>();
     for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
-      statsMap.addStorage(storageInfo, node);
-      storageTypes.add(storageInfo.getStorageType());
+      if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
+        statsMap.addStorage(storageInfo, node);
+        storageTypes.add(storageInfo.getStorageType());
+      }
     }
     for (StorageType storageType : storageTypes) {
       statsMap.addNode(storageType, node);
@@ -86,8 +89,10 @@ class DatanodeStats {
     }
     Set<StorageType> storageTypes = new HashSet<>();
     for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
-      statsMap.subtractStorage(storageInfo, node);
-      storageTypes.add(storageInfo.getStorageType());
+      if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
+        statsMap.subtractStorage(storageInfo, node);
+        storageTypes.add(storageInfo.getStorageType());
+      }
     }
     for (StorageType storageType : storageTypes) {
       statsMap.subtractNode(storageType, node);
@@ -202,10 +207,12 @@ class DatanodeStats {
 
     private void subtractNode(StorageType storageType,
         final DatanodeDescriptor node) {
-      StorageTypeStats storageTypeStats =
-          storageTypeStatsMap.get(storageType);
+      StorageTypeStats storageTypeStats = storageTypeStatsMap.get(storageType);
       if (storageTypeStats != null) {
         storageTypeStats.subtractNode(node);
+        if (storageTypeStats.getNodesInService() == 0) {
+          storageTypeStatsMap.remove(storageType);
+        }
       }
     }
   }
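
The heart of the fix is the DatanodeStorage.State.FAILED guard in both the
add and subtract loops: a failed volume no longer contributes to the
per-type totals, and the new check in subtractNode() drops a storage type
from the map once its last in-service node is gone, instead of leaving an
all-zero entry behind. Below is a minimal, self-contained sketch of that
pattern; the enum names mirror the HDFS classes, but the bookkeeping is
deliberately reduced to one capacity counter per type, so treat it as an
illustration rather than the real API:

import java.util.EnumMap;
import java.util.Map;

class StorageTypeStatsSketch {
  enum StorageType { DISK, ARCHIVE, RAM_DISK }
  enum State { NORMAL, FAILED }

  static class Storage {
    final StorageType type;
    final State state;
    final long capacity;
    Storage(StorageType type, State state, long capacity) {
      this.type = type;
      this.state = state;
      this.capacity = capacity;
    }
  }

  private final Map<StorageType, Long> capacityPerType =
      new EnumMap<StorageType, Long>(StorageType.class);

  // Same guard the patch adds around addStorage(): a FAILED volume is
  // skipped, so its capacity never inflates the per-type totals.
  void add(Storage s) {
    if (s.state == State.FAILED) {
      return;
    }
    Long cur = capacityPerType.get(s.type);
    capacityPerType.put(s.type, (cur == null ? 0L : cur) + s.capacity);
  }

  // Mirrors the subtractNode() change: once nothing is left for a
  // storage type, remove the key so a stale, all-zero entry does not
  // linger in the map (and hence in the MXBean output).
  void subtract(Storage s) {
    if (s.state == State.FAILED) {
      return;
    }
    Long cur = capacityPerType.get(s.type);
    if (cur == null) {
      return;
    }
    long remaining = cur - s.capacity;
    if (remaining <= 0) {
      capacityPerType.remove(s.type);
    } else {
      capacityPerType.put(s.type, remaining);
    }
  }
}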

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df832309/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
index 9f23b32..7546b1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
@@ -203,6 +203,7 @@ class HeartbeatManager implements DatanodeStatistics {
 
       //update its timestamp
       d.updateHeartbeatState(StorageReport.EMPTY_ARRAY, 0L, 0L, 0, 0, null);
+      stats.add(d);
     }
   }
 
@@ -212,11 +213,14 @@ class HeartbeatManager implements DatanodeStatistics {
 
   synchronized void addDatanode(final DatanodeDescriptor d) {
     // update in-service node count
-    stats.add(d);
     datanodes.add(d);
     d.setAlive(true);
   }
 
+  void updateDnStat(final DatanodeDescriptor d) {
+    stats.add(d);
+  }
+
   synchronized void removeDatanode(DatanodeDescriptor node) {
     if (node.isAlive()) {
       stats.subtract(node);
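
The subtle part here is ordering and the division of labor. stats.add(d)
used to run inside addDatanode(), so statistics were folded in the moment a
descriptor was registered, before its storage state was settled; the patch
moves that to an explicit updateDnStat() call (made from
DatanodeManager.registerDatanode(), as shown in the first hunk) and, for the
re-registration path at the top of this hunk, to just after
updateHeartbeatState() resets the storage reports. One leftover wart: the
"// update in-service node count" comment in addDatanode() now describes
work the method no longer does. A toy model of the split, with simplified
names and types (not the real HDFS API):

import java.util.ArrayList;
import java.util.List;

class HeartbeatManagerSketch {
  static class Datanode {
    boolean alive;
  }

  private final List<Datanode> datanodes = new ArrayList<Datanode>();
  private int nodesInStats;

  // After the patch: membership and liveness only, no stats side
  // effect, so a node can be added before its storages are known.
  synchronized void addDatanode(Datanode d) {
    datanodes.add(d);
    d.alive = true;
  }

  // The new explicit hook; stands in for stats.add(d) in the real code.
  void updateDnStat(Datanode d) {
    nodesInStats++;
  }

  // Caller side, mirroring DatanodeManager.registerDatanode() above:
  // membership first, statistics as a deliberate second step.
  void register(Datanode d) {
    addDatanode(d);
    updateDnStat(d);
  }
}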

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df832309/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
index f5b5641..9e3112e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
@@ -18,10 +18,12 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.URL;
@@ -29,9 +31,12 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -143,4 +148,61 @@ public class TestBlockStatsMXBean {
     assertTrue(typesPresent.contains("DISK"));
     assertTrue(typesPresent.contains("RAM_DISK"));
   }
+
+  @Test
+  public void testStorageTypeStatsWhenStorageFailed() throws Exception {
+    DFSTestUtil.createFile(cluster.getFileSystem(),
+        new Path("/blockStatsFile1"), 1024, (short) 1, 0L);
+    Map<StorageType, StorageTypeStats> storageTypeStatsMap = cluster
+        .getNamesystem().getBlockManager().getStorageTypeStats();
+
+    StorageTypeStats storageTypeStats = storageTypeStatsMap
+        .get(StorageType.RAM_DISK);
+    assertEquals(6, storageTypeStats.getNodesInService());
+
+    storageTypeStats = storageTypeStatsMap.get(StorageType.DISK);
+    assertEquals(3, storageTypeStats.getNodesInService());
+
+    storageTypeStats = storageTypeStatsMap.get(StorageType.ARCHIVE);
+    assertEquals(3, storageTypeStats.getNodesInService());
+    String dataDir = cluster.getDataDirectory();
+    File dn1ArcVol1 = new File(dataDir, "data" + (3 * 0 + 2));
+    File dn2ArcVol1 = new File(dataDir, "data" + (3 * 1 + 2));
+    File dn3ArcVol1 = new File(dataDir, "data" + (3 * 2 + 2));
+    DataNodeTestUtils.injectDataDirFailure(dn1ArcVol1);
+    DataNodeTestUtils.injectDataDirFailure(dn2ArcVol1);
+    DataNodeTestUtils.injectDataDirFailure(dn3ArcVol1);
+    try {
+      DFSTestUtil.createFile(cluster.getFileSystem(), new Path(
+          "/blockStatsFile2"), 1024, (short) 1, 0L);
+      fail("Should throw exception, becuase no DISK storage available");
+    } catch (Exception e) {
+      assertTrue(e.getMessage().contains(
+          "could only be replicated to 0 nodes instead"));
+    }
+    // wait for heartbeat
+    Thread.sleep(6000);
+    storageTypeStatsMap = cluster.getNamesystem().getBlockManager()
+        .getStorageTypeStats();
+    assertFalse("StorageTypeStatsMap should not contain DISK Storage type",
+        storageTypeStatsMap.containsKey(StorageType.DISK));
+    DataNodeTestUtils.restoreDataDirFromFailure(dn1ArcVol1);
+    DataNodeTestUtils.restoreDataDirFromFailure(dn2ArcVol1);
+    DataNodeTestUtils.restoreDataDirFromFailure(dn3ArcVol1);
+    for (int i = 0; i < 3; i++) {
+      cluster.restartDataNode(0, true);
+    }
+    // wait for heartbeat
+    Thread.sleep(6000);
+    storageTypeStatsMap = cluster.getNamesystem().getBlockManager()
+        .getStorageTypeStats();
+    storageTypeStats = storageTypeStatsMap.get(StorageType.RAM_DISK);
+    assertEquals(6, storageTypeStats.getNodesInService());
+
+    storageTypeStats = storageTypeStatsMap.get(StorageType.DISK);
+    assertEquals(3, storageTypeStats.getNodesInService());
+
+    storageTypeStats = storageTypeStatsMap.get(StorageType.ARCHIVE);
+    assertEquals(3, storageTypeStats.getNodesInService());
+  }
 }
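
Two details of the new test are easy to misread. The Thread.sleep(6000)
calls wait out roughly two of the default 3-second heartbeat intervals so
the NameNode has seen fresh storage reports before the assertions run, and
calling cluster.restartDataNode(0, true) three times appears deliberate
rather than a typo: MiniDFSCluster removes a stopped datanode from its
internal list, so index 0 advances through the first three datanodes across
the loop.

For completeness, the map the test reads via
BlockManager#getStorageTypeStats() is the same data published over JMX,
which the rest of TestBlockStatsMXBean exercises over HTTP. A standalone
sketch of checking it against a live NameNode follows; the bean name is
inferred from the BlockStats MXBean this class covers, and localhost plus
the 2015-era default HTTP port 50070 are assumptions to adjust for a real
deployment:

import java.io.InputStream;
import java.net.URL;
import java.util.Scanner;

public class BlockStatsReader {
  public static void main(String[] args) throws Exception {
    // NameNode JMX-over-HTTP endpoint; query just the BlockStats bean.
    URL url = new URL("http://localhost:50070/jmx"
        + "?qry=Hadoop:service=NameNode,name=BlockStats");
    try (InputStream in = url.openStream();
         Scanner s = new Scanner(in, "UTF-8").useDelimiter("\\A")) {
      // JSON response; after this patch, a storage type whose every
      // volume has FAILED should no longer appear in StorageTypeStats.
      System.out.println(s.hasNext() ? s.next() : "");
    }
  }
}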