Posted to hdfs-commits@hadoop.apache.org by ki...@apache.org on 2014/05/19 19:52:55 UTC
svn commit: r1595978 - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt
src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
Author: kihwal
Date: Mon May 19 17:52:55 2014
New Revision: 1595978
URL: http://svn.apache.org/r1595978
Log:
HDFS-6397. NN shows inconsistent value in deadnode count. Contributed by Mohammad Kamrul Islam.
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1595978&r1=1595977&r2=1595978&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon May 19 17:52:55 2014
@@ -578,6 +578,9 @@ Release 2.4.1 - UNRELEASED
     HDFS-6325. Append should fail if the last block has insufficient number of
     replicas (Keith Pak via cos)
 
+    HDFS-6397. NN shows inconsistent value in deadnode count.
+    (Mohammad Kamrul Islam via kihwal)
+
 Release 2.4.0 - 2014-04-07
 
   INCOMPATIBLE CHANGES
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1595978&r1=1595977&r2=1595978&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Mon May 19 17:52:55 2014
@@ -1057,15 +1057,7 @@ public class DatanodeManager {
 
   /** @return the number of dead datanodes. */
   public int getNumDeadDataNodes() {
-    int numDead = 0;
-    synchronized (datanodeMap) {
-      for(DatanodeDescriptor dn : datanodeMap.values()) {
-        if (isDatanodeDead(dn) ) {
-          numDead++;
-        }
-      }
-    }
-    return numDead;
+    return getDatanodeListForReport(DatanodeReportType.DEAD).size();
   }
 
   /** @return list of datanodes where decommissioning is in progress. */
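The one-line replacement above delegates the dead-node count to getDatanodeListForReport(DatanodeReportType.DEAD), the same code path that builds the dead-node list for reports and the web UI. That path also counts hosts that appear in the dfs.hosts include file but never registered with the NameNode, which the old loop over datanodeMap could not see; that is the mismatch behind the inconsistent count. Below is a minimal, self-contained sketch of the mismatch; RegisteredNode, registered and includeHosts are illustrative stand-ins, not HDFS classes or fields.

// Minimal sketch, not HDFS code: RegisteredNode, registered and includeHosts are
// hypothetical stand-ins for DatanodeDescriptor, datanodeMap and the dfs.hosts
// include list, used only to show why the two counting paths could disagree.
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class DeadCountSketch {

  /** Stand-in for a registered datanode (illustrative only). */
  static class RegisteredNode {
    final String host;
    final boolean dead;
    RegisteredNode(String host, boolean dead) {
      this.host = host;
      this.dead = dead;
    }
  }

  public static void main(String[] args) {
    // Hosts listed in the include file; in this scenario none of them ever registers.
    Set<String> includeHosts =
        new HashSet<>(Arrays.asList("localhost:52", "127.0.0.1:7777"));
    // Registered datanodes (the analogue of datanodeMap): empty here.
    Map<String, RegisteredNode> registered = new HashMap<>();

    // Old-style count: only registered nodes are examined, so nothing is dead.
    int oldCount = 0;
    for (RegisteredNode dn : registered.values()) {
      if (dn.dead) {
        oldCount++;
      }
    }

    // Report-style count: include-file hosts that never registered are treated as
    // dead placeholders, in addition to any registered node that is actually dead.
    Set<String> deadForReport = new HashSet<>(includeHosts);
    for (RegisteredNode dn : registered.values()) {
      if (dn.dead) {
        deadForReport.add(dn.host);
      } else {
        deadForReport.remove(dn.host);
      }
    }

    // Prints: old loop = 0, report = 2; the inconsistency HDFS-6397 removes.
    System.out.println("old loop = " + oldCount + ", report = " + deadForReport.size());
  }
}

With the fix, the count and the list come from the same call and can no longer drift apart; the new test below exercises exactly this case, with two include-file hosts and zero running datanodes.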
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java?rev=1595978&r1=1595977&r2=1595978&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java Mon May 19 17:52:55 2014
@@ -129,4 +129,44 @@ public class TestHostsFiles {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testHostsIncludeForDeadCount() throws Exception {
+    Configuration conf = getConf();
+
+    // Configure an excludes file
+    FileSystem localFileSys = FileSystem.getLocal(conf);
+    Path workingDir = localFileSys.getWorkingDirectory();
+    Path dir = new Path(workingDir, "build/test/data/temp/decommission");
+    Path excludeFile = new Path(dir, "exclude");
+    Path includeFile = new Path(dir, "include");
+    assertTrue(localFileSys.mkdirs(dir));
+    StringBuilder includeHosts = new StringBuilder();
+    includeHosts.append("localhost:52").append("\n").append("127.0.0.1:7777")
+        .append("\n");
+    DFSTestUtil.writeFile(localFileSys, excludeFile, "");
+    DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
+    conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
+    conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
+
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      final FSNamesystem ns = cluster.getNameNode().getNamesystem();
+      assertTrue(ns.getNumDeadDataNodes() == 2);
+      assertTrue(ns.getNumLiveDataNodes() == 0);
+
+      // Testing using MBeans
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      ObjectName mxbeanName = new ObjectName(
+          "Hadoop:service=NameNode,name=FSNamesystemState");
+      String nodes = mbs.getAttribute(mxbeanName, "NumDeadDataNodes") + "";
+      assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 2);
+      assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumLiveDataNodes") == 0);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
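The new test verifies the count both through FSNamesystem directly and through the FSNamesystemState MBean. The same MBean is exposed over HTTP by the NameNode's /jmx servlet, so the now-consistent figures can also be checked from outside the JVM. A minimal sketch of such a check follows; the host and port are assumptions (50070 is only the default NameNode HTTP port in Hadoop 2.x) and should be replaced with the address of a real NameNode.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class DeadNodeCountCheck {
  public static void main(String[] args) throws Exception {
    // Assumed NameNode HTTP address; adjust host and port for your cluster.
    URL url = new URL("http://localhost:50070/jmx"
        + "?qry=Hadoop:service=NameNode,name=FSNamesystemState");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
      String line;
      while ((line = in.readLine()) != null) {
        // Crude scan of the JSON response for the two attributes the test asserts on.
        if (line.contains("NumDeadDataNodes") || line.contains("NumLiveDataNodes")) {
          System.out.println(line.trim());
        }
      }
    } finally {
      conn.disconnect();
    }
  }
}

The response is JSON; a production check would parse it properly rather than scanning lines, but the attribute names match the ones asserted in the test above.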