You are viewing a plain text version of this content; the canonical (HTML) version is available in the mailing list archive.
Posted to hdfs-commits@hadoop.apache.org by jl...@apache.org on 2012/10/11 17:09:59 UTC
svn commit: r1397100 - in
/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs:
CHANGES.txt
src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
Author: jlowe
Date: Thu Oct 11 15:09:58 2012
New Revision: 1397100
URL: http://svn.apache.org/viewvc?rev=1397100&view=rev
Log:
HDFS-3224. Bug in check for DN re-registration with different storage ID. Contributed by Jason Lowe
Modified:
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1397100&r1=1397099&r2=1397100&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Oct 11 15:09:58 2012
@@ -24,6 +24,9 @@ Release 0.23.5 - UNRELEASED
HDFS-3824. TestHftpDelegationToken fails intermittently with JDK7 (Trevor
Robinson via tgraves)
+ HDFS-3224. Bug in check for DN re-registration with different storage ID
+ (jlowe)
+
Release 0.23.4 - UNRELEASED
INCOMPATIBLE CHANGES
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1397100&r1=1397099&r2=1397100&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Thu Oct 11 15:09:58 2012
@@ -574,7 +574,8 @@ public class DatanodeManager {
+ " storage " + nodeReg.getStorageID());
DatanodeDescriptor nodeS = datanodeMap.get(nodeReg.getStorageID());
- DatanodeDescriptor nodeN = getDatanodeByHost(nodeReg.getName());
+ DatanodeDescriptor nodeN = host2DatanodeMap.getDatanodeByName(
+ nodeReg.getName());
if (nodeN != null && nodeN != nodeS) {
NameNode.LOG.info("BLOCK* NameSystem.registerDatanode: "
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java?rev=1397100&r1=1397099&r2=1397100&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java Thu Oct 11 15:09:58 2012
@@ -17,13 +17,20 @@
*/
package org.apache.hadoop.hdfs;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
import java.net.InetSocketAddress;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.DFSClient;
import junit.framework.TestCase;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+
/**
* This class tests that a file need not be closed before its
* data can be read by another client.
@@ -74,4 +81,50 @@ public class TestDatanodeRegistration ex
}
}
}
+
+ public void testChangeStorageID() throws Exception {
+ final String DN_IP_ADDR = "127.0.0.1";
+ final int DN_XFER_PORT = 12345;
+ HdfsConfiguration conf = new HdfsConfiguration();
+ MiniDFSCluster cluster = null;
+ try {
+ cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(0)
+ .build();
+ InetSocketAddress addr = new InetSocketAddress(
+ "localhost",
+ cluster.getNameNodePort());
+ DFSClient client = new DFSClient(addr, conf);
+ NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
+
+ // register a datanode
+ String nodeName = DN_IP_ADDR + ":" + DN_XFER_PORT;
+ long nnCTime = cluster.getNameNodeRpc().versionRequest().getCTime();
+ StorageInfo mockStorageInfo = mock(StorageInfo.class);
+ doReturn(nnCTime).when(mockStorageInfo).getCTime();
+ doReturn(HdfsConstants.LAYOUT_VERSION).when(mockStorageInfo)
+ .getLayoutVersion();
+ doReturn("fake-storage-id").when(mockStorageInfo).getClusterID();
+ DatanodeRegistration dnReg = new DatanodeRegistration(nodeName);
+ dnReg.storageInfo = mockStorageInfo;
+ rpcServer.registerDatanode(dnReg);
+
+ DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
+ assertEquals("Expected a registered datanode", 1, report.length);
+
+ // register the same datanode again with a different storage ID
+ doReturn("changed-fake-storage-id").when(mockStorageInfo).getClusterID();
+ dnReg = new DatanodeRegistration(nodeName);
+ dnReg.storageInfo = mockStorageInfo;
+ rpcServer.registerDatanode(dnReg);
+
+ report = client.datanodeReport(DatanodeReportType.ALL);
+ assertEquals("Datanode with changed storage ID not recognized",
+ 1, report.length);
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
}