You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by wa...@apache.org on 2016/03/21 21:14:47 UTC
[34/50] [abbrv] hadoop git commit: HDFS-9949. Add a test case to
ensure that the DataNode does not regenerate its UUID when a storage
directory is cleared (Harsh J via cmccabe)
HDFS-9949. Add a test case to ensure that the DataNode does not regenerate its UUID when a storage directory is cleared (Harsh J via cmccabe)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc951e60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc951e60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc951e60
Branch: refs/heads/YARN-3368
Commit: dc951e606f40bb779632a8a3e3a46aeccc4a446a
Parents: ca8106d
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Thu Mar 17 10:37:42 2016 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Thu Mar 17 10:37:42 2016 -0700
----------------------------------------------------------------------
.../hdfs/server/datanode/TestDataNodeUUID.java | 52 ++++++++++++++++++++
1 file changed, 52 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc951e60/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java
index 34e53a3..ebf7c35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java
@@ -19,17 +19,21 @@
package org.apache.hadoop.hdfs.server.datanode;
+import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;
+import java.io.File;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
public class TestDataNodeUUID {
@@ -62,4 +66,52 @@ public class TestDataNodeUUID {
// Make sure that we have a valid DataNodeUUID at that point of time.
assertNotEquals(dn.getDatanodeUuid(), nullString);
}
+
+  /**
+   * Regression test for HDFS-9949: restarting a DataNode after one of its
+   * storage directories has been wiped must NOT cause it to generate a new
+   * datanode UUID, because the surviving directory still holds the original.
+   */
+  @Test(timeout = 10000)
+  public void testUUIDRegeneration() throws Exception {
+    // Supply the conventional default so the test still runs (rather than
+    // NPE-ing in new File(null)) when -Dtest.build.data is not set.
+    File baseDir = new File(
+        System.getProperty("test.build.data", "build/test/data"));
+    File disk1 = new File(baseDir, "disk1");
+    File disk2 = new File(baseDir, "disk2");
+
+    // Ensure the configured disks do not pre-exist
+    FileUtils.deleteDirectory(disk1);
+    FileUtils.deleteDirectory(disk2);
+
+    MiniDFSCluster cluster = null;
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.setStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+        disk1.toURI().toString(),
+        disk2.toURI().toString());
+    try {
+      // manageDataDfsDirs(false) keeps MiniDFSCluster from overriding the
+      // two explicit data directories configured above.
+      cluster = new MiniDFSCluster.Builder(conf)
+              .numDataNodes(1)
+              .manageDataDfsDirs(false)
+              .build();
+      cluster.waitActive();
+
+      // Grab the new-cluster UUID as the original one to test against
+      String originalUUID = cluster.getDataNodes().get(0).getDatanodeUuid();
+      // Stop and simulate a DN wipe or unmount-but-root-path condition
+      // on the second disk
+      MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0);
+      FileUtils.deleteDirectory(disk2);
+      assertTrue("Failed to recreate the data directory: " + disk2,
+          disk2.mkdirs());
+
+      // Restart and check if the UUID changed
+      assertTrue("DataNode failed to start up: " + dn,
+          cluster.restartDataNode(dn));
+      // We need to wait until the DN has completed registration
+      while (!cluster.getDataNodes().get(0).isDatanodeFullyStarted()) {
+        Thread.sleep(50);
+      }
+      // disk1 was left intact, so the DN must have reused its stored UUID.
+      assertEquals(
+          "DN generated a new UUID despite disk1 having it intact",
+          originalUUID, cluster.getDataNodes().get(0).getDatanodeUuid());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
}