You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by cn...@apache.org on 2013/10/31 00:50:23 UTC
svn commit: r1537347 - in
/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt
src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java
Author: cnauroth
Date: Wed Oct 30 23:50:22 2013
New Revision: 1537347
URL: http://svn.apache.org/r1537347
Log:
HDFS-4633. Merging change r1461846 and r1537345 from trunk to branch-2.
Modified:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1537347&r1=1537346&r2=1537347&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Oct 30 23:50:22 2013
@@ -214,6 +214,9 @@ Release 2.2.1 - UNRELEASED
HDFS-5065. TestSymlinkHdfsDisable fails on Windows. (ivanmi)
+ HDFS-4633. TestDFSClientExcludedNodes fails sporadically if excluded nodes
+ cache expires too quickly (Chris Nauroth via Sanjay)
+
Release 2.2.0 - 2013-10-13
INCOMPATIBLE CHANGES
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java?rev=1537347&r1=1537346&r2=1537347&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java Wed Oct 30 23:50:22 2013
@@ -32,6 +32,8 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.util.ThreadUtil;
+import org.junit.After;
+import org.junit.Before;
import org.junit.Test;
@@ -41,10 +43,25 @@ import org.junit.Test;
*/
public class TestDFSClientExcludedNodes {
- @Test(timeout=10000)
+ private MiniDFSCluster cluster;
+ private Configuration conf;
+
+ @Before
+ public void setUp() {
+ cluster = null;
+ conf = new HdfsConfiguration();
+ }
+
+ @After
+ public void tearDown() {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+
+ @Test(timeout=60000)
public void testExcludedNodes() throws IOException {
- Configuration conf = new HdfsConfiguration();
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs = cluster.getFileSystem();
Path filePath = new Path("/testExcludedNodes");
@@ -67,17 +84,16 @@ public class TestDFSClientExcludedNodes
}
}
- @Test(timeout=10000)
+ @Test(timeout=60000)
public void testExcludedNodesForgiveness() throws IOException {
- Configuration conf = new HdfsConfiguration();
- // Forgive nodes in under 1s for this test case.
+ // Forgive nodes in under 2.5s for this test case.
conf.setLong(
DFSConfigKeys.DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL,
- 1000);
+ 2500);
// We'll be using a 512 bytes block size just for tests
// so making sure the checksum bytes too match it.
conf.setInt("io.bytes.per.checksum", 512);
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
List<DataNodeProperties> props = cluster.dataNodes;
FileSystem fs = cluster.getFileSystem();
Path filePath = new Path("/testForgivingExcludedNodes");
@@ -112,11 +128,11 @@ public class TestDFSClientExcludedNodes
Assert.assertEquals(true, cluster.restartDataNode(two, true));
cluster.waitActive();
- // Sleep for 2s, to let the excluded nodes be expired
+ // Sleep for 5s, to let the excluded nodes be expired
// from the excludes list (i.e. forgiven after the configured wait period).
- // [Sleeping just in case the restart of the DNs completed < 2s cause
+ // [Sleeping just in case the restart of the DNs completed < 5s cause
// otherwise, we'll end up quickly excluding those again.]
- ThreadUtil.sleepAtLeastIgnoreInterrupts(2000);
+ ThreadUtil.sleepAtLeastIgnoreInterrupts(5000);
// Terminate the last good DN, to assert that there's no
// single-DN-available scenario, caused by not forgiving the other