You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2014/07/25 22:33:22 UTC
svn commit: r1613514 [6/6] - in
/hadoop/common/branches/YARN-1051/hadoop-hdfs-project:
hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/
hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/
hadoop-hdfs-nfs/src/test/java/org/apache/... [remainder of path truncated in the plain-text archive]
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java Fri Jul 25 20:33:09 2014
@@ -39,14 +39,18 @@ import org.apache.hadoop.hdfs.DFSConfigK
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.TestDFSClientRetries;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
-import org.apache.hadoop.hdfs.TestDFSClientRetries;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
/** Test WebHDFS */
public class TestWebHDFS {
@@ -445,4 +449,37 @@ public class TestWebHDFS {
}
}
}
+
+ /**
+ * Make sure a RetriableException is thrown when rpcServer is null in
+ * NamenodeWebHdfsMethods.
+ */
+ @Test
+ public void testRaceWhileNNStartup() throws Exception {
+ MiniDFSCluster cluster = null;
+ final Configuration conf = WebHdfsTestUtil.createConf();
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+ cluster.waitActive();
+ final NameNode namenode = cluster.getNameNode();
+ final NamenodeProtocols rpcServer = namenode.getRpcServer();
+ Whitebox.setInternalState(namenode, "rpcServer", null);
+
+ final Path foo = new Path("/foo");
+ final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+ WebHdfsFileSystem.SCHEME);
+ try {
+ webHdfs.mkdirs(foo);
+ fail("Expected RetriableException");
+ } catch (RetriableException e) {
+ GenericTestUtils.assertExceptionContains("Namenode is in startup mode",
+ e);
+ }
+ Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
}
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java Fri Jul 25 20:33:09 2014
@@ -18,6 +18,15 @@
package org.apache.hadoop.hdfs.web;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.HashMap;
+import java.util.Map;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -29,18 +38,14 @@ import org.apache.hadoop.hdfs.DFSTestUti
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.token.Token;
import org.junit.Assert;
import org.junit.Test;
-
-import java.io.IOException;
-import java.net.URI;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.verify;
+import org.mockito.internal.util.reflection.Whitebox;
public class TestWebHDFSForHA {
private static final String LOGICAL_NAME = "minidfs";
@@ -182,4 +187,61 @@ public class TestWebHDFSForHA {
}
}
}
+
+ /**
+ * Make sure the WebHdfsFileSystem will retry based on RetriableException when
+ * rpcServer is null in NamenodeWebHdfsMethods while NameNode starts up.
+ */
+ @Test (timeout=120000)
+ public void testRetryWhileNNStartup() throws Exception {
+ final Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
+ MiniDFSCluster cluster = null;
+ final Map<String, Boolean> resultMap = new HashMap<String, Boolean>();
+
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
+ .numDataNodes(0).build();
+ HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
+ cluster.waitActive();
+ cluster.transitionToActive(0);
+
+ final NameNode namenode = cluster.getNameNode(0);
+ final NamenodeProtocols rpcServer = namenode.getRpcServer();
+ Whitebox.setInternalState(namenode, "rpcServer", null);
+
+ new Thread() {
+ @Override
+ public void run() {
+ boolean result = false;
+ FileSystem fs = null;
+ try {
+ fs = FileSystem.get(WEBHDFS_URI, conf);
+ final Path dir = new Path("/test");
+ result = fs.mkdirs(dir);
+ } catch (IOException e) {
+ result = false;
+ } finally {
+ IOUtils.cleanup(null, fs);
+ }
+ synchronized (TestWebHDFSForHA.this) {
+ resultMap.put("mkdirs", result);
+ TestWebHDFSForHA.this.notifyAll();
+ }
+ }
+ }.start();
+
+ Thread.sleep(1000);
+ Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
+ synchronized (this) {
+ while (!resultMap.containsKey("mkdirs")) {
+ this.wait();
+ }
+ Assert.assertTrue(resultMap.get("mkdirs"));
+ }
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
}
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java Fri Jul 25 20:33:09 2014
@@ -355,12 +355,6 @@ public class TestParam {
public void testXAttrNameParam() {
final XAttrNameParam p = new XAttrNameParam("user.a1");
Assert.assertEquals(p.getXAttrName(), "user.a1");
- try {
- new XAttrNameParam("a1");
- Assert.fail();
- } catch (IllegalArgumentException e) {
- LOG.info("EXPECTED: " + e);
- }
}
@Test
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java Fri Jul 25 20:33:09 2014
@@ -60,7 +60,14 @@ public class TestNetworkTopology {
DFSTestUtil.getDatanodeDescriptor("10.10.10.10", "/d3/r1"),
DFSTestUtil.getDatanodeDescriptor("11.11.11.11", "/d3/r1"),
DFSTestUtil.getDatanodeDescriptor("12.12.12.12", "/d3/r2"),
- DFSTestUtil.getDatanodeDescriptor("13.13.13.13", "/d3/r2")
+ DFSTestUtil.getDatanodeDescriptor("13.13.13.13", "/d3/r2"),
+ DFSTestUtil.getDatanodeDescriptor("14.14.14.14", "/d4/r1"),
+ DFSTestUtil.getDatanodeDescriptor("15.15.15.15", "/d4/r1"),
+ DFSTestUtil.getDatanodeDescriptor("16.16.16.16", "/d4/r1"),
+ DFSTestUtil.getDatanodeDescriptor("17.17.17.17", "/d4/r1"),
+ DFSTestUtil.getDatanodeDescriptor("18.18.18.18", "/d4/r1"),
+ DFSTestUtil.getDatanodeDescriptor("19.19.19.19", "/d4/r1"),
+ DFSTestUtil.getDatanodeDescriptor("20.20.20.20", "/d4/r1"),
};
for (int i = 0; i < dataNodes.length; i++) {
cluster.add(dataNodes[i]);
@@ -107,7 +114,7 @@ public class TestNetworkTopology {
@Test
public void testRacks() throws Exception {
- assertEquals(cluster.getNumOfRacks(), 5);
+ assertEquals(cluster.getNumOfRacks(), 6);
assertTrue(cluster.isOnSameRack(dataNodes[0], dataNodes[1]));
assertFalse(cluster.isOnSameRack(dataNodes[1], dataNodes[2]));
assertTrue(cluster.isOnSameRack(dataNodes[2], dataNodes[3]));
@@ -133,7 +140,7 @@ public class TestNetworkTopology {
testNodes[1] = dataNodes[2];
testNodes[2] = dataNodes[0];
cluster.sortByDistance(dataNodes[0], testNodes,
- testNodes.length, 0xDEADBEEF);
+ testNodes.length, 0xDEADBEEF, false);
assertTrue(testNodes[0] == dataNodes[0]);
assertTrue(testNodes[1] == dataNodes[1]);
assertTrue(testNodes[2] == dataNodes[2]);
@@ -146,7 +153,7 @@ public class TestNetworkTopology {
dtestNodes[3] = dataNodes[9];
dtestNodes[4] = dataNodes[10];
cluster.sortByDistance(dataNodes[8], dtestNodes,
- dtestNodes.length - 2, 0xDEADBEEF);
+ dtestNodes.length - 2, 0xDEADBEEF, false);
assertTrue(dtestNodes[0] == dataNodes[8]);
assertTrue(dtestNodes[1] == dataNodes[11]);
assertTrue(dtestNodes[2] == dataNodes[12]);
@@ -158,7 +165,7 @@ public class TestNetworkTopology {
testNodes[1] = dataNodes[3];
testNodes[2] = dataNodes[0];
cluster.sortByDistance(dataNodes[0], testNodes,
- testNodes.length, 0xDEADBEEF);
+ testNodes.length, 0xDEADBEEF, false);
assertTrue(testNodes[0] == dataNodes[0]);
assertTrue(testNodes[1] == dataNodes[1]);
assertTrue(testNodes[2] == dataNodes[3]);
@@ -168,7 +175,7 @@ public class TestNetworkTopology {
testNodes[1] = dataNodes[3];
testNodes[2] = dataNodes[1];
cluster.sortByDistance(dataNodes[0], testNodes,
- testNodes.length, 0xDEADBEEF);
+ testNodes.length, 0xDEADBEEF, false);
assertTrue(testNodes[0] == dataNodes[1]);
assertTrue(testNodes[1] == dataNodes[3]);
assertTrue(testNodes[2] == dataNodes[5]);
@@ -178,7 +185,7 @@ public class TestNetworkTopology {
testNodes[1] = dataNodes[5];
testNodes[2] = dataNodes[3];
cluster.sortByDistance(dataNodes[0], testNodes,
- testNodes.length, 0xDEADBEEF);
+ testNodes.length, 0xDEADBEEF, false);
assertTrue(testNodes[0] == dataNodes[1]);
assertTrue(testNodes[1] == dataNodes[3]);
assertTrue(testNodes[2] == dataNodes[5]);
@@ -188,7 +195,7 @@ public class TestNetworkTopology {
testNodes[1] = dataNodes[5];
testNodes[2] = dataNodes[3];
cluster.sortByDistance(dataNodes[0], testNodes,
- testNodes.length, 0xDEAD);
+ testNodes.length, 0xDEAD, false);
// sortByDistance does not take the "data center" layer into consideration
// and it doesn't sort by getDistance, so 1, 5, 3 is also valid here
assertTrue(testNodes[0] == dataNodes[1]);
@@ -204,7 +211,27 @@ public class TestNetworkTopology {
testNodes[1] = dataNodes[6];
testNodes[2] = dataNodes[7];
cluster.sortByDistance(dataNodes[i], testNodes,
- testNodes.length, 0xBEADED+i);
+ testNodes.length, 0xBEADED+i, false);
+ if (first == null) {
+ first = testNodes[0];
+ } else {
+ if (first != testNodes[0]) {
+ foundRandom = true;
+ break;
+ }
+ }
+ }
+ assertTrue("Expected to find a different first location", foundRandom);
+ // Array of rack local nodes with randomizeBlockLocationsPerBlock set to
+ // true
+ // Expect random order of block locations for same block
+ first = null;
+ for (int i = 1; i <= 4; i++) {
+ testNodes[0] = dataNodes[13];
+ testNodes[1] = dataNodes[14];
+ testNodes[2] = dataNodes[15];
+ cluster.sortByDistance(dataNodes[15 + i], testNodes, testNodes.length,
+ 0xBEADED, true);
if (first == null) {
first = testNodes[0];
} else {
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java Fri Jul 25 20:33:09 2014
@@ -151,8 +151,10 @@ public class TestRefreshUserMappings {
final String [] GROUP_NAMES2 = new String [] {"gr3" , "gr4"};
//keys in conf
- String userKeyGroups = DefaultImpersonationProvider.getProxySuperuserGroupConfKey(SUPER_USER);
- String userKeyHosts = DefaultImpersonationProvider.getProxySuperuserIpConfKey (SUPER_USER);
+ String userKeyGroups = DefaultImpersonationProvider.getTestProvider().
+ getProxySuperuserGroupConfKey(SUPER_USER);
+ String userKeyHosts = DefaultImpersonationProvider.getTestProvider().
+ getProxySuperuserIpConfKey (SUPER_USER);
config.set(userKeyGroups, "gr3,gr4,gr5"); // superuser can proxy for this group
config.set(userKeyHosts,"127.0.0.1");
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
Binary files - no diff available.
Modified: hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml?rev=1613514&r1=1613513&r2=1613514&view=diff
==============================================================================
--- hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml (original)
+++ hadoop/common/branches/YARN-1051/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml Fri Jul 25 20:33:09 2014
@@ -986,6 +986,8 @@
<NAMESPACE>USER</NAMESPACE>
<NAME>a2</NAME>
</XATTR>
+ <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+ <RPC_CALLID>82</RPC_CALLID>
</DATA>
</RECORD>
<RECORD>