You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2014/07/23 03:47:36 UTC
svn commit: r1612742 [4/4] - in
/hadoop/common/branches/MR-2841/hadoop-hdfs-project:
hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/
hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs/
hadoop-hdfs/src/contrib/bkjo...
Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java Wed Jul 23 01:47:28 2014
@@ -92,7 +92,7 @@ public class TestWebHdfsDataLocality {
//The chosen datanode must be the same as the client address
final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
- namenode, f, PutOpParam.Op.CREATE, -1L, blocksize);
+ namenode, f, PutOpParam.Op.CREATE, -1L, blocksize, null);
Assert.assertEquals(ipAddr, chosen.getIpAddr());
}
}
@@ -117,23 +117,104 @@ public class TestWebHdfsDataLocality {
{ //test GETFILECHECKSUM
final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
- namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize);
+ namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize, null);
Assert.assertEquals(expected, chosen);
}
{ //test OPEN
final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
- namenode, f, GetOpParam.Op.OPEN, 0, blocksize);
+ namenode, f, GetOpParam.Op.OPEN, 0, blocksize, null);
Assert.assertEquals(expected, chosen);
}
{ //test APPEND
final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
- namenode, f, PostOpParam.Op.APPEND, -1L, blocksize);
+ namenode, f, PostOpParam.Op.APPEND, -1L, blocksize, null);
Assert.assertEquals(expected, chosen);
}
} finally {
cluster.shutdown();
}
}
+
+ @Test
+ public void testExcludeDataNodes() throws Exception {
+ final Configuration conf = WebHdfsTestUtil.createConf();
+ final String[] racks = {RACK0, RACK0, RACK1, RACK1, RACK2, RACK2};
+ final String[] hosts = {"DataNode1", "DataNode2", "DataNode3","DataNode4","DataNode5","DataNode6"};
+ final int nDataNodes = hosts.length;
+ LOG.info("nDataNodes=" + nDataNodes + ", racks=" + Arrays.asList(racks)
+ + ", hosts=" + Arrays.asList(hosts));
+
+ final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+ .hosts(hosts).numDataNodes(nDataNodes).racks(racks).build();
+
+ try {
+ cluster.waitActive();
+
+ final DistributedFileSystem dfs = cluster.getFileSystem();
+ final NameNode namenode = cluster.getNameNode();
+ final DatanodeManager dm = namenode.getNamesystem().getBlockManager(
+ ).getDatanodeManager();
+ LOG.info("dm=" + dm);
+
+ final long blocksize = DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
+ final String f = "/foo";
+
+      //create a file with three replicas.
+ final Path p = new Path(f);
+ final FSDataOutputStream out = dfs.create(p, (short)3);
+ out.write(1);
+ out.close();
+
+ //get replica location.
+ final LocatedBlocks locatedblocks = NameNodeAdapter.getBlockLocations(
+ namenode, f, 0, 1);
+ final List<LocatedBlock> lb = locatedblocks.getLocatedBlocks();
+ Assert.assertEquals(1, lb.size());
+ final DatanodeInfo[] locations = lb.get(0).getLocations();
+ Assert.assertEquals(3, locations.length);
+
+
+      //For GETFILECHECKSUM, OPEN and APPEND,
+      //the chosen datanode must be different from the excluded nodes.
+
+ StringBuffer sb = new StringBuffer();
+ for (int i = 0; i < 2; i++) {
+ sb.append(locations[i].getXferAddr());
+ { // test GETFILECHECKSUM
+ final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
+ namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize,
+ sb.toString());
+ for (int j = 0; j <= i; j++) {
+ Assert.assertNotEquals(locations[j].getHostName(),
+ chosen.getHostName());
+ }
+ }
+
+ { // test OPEN
+ final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
+ namenode, f, GetOpParam.Op.OPEN, 0, blocksize, sb.toString());
+ for (int j = 0; j <= i; j++) {
+ Assert.assertNotEquals(locations[j].getHostName(),
+ chosen.getHostName());
+ }
+ }
+
+ { // test APPEND
+ final DatanodeInfo chosen = NamenodeWebHdfsMethods
+ .chooseDatanode(namenode, f, PostOpParam.Op.APPEND, -1L,
+ blocksize, sb.toString());
+ for (int j = 0; j <= i; j++) {
+ Assert.assertNotEquals(locations[j].getHostName(),
+ chosen.getHostName());
+ }
+ }
+
+ sb.append(",");
+ }
+ } finally {
+ cluster.shutdown();
+ }
+ }
}
\ No newline at end of file
Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java Wed Jul 23 01:47:28 2014
@@ -21,6 +21,7 @@ import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import com.google.common.base.Charsets;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -46,6 +47,7 @@ public class TestDFSAdminWithHA {
private PrintStream originErr;
private static final String NSID = "ns1";
+ private static String newLine = System.getProperty("line.separator");
private void assertOutputMatches(String string) {
String errOutput = new String(out.toByteArray(), Charsets.UTF_8);
@@ -99,6 +101,14 @@ public class TestDFSAdminWithHA {
System.err.flush();
System.setOut(originOut);
System.setErr(originErr);
+ if (admin != null) {
+ admin.close();
+ }
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ out.reset();
+ err.reset();
}
@Test(timeout = 30000)
@@ -108,25 +118,25 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-safemode", "enter"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Safe mode is ON in.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
// Get safemode
exitCode = admin.run(new String[] {"-safemode", "get"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is ON in.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
// Leave safemode
exitCode = admin.run(new String[] {"-safemode", "leave"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is OFF in.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
// Get safemode
exitCode = admin.run(new String[] {"-safemode", "get"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is OFF in.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -136,12 +146,12 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-safemode", "enter"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Safe mode is ON in.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
exitCode = admin.run(new String[] {"-saveNamespace"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Save namespace successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -151,17 +161,17 @@ public class TestDFSAdminWithHA {
assertEquals(err.toString().trim(), 0, exitCode);
String message = "restoreFailedStorage is set to false for.*";
// Default is false
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "restoreFailedStorage is set to true for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "restoreFailedStorage is set to false for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -170,7 +180,7 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-refreshNodes"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh nodes successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -179,7 +189,7 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Balancer bandwidth is set to 10 for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -189,7 +199,7 @@ public class TestDFSAdminWithHA {
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Created metasave file dfs.meta in the log directory"
+ " of namenode.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -198,7 +208,7 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh service acl successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -207,7 +217,7 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh user to groups mapping successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -217,7 +227,7 @@ public class TestDFSAdminWithHA {
new String[] {"-refreshSuperUserGroupsConfiguration"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh super user groups configuration successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -226,6 +236,6 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-refreshCallQueue"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh call queue successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
}
Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java Wed Jul 23 01:47:28 2014
@@ -60,7 +60,14 @@ public class TestNetworkTopology {
DFSTestUtil.getDatanodeDescriptor("10.10.10.10", "/d3/r1"),
DFSTestUtil.getDatanodeDescriptor("11.11.11.11", "/d3/r1"),
DFSTestUtil.getDatanodeDescriptor("12.12.12.12", "/d3/r2"),
- DFSTestUtil.getDatanodeDescriptor("13.13.13.13", "/d3/r2")
+ DFSTestUtil.getDatanodeDescriptor("13.13.13.13", "/d3/r2"),
+ DFSTestUtil.getDatanodeDescriptor("14.14.14.14", "/d4/r1"),
+ DFSTestUtil.getDatanodeDescriptor("15.15.15.15", "/d4/r1"),
+ DFSTestUtil.getDatanodeDescriptor("16.16.16.16", "/d4/r1"),
+ DFSTestUtil.getDatanodeDescriptor("17.17.17.17", "/d4/r1"),
+ DFSTestUtil.getDatanodeDescriptor("18.18.18.18", "/d4/r1"),
+ DFSTestUtil.getDatanodeDescriptor("19.19.19.19", "/d4/r1"),
+ DFSTestUtil.getDatanodeDescriptor("20.20.20.20", "/d4/r1"),
};
for (int i = 0; i < dataNodes.length; i++) {
cluster.add(dataNodes[i]);
@@ -107,7 +114,7 @@ public class TestNetworkTopology {
@Test
public void testRacks() throws Exception {
- assertEquals(cluster.getNumOfRacks(), 5);
+ assertEquals(cluster.getNumOfRacks(), 6);
assertTrue(cluster.isOnSameRack(dataNodes[0], dataNodes[1]));
assertFalse(cluster.isOnSameRack(dataNodes[1], dataNodes[2]));
assertTrue(cluster.isOnSameRack(dataNodes[2], dataNodes[3]));
@@ -133,7 +140,7 @@ public class TestNetworkTopology {
testNodes[1] = dataNodes[2];
testNodes[2] = dataNodes[0];
cluster.sortByDistance(dataNodes[0], testNodes,
- testNodes.length, 0xDEADBEEF);
+ testNodes.length, 0xDEADBEEF, false);
assertTrue(testNodes[0] == dataNodes[0]);
assertTrue(testNodes[1] == dataNodes[1]);
assertTrue(testNodes[2] == dataNodes[2]);
@@ -146,7 +153,7 @@ public class TestNetworkTopology {
dtestNodes[3] = dataNodes[9];
dtestNodes[4] = dataNodes[10];
cluster.sortByDistance(dataNodes[8], dtestNodes,
- dtestNodes.length - 2, 0xDEADBEEF);
+ dtestNodes.length - 2, 0xDEADBEEF, false);
assertTrue(dtestNodes[0] == dataNodes[8]);
assertTrue(dtestNodes[1] == dataNodes[11]);
assertTrue(dtestNodes[2] == dataNodes[12]);
@@ -158,7 +165,7 @@ public class TestNetworkTopology {
testNodes[1] = dataNodes[3];
testNodes[2] = dataNodes[0];
cluster.sortByDistance(dataNodes[0], testNodes,
- testNodes.length, 0xDEADBEEF);
+ testNodes.length, 0xDEADBEEF, false);
assertTrue(testNodes[0] == dataNodes[0]);
assertTrue(testNodes[1] == dataNodes[1]);
assertTrue(testNodes[2] == dataNodes[3]);
@@ -168,7 +175,7 @@ public class TestNetworkTopology {
testNodes[1] = dataNodes[3];
testNodes[2] = dataNodes[1];
cluster.sortByDistance(dataNodes[0], testNodes,
- testNodes.length, 0xDEADBEEF);
+ testNodes.length, 0xDEADBEEF, false);
assertTrue(testNodes[0] == dataNodes[1]);
assertTrue(testNodes[1] == dataNodes[3]);
assertTrue(testNodes[2] == dataNodes[5]);
@@ -178,7 +185,7 @@ public class TestNetworkTopology {
testNodes[1] = dataNodes[5];
testNodes[2] = dataNodes[3];
cluster.sortByDistance(dataNodes[0], testNodes,
- testNodes.length, 0xDEADBEEF);
+ testNodes.length, 0xDEADBEEF, false);
assertTrue(testNodes[0] == dataNodes[1]);
assertTrue(testNodes[1] == dataNodes[3]);
assertTrue(testNodes[2] == dataNodes[5]);
@@ -188,7 +195,7 @@ public class TestNetworkTopology {
testNodes[1] = dataNodes[5];
testNodes[2] = dataNodes[3];
cluster.sortByDistance(dataNodes[0], testNodes,
- testNodes.length, 0xDEAD);
+ testNodes.length, 0xDEAD, false);
// sortByDistance does not take the "data center" layer into consideration
// and it doesn't sort by getDistance, so 1, 5, 3 is also valid here
assertTrue(testNodes[0] == dataNodes[1]);
@@ -204,7 +211,27 @@ public class TestNetworkTopology {
testNodes[1] = dataNodes[6];
testNodes[2] = dataNodes[7];
cluster.sortByDistance(dataNodes[i], testNodes,
- testNodes.length, 0xBEADED+i);
+ testNodes.length, 0xBEADED+i, false);
+ if (first == null) {
+ first = testNodes[0];
+ } else {
+ if (first != testNodes[0]) {
+ foundRandom = true;
+ break;
+ }
+ }
+ }
+ assertTrue("Expected to find a different first location", foundRandom);
+ // Array of rack local nodes with randomizeBlockLocationsPerBlock set to
+ // true
+ // Expect random order of block locations for same block
+ first = null;
+ for (int i = 1; i <= 4; i++) {
+ testNodes[0] = dataNodes[13];
+ testNodes[1] = dataNodes[14];
+ testNodes[2] = dataNodes[15];
+ cluster.sortByDistance(dataNodes[15 + i], testNodes, testNodes.length,
+ 0xBEADED, true);
if (first == null) {
first = testNodes[0];
} else {
Modified: hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java?rev=1612742&r1=1612741&r2=1612742&view=diff
==============================================================================
--- hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java (original)
+++ hadoop/common/branches/MR-2841/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java Wed Jul 23 01:47:28 2014
@@ -151,8 +151,10 @@ public class TestRefreshUserMappings {
final String [] GROUP_NAMES2 = new String [] {"gr3" , "gr4"};
//keys in conf
- String userKeyGroups = DefaultImpersonationProvider.getProxySuperuserGroupConfKey(SUPER_USER);
- String userKeyHosts = DefaultImpersonationProvider.getProxySuperuserIpConfKey (SUPER_USER);
+ String userKeyGroups = DefaultImpersonationProvider.getTestProvider().
+ getProxySuperuserGroupConfKey(SUPER_USER);
+ String userKeyHosts = DefaultImpersonationProvider.getTestProvider().
+ getProxySuperuserIpConfKey (SUPER_USER);
config.set(userKeyGroups, "gr3,gr4,gr5"); // superuser can proxy for this group
config.set(userKeyHosts,"127.0.0.1");