Posted to hdfs-commits@hadoop.apache.org by dd...@apache.org on 2013/04/30 23:46:02 UTC
svn commit: r1477849 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ src/test/java/org/apache/hadoop/hdfs/server/namenode/
Author: ddas
Date: Tue Apr 30 21:46:02 2013
New Revision: 1477849
URL: http://svn.apache.org/r1477849
Log:
HDFS-4778. Fixes some issues that the first patch on HDFS-2576 missed. Contributed by Devaraj Das.
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1477849&r1=1477848&r2=1477849&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Apr 30 21:46:02 2013
@@ -271,6 +271,9 @@ Trunk (Unreleased)
HDFS-4687. TestDelegationTokenForProxyUser#testWebHdfsDoAs is flaky with
JDK7. (Andrew Wang via atm)
+ HDFS-4778. Fixes some issues that the first patch on HDFS-2576 missed.
+ (ddas)
+
BREAKDOWN OF HADOOP-8562 and HDFS-3602 SUBTASKS AND RELATED JIRAS
HDFS-4145. Merge hdfs cmd line scripts from branch-1-win. (David Lao,
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java?rev=1477849&r1=1477848&r2=1477849&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java Tue Apr 30 21:46:02 2013
@@ -170,7 +170,8 @@ public class BlockPlacementPolicyDefault
results.add(remainingTargets[i]);
}
}
- return results.toArray(new DatanodeDescriptor[results.size()]);
+ return getPipeline(writer,
+ results.toArray(new DatanodeDescriptor[results.size()]));
} catch (NotEnoughReplicasException nr) {
// Fall back to regular block placement disregarding favored nodes hint
return chooseTarget(src, numOfReplicas, writer,
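
[Note on the BlockPlacementPolicyDefault hunk above: previously the favored-node path returned the chosen targets in whatever order they were accumulated, while the regular placement path runs its targets through getPipeline(), which orders them into a write pipeline by network distance starting from the writer. The fix makes the favored-node path do the same. A minimal sketch of that greedy ordering, assuming a hypothetical distance() callback in place of the real NetworkTopology lookup; this is an illustration, not the patched code:]

    import java.util.function.ToIntBiFunction;

    public class PipelineOrderSketch {
      // Greedy nearest-neighbor ordering: starting from the writer, repeatedly
      // move the unplaced node closest to the current pipeline end into the
      // next position. Rearranges "nodes" in place.
      static <N> void orderPipeline(N writer, N[] nodes,
          ToIntBiFunction<N, N> distance) {
        N current = writer;
        for (int i = 0; i < nodes.length; i++) {
          int best = i;
          for (int j = i + 1; j < nodes.length; j++) {
            if (distance.applyAsInt(current, nodes[j])
                < distance.applyAsInt(current, nodes[best])) {
              best = j;
            }
          }
          N tmp = nodes[i]; nodes[i] = nodes[best]; nodes[best] = tmp;
          current = nodes[i];
        }
      }
    }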
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1477849&r1=1477848&r2=1477849&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Tue Apr 30 21:46:02 2013
@@ -339,7 +339,6 @@ public class DatanodeManager {
*
* @param address hostaddress:transfer address
* @return the best match for the given datanode
- * @throws IOException when no datanode is found for given address
*/
DatanodeDescriptor getDatanodeDescriptor(String address) {
DatanodeDescriptor node = null;
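
[Note on the DatanodeManager hunk above: dropping the @throws tag reflects that getDatanodeDescriptor() no longer signals an unknown address with an IOException; as the surrounding context suggests, an unmatched address now simply yields no descriptor, so callers null-check instead of catching. A hedged sketch of the resulting calling pattern; resolveFavoredNodes is a hypothetical helper for illustration, not part of the patch:]

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical caller, for illustration only: resolve favored-node
    // "host:port" strings, skipping any address the DatanodeManager cannot
    // match, since getDatanodeDescriptor() returns null rather than throwing.
    static List<DatanodeDescriptor> resolveFavoredNodes(
        DatanodeManager dm, String[] addresses) {
      List<DatanodeDescriptor> nodes = new ArrayList<DatanodeDescriptor>();
      for (String addr : addresses) {
        DatanodeDescriptor node = dm.getDatanodeDescriptor(addr);
        if (node != null) {      // unknown address: drop the hint, don't fail
          nodes.add(node);
        }
      }
      return nodes;
    }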
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java?rev=1477849&r1=1477848&r2=1477849&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java Tue Apr 30 21:46:02 2013
@@ -22,6 +22,7 @@ import static org.junit.Assert.*;
import java.util.ArrayList;
import java.util.Random;
+import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
@@ -32,6 +33,7 @@ import org.apache.hadoop.fs.FSDataOutput
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -67,7 +69,7 @@ public class TestFavoredNodesEndToEnd {
}
}
- @Test
+ @Test(timeout=180000)
public void testFavoredNodesEndToEnd() throws Exception {
//create 10 files with random preferred nodes
for (int i = 0; i < NUM_FILES; i++) {
@@ -80,11 +82,7 @@ public class TestFavoredNodesEndToEnd {
4096, (short)3, (long)4096, null, datanode);
out.write(SOME_BYTES);
out.close();
- BlockLocation[] locations =
- dfs.getClient().getBlockLocations(p.toUri().getPath(), 0,
- Long.MAX_VALUE);
- //make sure we have exactly one block location, and three hosts
- assertTrue(locations.length == 1 && locations[0].getHosts().length == 3);
+ BlockLocation[] locations = getBlockLocations(p);
//verify the files got created in the right nodes
for (BlockLocation loc : locations) {
String[] hosts = loc.getNames();
@@ -94,7 +92,7 @@ public class TestFavoredNodesEndToEnd {
}
}
- @Test
+ @Test(timeout=180000)
public void testWhenFavoredNodesNotPresent() throws Exception {
//when we ask for favored nodes but the nodes are not there, we should
//get some other nodes. In other words, the write to hdfs should not fail
@@ -110,13 +108,10 @@ public class TestFavoredNodesEndToEnd {
4096, (short)3, (long)4096, null, arbitraryAddrs);
out.write(SOME_BYTES);
out.close();
- BlockLocation[] locations =
- dfs.getClient().getBlockLocations(p.toUri().getPath(), 0,
- Long.MAX_VALUE);
- assertTrue(locations.length == 1 && locations[0].getHosts().length == 3);
+ getBlockLocations(p);
}
- @Test
+ @Test(timeout=180000)
public void testWhenSomeNodesAreNotGood() throws Exception {
//make some datanode not "good" so that even if the client prefers it,
//the namenode would not give it as a replica to write to
@@ -136,12 +131,9 @@ public class TestFavoredNodesEndToEnd {
4096, (short)3, (long)4096, null, addrs);
out.write(SOME_BYTES);
out.close();
- BlockLocation[] locations =
- dfs.getClient().getBlockLocations(p.toUri().getPath(), 0,
- Long.MAX_VALUE);
//reset the state
d.stopDecommission();
- assertTrue(locations.length == 1 && locations[0].getHosts().length == 3);
+ BlockLocation[] locations = getBlockLocations(p);
//also make sure that the datanode[0] is not in the list of hosts
String datanode0 =
datanodes.get(0).getXferAddress().getAddress().getHostAddress()
@@ -153,6 +145,14 @@ public class TestFavoredNodesEndToEnd {
}
}
+ private BlockLocation[] getBlockLocations(Path p) throws Exception {
+ DFSTestUtil.waitReplication(dfs, p, (short)3);
+ BlockLocation[] locations = dfs.getClient().getBlockLocations(
+ p.toUri().getPath(), 0, Long.MAX_VALUE);
+ assertTrue(locations.length == 1 && locations[0].getHosts().length == 3);
+ return locations;
+ }
+
private String[] getStringForInetSocketAddrs(InetSocketAddress[] datanode) {
String strs[] = new String[datanode.length];
for (int i = 0; i < datanode.length; i++) {
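
[The common thread in the test changes above: each test previously asserted on the block locations immediately after close(), which races against replication reaching 3 and made the tests flaky; the new getBlockLocations(Path) helper blocks on DFSTestUtil.waitReplication() before asserting. A minimal sketch of the same poll-then-assert idea, assuming an illustrative timeout and polling interval rather than whatever waitReplication uses internally:]

    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Illustrative poll-then-assert loop, equivalent in spirit to
    // DFSTestUtil.waitReplication(): re-check the block locations until the
    // file reports the expected number of replicas, failing after a deadline.
    static void waitForReplication(DistributedFileSystem fs, Path p,
        short expected, long timeoutMs) throws Exception {
      long deadline = System.currentTimeMillis() + timeoutMs;
      while (true) {
        BlockLocation[] locs = fs.getFileBlockLocations(
            fs.getFileStatus(p), 0, Long.MAX_VALUE);
        if (locs.length == 1 && locs[0].getHosts().length >= expected) {
          return;  // single block, fully replicated
        }
        if (System.currentTimeMillis() > deadline) {
          throw new AssertionError("replication of " + p
              + " never reached " + expected);
        }
        Thread.sleep(500);  // illustrative back-off before re-checking
      }
    }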