Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2013/09/15 06:32:41 UTC
svn commit: r1523402 [1/2] - in /hadoop/common/branches/HDFS-2832/hadoop-hdfs-project: hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/ hadoop-hdfs/ hadoop-hdfs/src/main/java/ hado...
Author: szetszwo
Date: Sun Sep 15 04:32:39 2013
New Revision: 1523402
URL: http://svn.apache.org/r1523402
Log:
Merge r1523109 through r1523401 from trunk.
Modified:
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ (props changed)
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ (props changed)
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ (props changed)
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ (props changed)
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1523109-1523401
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java?rev=1523402&r1=1523401&r2=1523402&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java Sun Sep 15 04:32:39 2013
@@ -39,6 +39,12 @@ import org.jboss.netty.channel.Channel;
public class Nfs3Utils {
public final static String INODEID_PATH_PREFIX = "/.reserved/.inodes/";
+
+ public final static String READ_RPC_START = "READ_RPC_CALL_START____";
+ public final static String READ_RPC_END = "READ_RPC_CALL_END______";
+ public final static String WRITE_RPC_START = "WRITE_RPC_CALL_START____";
+ public final static String WRITE_RPC_END = "WRITE_RPC_CALL_END______";
+
public static String getFileIdPath(FileHandle handle) {
return getFileIdPath(handle.getFileId());
}
@@ -102,7 +108,10 @@ public class Nfs3Utils {
/**
* Send a write response to the netty network socket channel
*/
- public static void writeChannel(Channel channel, XDR out) {
+ public static void writeChannel(Channel channel, XDR out, int xid) {
+ if (RpcProgramNfs3.LOG.isDebugEnabled()) {
+ RpcProgramNfs3.LOG.debug(WRITE_RPC_END + xid);
+ }
ChannelBuffer outBuf = XDR.writeMessageTcp(out, true);
channel.write(outBuf);
}
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java?rev=1523402&r1=1523401&r2=1523402&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java Sun Sep 15 04:32:39 2013
@@ -291,7 +291,7 @@ class OpenFileCtx {
WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
- Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+ Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
} else {
// Handle repeated write requests(same xid or not).
// If already replied, send reply again. If not replied, drop the
@@ -313,7 +313,7 @@ class OpenFileCtx {
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
fileWcc, request.getCount(), request.getStableHow(),
Nfs3Constant.WRITE_COMMIT_VERF);
- Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+ Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
}
updateLastAccessTime();
@@ -367,7 +367,7 @@ class OpenFileCtx {
WccData fileWcc = new WccData(preOpAttr, postOpAttr);
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
- Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+ Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
writeCtx.setReplied(true);
}
@@ -392,7 +392,7 @@ class OpenFileCtx {
WccData fileWcc = new WccData(preOpAttr, postOpAttr);
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
- Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+ Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
writeCtx.setReplied(true);
}
@@ -418,7 +418,7 @@ class OpenFileCtx {
}
updateLastAccessTime();
- Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+ Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
}
}
@@ -707,7 +707,7 @@ class OpenFileCtx {
WccData fileWcc = new WccData(preOpAttr, latestAttr);
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
- Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+ Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
}
} catch (IOException e) {
@@ -715,7 +715,7 @@ class OpenFileCtx {
+ offset + " and length " + data.length, e);
if (!writeCtx.getReplied()) {
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO);
- Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+ Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
// Keep stream open. Either client retries or SteamMonitor closes it.
}
@@ -753,7 +753,7 @@ class OpenFileCtx {
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
fileWcc, 0, writeCtx.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
Nfs3Utils.writeChannel(writeCtx.getChannel(),
- response.send(new XDR(), writeCtx.getXid()));
+ response.send(new XDR(), writeCtx.getXid()), writeCtx.getXid());
}
}
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java?rev=1523402&r1=1523401&r2=1523402&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java Sun Sep 15 04:32:39 2013
@@ -125,7 +125,7 @@ public class RpcProgramNfs3 extends RpcP
public static final FsPermission umask = new FsPermission(
(short) DEFAULT_UMASK);
- private static final Log LOG = LogFactory.getLog(RpcProgramNfs3.class);
+ static final Log LOG = LogFactory.getLog(RpcProgramNfs3.class);
private static final int MAX_READ_TRANSFER_SIZE = 64 * 1024;
private static final int MAX_WRITE_TRANSFER_SIZE = 64 * 1024;
private static final int MAX_READDIR_TRANSFER_SIZE = 64 * 1024;
@@ -1814,9 +1814,19 @@ public class RpcProgramNfs3 extends RpcP
} else if (nfsproc3 == NFSPROC3.READLINK) {
response = readlink(xdr, securityHandler, client);
} else if (nfsproc3 == NFSPROC3.READ) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(Nfs3Utils.READ_RPC_START + xid);
+ }
response = read(xdr, securityHandler, client);
+ if (LOG.isDebugEnabled() && (nfsproc3 == NFSPROC3.READ)) {
+ LOG.debug(Nfs3Utils.READ_RPC_END + xid);
+ }
} else if (nfsproc3 == NFSPROC3.WRITE) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(Nfs3Utils.WRITE_RPC_START + xid);
+ }
response = write(xdr, channel, xid, securityHandler, client);
+ // Write end debug trace is in Nfs3Utils.writeChannel
} else if (nfsproc3 == NFSPROC3.CREATE) {
response = create(xdr, securityHandler, client);
} else if (nfsproc3 == NFSPROC3.MKDIR) {
@@ -1853,6 +1863,7 @@ public class RpcProgramNfs3 extends RpcP
if (response != null) {
out = response.send(out, xid);
}
+
return out;
}
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java?rev=1523402&r1=1523401&r2=1523402&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java Sun Sep 15 04:32:39 2013
@@ -118,7 +118,7 @@ public class WriteManager {
byte[] data = request.getData().array();
if (data.length < count) {
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
- Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+ Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
return;
}
@@ -155,7 +155,7 @@ public class WriteManager {
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
fileWcc, count, request.getStableHow(),
Nfs3Constant.WRITE_COMMIT_VERF);
- Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+ Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
return;
}
@@ -182,10 +182,10 @@ public class WriteManager {
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
fileWcc, count, request.getStableHow(),
Nfs3Constant.WRITE_COMMIT_VERF);
- Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+ Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
} else {
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO);
- Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+ Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
}
}
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java?rev=1523402&r1=1523401&r2=1523402&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java Sun Sep 15 04:32:39 2013
@@ -174,11 +174,11 @@ public class TestOutOfOrderWrite {
XDR writeReq;
writeReq = write(handle, 0x8000005c, 2000, 1000, data3);
- Nfs3Utils.writeChannel(channel, writeReq);
+ Nfs3Utils.writeChannel(channel, writeReq, 1);
writeReq = write(handle, 0x8000005d, 1000, 1000, data2);
- Nfs3Utils.writeChannel(channel, writeReq);
+ Nfs3Utils.writeChannel(channel, writeReq, 2);
writeReq = write(handle, 0x8000005e, 0, 1000, data1);
- Nfs3Utils.writeChannel(channel, writeReq);
+ Nfs3Utils.writeChannel(channel, writeReq, 3);
// TODO: convert to Junit test, and validate result automatically
}
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1523402&r1=1523401&r2=1523402&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Sun Sep 15 04:32:39 2013
@@ -275,6 +275,10 @@ Release 2.3.0 - UNRELEASED
HDFS-4096. Add snapshot information to namenode WebUI. (Haohui Mai via
jing9)
+ HDFS-5188. In BlockPlacementPolicy, reduce the number of chooseTarget(..)
+ methods; replace HashMap with Map in parameter declarations and cleanup
+ some related code. (szetszwo)
+
OPTIMIZATIONS
BUG FIXES
@@ -338,6 +342,8 @@ Release 2.1.1-beta - UNRELEASED
HDFS-5067 Support symlink operations in NFS gateway. (brandonli)
+ HDFS-5199 Add more debug trace for NFS READ and WRITE. (brandonli)
+
IMPROVEMENTS
HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may
Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1523109-1523401
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1523402&r1=1523401&r2=1523402&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Sun Sep 15 04:32:39 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
/**
* This class contains constants for configuration keys used
@@ -348,6 +349,8 @@ public class DFSConfigKeys extends Commo
public static final String DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY = "dfs.block.access.token.lifetime";
public static final long DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT = 600L;
+ public static final String DFS_BLOCK_REPLICATOR_CLASSNAME_KEY = "dfs.block.replicator.classname";
+ public static final Class<BlockPlacementPolicyDefault> DFS_BLOCK_REPLICATOR_CLASSNAME_DEFAULT = BlockPlacementPolicyDefault.class;
public static final String DFS_REPLICATION_MAX_KEY = "dfs.replication.max";
public static final int DFS_REPLICATION_MAX_DEFAULT = 512;
public static final String DFS_DF_INTERVAL_KEY = "dfs.df.interval";
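
With the key and its default captured as constants, callers can select a placement policy without repeating the bare "dfs.block.replicator.classname" string. A minimal sketch, using the node-group policy touched later in this commit:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.DFSConfigKeys;
  import org.apache.hadoop.hdfs.HdfsConfiguration;
  import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
  import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup;

  public class ConfigurePlacementPolicy {
    public static Configuration create() {
      Configuration conf = new HdfsConfiguration();
      // Select a non-default policy by class instead of by bare string;
      // the test changes further down in this commit use the same pattern.
      conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
          BlockPlacementPolicyWithNodeGroup.class, BlockPlacementPolicy.class);
      return conf;
    }
  }
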
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1523402&r1=1523401&r2=1523402&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Sun Sep 15 04:32:39 2013
@@ -1260,8 +1260,7 @@ public class BlockManager {
namesystem.writeUnlock();
}
- HashMap<Node, Node> excludedNodes
- = new HashMap<Node, Node>();
+ final Map<Node, Node> excludedNodes = new HashMap<Node, Node>();
for(ReplicationWork rw : work){
// Exclude all of the containing nodes from being targets.
// This list includes decommissioning or corrupt nodes.
@@ -1273,9 +1272,7 @@ public class BlockManager {
// choose replication targets: NOT HOLDING THE GLOBAL LOCK
// It is costly to extract the filename for which chooseTargets is called,
// so for now we pass in the block collection itself.
- rw.targets = blockplacement.chooseTarget(rw.bc,
- rw.additionalReplRequired, rw.srcNode, rw.liveReplicaNodes,
- excludedNodes, rw.block.getNumBytes());
+ rw.chooseTargets(blockplacement, excludedNodes);
}
namesystem.writeLock();
@@ -3249,6 +3246,13 @@ assert storedBlock.findDatanode(dn) < 0
this.priority = priority;
this.targets = null;
}
+
+ private void chooseTargets(BlockPlacementPolicy blockplacement,
+ Map<Node, Node> excludedNodes) {
+ targets = blockplacement.chooseTarget(bc.getName(),
+ additionalReplRequired, srcNode, liveReplicaNodes, false,
+ excludedNodes, block.getNumBytes());
+ }
}
/**
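
The refactoring above keeps the existing locking discipline while tightening the code: rw.chooseTargets(..) still runs between namesystem.writeUnlock() and writeLock(), because target selection is costly and must not hold the global lock. A generic, simplified sketch of that gather/compute/commit pattern (not BlockManager's actual code):

  import java.util.ArrayList;
  import java.util.List;
  import java.util.concurrent.locks.ReentrantReadWriteLock;

  public class ComputeOutsideLock {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private final List<Runnable> work = new ArrayList<Runnable>();

    void schedule() {
      final List<Runnable> snapshot;
      lock.writeLock().lock();
      try {
        snapshot = new ArrayList<Runnable>(work);  // gather under the lock
      } finally {
        lock.writeLock().unlock();
      }
      for (Runnable w : snapshot) {
        w.run();               // expensive work runs with the lock released
      }
      lock.writeLock().lock();
      try {
        work.removeAll(snapshot);                  // commit under the lock
      } finally {
        lock.writeLock().unlock();
      }
    }
  }
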
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java?rev=1523402&r1=1523401&r2=1523402&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java Sun Sep 15 04:32:39 2013
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.bl
import java.util.ArrayList;
import java.util.Collection;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -27,6 +26,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -55,25 +55,6 @@ public abstract class BlockPlacementPoli
* choose <i>numOfReplicas</i> data nodes for <i>writer</i>
* to re-replicate a block with size <i>blocksize</i>
* If not, return as many as we can.
- *
- * @param srcPath the file to which this chooseTargets is being invoked.
- * @param numOfReplicas additional number of replicas wanted.
- * @param writer the writer's machine, null if not in the cluster.
- * @param chosenNodes datanodes that have been chosen as targets.
- * @param blocksize size of the data to be written.
- * @return array of DatanodeDescriptor instances chosen as target
- * and sorted as a pipeline.
- */
- abstract DatanodeDescriptor[] chooseTarget(String srcPath,
- int numOfReplicas,
- DatanodeDescriptor writer,
- List<DatanodeDescriptor> chosenNodes,
- long blocksize);
-
- /**
- * choose <i>numOfReplicas</i> data nodes for <i>writer</i>
- * to re-replicate a block with size <i>blocksize</i>
- * If not, return as many as we can.
*
* @param srcPath the file to which this chooseTargets is being invoked.
* @param numOfReplicas additional number of replicas wanted.
@@ -90,34 +71,8 @@ public abstract class BlockPlacementPoli
DatanodeDescriptor writer,
List<DatanodeDescriptor> chosenNodes,
boolean returnChosenNodes,
- HashMap<Node, Node> excludedNodes,
+ Map<Node, Node> excludedNodes,
long blocksize);
-
- /**
- * choose <i>numOfReplicas</i> data nodes for <i>writer</i>
- * If not, return as many as we can.
- * The base implemenatation extracts the pathname of the file from the
- * specified srcBC, but this could be a costly operation depending on the
- * file system implementation. Concrete implementations of this class should
- * override this method to avoid this overhead.
- *
- * @param srcBC block collection of file for which chooseTarget is invoked.
- * @param numOfReplicas additional number of replicas wanted.
- * @param writer the writer's machine, null if not in the cluster.
- * @param chosenNodes datanodes that have been chosen as targets.
- * @param blocksize size of the data to be written.
- * @return array of DatanodeDescriptor instances chosen as target
- * and sorted as a pipeline.
- */
- DatanodeDescriptor[] chooseTarget(BlockCollection srcBC,
- int numOfReplicas,
- DatanodeDescriptor writer,
- List<DatanodeDescriptor> chosenNodes,
- HashMap<Node, Node> excludedNodes,
- long blocksize) {
- return chooseTarget(srcBC.getName(), numOfReplicas, writer,
- chosenNodes, false, excludedNodes, blocksize);
- }
/**
* Same as {@link #chooseTarget(String, int, DatanodeDescriptor, List, boolean,
@@ -128,7 +83,7 @@ public abstract class BlockPlacementPoli
*/
DatanodeDescriptor[] chooseTarget(String src,
int numOfReplicas, DatanodeDescriptor writer,
- HashMap<Node, Node> excludedNodes,
+ Map<Node, Node> excludedNodes,
long blocksize, List<DatanodeDescriptor> favoredNodes) {
// This class does not provide the functionality of placing
// a block in favored datanodes. The implementations of this class
@@ -183,7 +138,7 @@ public abstract class BlockPlacementPoli
/**
* Get an instance of the configured Block Placement Policy based on the
- * value of the configuration paramater dfs.block.replicator.classname.
+ * the configuration property {@link DFS_BLOCK_REPLICATOR_CLASSNAME_KEY}.
*
* @param conf the configuration to be used
* @param stats an object that is used to retrieve the load on the cluster
@@ -193,12 +148,12 @@ public abstract class BlockPlacementPoli
public static BlockPlacementPolicy getInstance(Configuration conf,
FSClusterStats stats,
NetworkTopology clusterMap) {
- Class<? extends BlockPlacementPolicy> replicatorClass =
- conf.getClass("dfs.block.replicator.classname",
- BlockPlacementPolicyDefault.class,
- BlockPlacementPolicy.class);
- BlockPlacementPolicy replicator = (BlockPlacementPolicy) ReflectionUtils.newInstance(
- replicatorClass, conf);
+ final Class<? extends BlockPlacementPolicy> replicatorClass = conf.getClass(
+ DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
+ DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_DEFAULT,
+ BlockPlacementPolicy.class);
+ final BlockPlacementPolicy replicator = ReflectionUtils.newInstance(
+ replicatorClass, conf);
replicator.initialize(conf, stats, clusterMap);
return replicator;
}
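
getInstance(..) now resolves the policy class through the DFSConfigKeys constants added earlier in this commit instead of a hard-coded string. The two-step pattern, look up a class with a typed default and then instantiate it with the Configuration injected, generalizes to any pluggable component; a minimal sketch under that reading:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.util.ReflectionUtils;

  public class PluginLoader {
    // Resolve an implementation class from conf (with a typed default) and
    // instantiate it with the Configuration injected, the same two steps
    // getInstance(..) performs above.
    public static <T> T load(Configuration conf, String key,
        Class<? extends T> defaultImpl, Class<T> base) {
      final Class<? extends T> impl = conf.getClass(key, defaultImpl, base);
      return ReflectionUtils.newInstance(impl, conf);
    }
  }
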
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java?rev=1523402&r1=1523401&r2=1523402&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java Sun Sep 15 04:32:39 2013
@@ -22,8 +22,8 @@ import static org.apache.hadoop.util.Tim
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
-import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
@@ -57,6 +57,14 @@ public class BlockPlacementPolicyDefault
"For more information, please enable DEBUG log level on "
+ BlockPlacementPolicy.class.getName();
+ private static final ThreadLocal<StringBuilder> debugLoggingBuilder
+ = new ThreadLocal<StringBuilder>() {
+ @Override
+ protected StringBuilder initialValue() {
+ return new StringBuilder();
+ }
+ };
+
protected boolean considerLoad;
private boolean preferLocalNode = true;
protected NetworkTopology clusterMap;
@@ -95,40 +103,25 @@ public class BlockPlacementPolicyDefault
DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);
}
- protected ThreadLocal<StringBuilder> threadLocalBuilder =
- new ThreadLocal<StringBuilder>() {
- @Override
- protected StringBuilder initialValue() {
- return new StringBuilder();
- }
- };
-
- @Override
- public DatanodeDescriptor[] chooseTarget(String srcPath,
- int numOfReplicas,
- DatanodeDescriptor writer,
- List<DatanodeDescriptor> chosenNodes,
- long blocksize) {
- return chooseTarget(numOfReplicas, writer, chosenNodes, false,
- null, blocksize);
- }
-
@Override
public DatanodeDescriptor[] chooseTarget(String srcPath,
int numOfReplicas,
DatanodeDescriptor writer,
List<DatanodeDescriptor> chosenNodes,
boolean returnChosenNodes,
- HashMap<Node, Node> excludedNodes,
+ Map<Node, Node> excludedNodes,
long blocksize) {
return chooseTarget(numOfReplicas, writer, chosenNodes, returnChosenNodes,
excludedNodes, blocksize);
}
@Override
- DatanodeDescriptor[] chooseTarget(String src, int numOfReplicas,
- DatanodeDescriptor writer, HashMap<Node, Node> excludedNodes,
- long blocksize, List<DatanodeDescriptor> favoredNodes) {
+ DatanodeDescriptor[] chooseTarget(String src,
+ int numOfReplicas,
+ DatanodeDescriptor writer,
+ Map<Node, Node> excludedNodes,
+ long blocksize,
+ List<DatanodeDescriptor> favoredNodes) {
try {
if (favoredNodes == null || favoredNodes.size() == 0) {
// Favored nodes not specified, fall back to regular block placement.
@@ -137,7 +130,7 @@ public class BlockPlacementPolicyDefault
excludedNodes, blocksize);
}
- HashMap<Node, Node> favoriteAndExcludedNodes = excludedNodes == null ?
+ Map<Node, Node> favoriteAndExcludedNodes = excludedNodes == null ?
new HashMap<Node, Node>() : new HashMap<Node, Node>(excludedNodes);
// Choose favored nodes
@@ -181,14 +174,14 @@ public class BlockPlacementPolicyDefault
}
/** This is the implementation. */
- DatanodeDescriptor[] chooseTarget(int numOfReplicas,
+ private DatanodeDescriptor[] chooseTarget(int numOfReplicas,
DatanodeDescriptor writer,
List<DatanodeDescriptor> chosenNodes,
boolean returnChosenNodes,
- HashMap<Node, Node> excludedNodes,
+ Map<Node, Node> excludedNodes,
long blocksize) {
if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) {
- return new DatanodeDescriptor[0];
+ return DatanodeDescriptor.EMPTY_ARRAY;
}
if (excludedNodes == null) {
@@ -204,7 +197,6 @@ public class BlockPlacementPolicyDefault
for (DatanodeDescriptor node:chosenNodes) {
// add localMachine and related nodes to excludedNodes
addToExcludedNodes(node, excludedNodes);
- adjustExcludedNodes(excludedNodes, node);
}
if (!clusterMap.contains(writer)) {
@@ -239,7 +231,7 @@ public class BlockPlacementPolicyDefault
/* choose <i>numOfReplicas</i> from all data nodes */
private DatanodeDescriptor chooseTarget(int numOfReplicas,
DatanodeDescriptor writer,
- HashMap<Node, Node> excludedNodes,
+ Map<Node, Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
List<DatanodeDescriptor> results,
@@ -256,7 +248,7 @@ public class BlockPlacementPolicyDefault
}
// Keep a copy of original excludedNodes
- final HashMap<Node, Node> oldExcludedNodes = avoidStaleNodes ?
+ final Map<Node, Node> oldExcludedNodes = avoidStaleNodes ?
new HashMap<Node, Node>(excludedNodes) : null;
try {
if (numOfResults == 0) {
@@ -316,19 +308,19 @@ public class BlockPlacementPolicyDefault
return writer;
}
- /* choose <i>localMachine</i> as the target.
+ /**
+ * Choose <i>localMachine</i> as the target.
* if <i>localMachine</i> is not available,
* choose a node on the same rack
* @return the chosen node
*/
- protected DatanodeDescriptor chooseLocalNode(
- DatanodeDescriptor localMachine,
- HashMap<Node, Node> excludedNodes,
+ protected DatanodeDescriptor chooseLocalNode(DatanodeDescriptor localMachine,
+ Map<Node, Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
List<DatanodeDescriptor> results,
boolean avoidStaleNodes)
- throws NotEnoughReplicasException {
+ throws NotEnoughReplicasException {
// if no local machine, randomly choose one node
if (localMachine == null)
return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
@@ -337,11 +329,8 @@ public class BlockPlacementPolicyDefault
// otherwise try local machine first
Node oldNode = excludedNodes.put(localMachine, localMachine);
if (oldNode == null) { // was not in the excluded list
- if (isGoodTarget(localMachine, blocksize, maxNodesPerRack, false,
- results, avoidStaleNodes)) {
- results.add(localMachine);
- // add localMachine and related nodes to excludedNode
- addToExcludedNodes(localMachine, excludedNodes);
+ if (addIfIsGoodTarget(localMachine, excludedNodes, blocksize,
+ maxNodesPerRack, false, results, avoidStaleNodes) >= 0) {
return localMachine;
}
}
@@ -358,26 +347,26 @@ public class BlockPlacementPolicyDefault
* @return number of new excluded nodes
*/
protected int addToExcludedNodes(DatanodeDescriptor localMachine,
- HashMap<Node, Node> excludedNodes) {
+ Map<Node, Node> excludedNodes) {
Node node = excludedNodes.put(localMachine, localMachine);
return node == null?1:0;
}
- /* choose one node from the rack that <i>localMachine</i> is on.
+ /**
+ * Choose one node from the rack that <i>localMachine</i> is on.
* if no such node is available, choose one node from the rack where
* a second replica is on.
* if still no such node is available, choose a random node
* in the cluster.
* @return the chosen node
*/
- protected DatanodeDescriptor chooseLocalRack(
- DatanodeDescriptor localMachine,
- HashMap<Node, Node> excludedNodes,
+ protected DatanodeDescriptor chooseLocalRack(DatanodeDescriptor localMachine,
+ Map<Node, Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
List<DatanodeDescriptor> results,
boolean avoidStaleNodes)
- throws NotEnoughReplicasException {
+ throws NotEnoughReplicasException {
// no local machine, so choose a random machine
if (localMachine == null) {
return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
@@ -391,9 +380,7 @@ public class BlockPlacementPolicyDefault
} catch (NotEnoughReplicasException e1) {
// find the second replica
DatanodeDescriptor newLocal=null;
- for(Iterator<DatanodeDescriptor> iter=results.iterator();
- iter.hasNext();) {
- DatanodeDescriptor nextNode = iter.next();
+ for(DatanodeDescriptor nextNode : results) {
if (nextNode != localMachine) {
newLocal = nextNode;
break;
@@ -416,7 +403,8 @@ public class BlockPlacementPolicyDefault
}
}
- /* choose <i>numOfReplicas</i> nodes from the racks
+ /**
+ * Choose <i>numOfReplicas</i> nodes from the racks
* that <i>localMachine</i> is NOT on.
* if not enough nodes are available, choose the remaining ones
* from the local rack
@@ -424,12 +412,12 @@ public class BlockPlacementPolicyDefault
protected void chooseRemoteRack(int numOfReplicas,
DatanodeDescriptor localMachine,
- HashMap<Node, Node> excludedNodes,
+ Map<Node, Node> excludedNodes,
long blocksize,
int maxReplicasPerRack,
List<DatanodeDescriptor> results,
boolean avoidStaleNodes)
- throws NotEnoughReplicasException {
+ throws NotEnoughReplicasException {
int oldNumOfReplicas = results.size();
// randomly choose one node from remote racks
try {
@@ -443,91 +431,59 @@ public class BlockPlacementPolicyDefault
}
}
- /* Randomly choose one target from <i>nodes</i>.
- * @return the chosen node
+ /**
+ * Randomly choose one target from the given <i>scope</i>.
+ * @return the chosen node, if there is any.
*/
- protected DatanodeDescriptor chooseRandom(
- String nodes,
- HashMap<Node, Node> excludedNodes,
- long blocksize,
- int maxNodesPerRack,
- List<DatanodeDescriptor> results,
- boolean avoidStaleNodes)
- throws NotEnoughReplicasException {
- int numOfAvailableNodes =
- clusterMap.countNumOfAvailableNodes(nodes, excludedNodes.keySet());
- StringBuilder builder = null;
- if (LOG.isDebugEnabled()) {
- builder = threadLocalBuilder.get();
- builder.setLength(0);
- builder.append("[");
- }
- boolean badTarget = false;
- while(numOfAvailableNodes > 0) {
- DatanodeDescriptor chosenNode =
- (DatanodeDescriptor)(clusterMap.chooseRandom(nodes));
-
- Node oldNode = excludedNodes.put(chosenNode, chosenNode);
- if (oldNode == null) { // chosenNode was not in the excluded list
- numOfAvailableNodes--;
- if (isGoodTarget(chosenNode, blocksize,
- maxNodesPerRack, results, avoidStaleNodes)) {
- results.add(chosenNode);
- // add chosenNode and related nodes to excludedNode
- addToExcludedNodes(chosenNode, excludedNodes);
- adjustExcludedNodes(excludedNodes, chosenNode);
- return chosenNode;
- } else {
- badTarget = true;
- }
- }
- }
-
- String detail = enableDebugLogging;
- if (LOG.isDebugEnabled()) {
- if (badTarget && builder != null) {
- detail = builder.append("]").toString();
- builder.setLength(0);
- } else detail = "";
- }
- throw new NotEnoughReplicasException(detail);
+ protected DatanodeDescriptor chooseRandom(String scope,
+ Map<Node, Node> excludedNodes,
+ long blocksize,
+ int maxNodesPerRack,
+ List<DatanodeDescriptor> results,
+ boolean avoidStaleNodes)
+ throws NotEnoughReplicasException {
+ return chooseRandom(1, scope, excludedNodes, blocksize, maxNodesPerRack,
+ results, avoidStaleNodes);
}
-
- /* Randomly choose <i>numOfReplicas</i> targets from <i>nodes</i>.
+
+ /**
+ * Randomly choose <i>numOfReplicas</i> targets from the given <i>scope</i>.
+ * @return the first chosen node, if there is any.
*/
- protected void chooseRandom(int numOfReplicas,
- String nodes,
- HashMap<Node, Node> excludedNodes,
+ protected DatanodeDescriptor chooseRandom(int numOfReplicas,
+ String scope,
+ Map<Node, Node> excludedNodes,
long blocksize,
int maxNodesPerRack,
List<DatanodeDescriptor> results,
boolean avoidStaleNodes)
- throws NotEnoughReplicasException {
+ throws NotEnoughReplicasException {
- int numOfAvailableNodes =
- clusterMap.countNumOfAvailableNodes(nodes, excludedNodes.keySet());
+ int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
+ scope, excludedNodes.keySet());
StringBuilder builder = null;
if (LOG.isDebugEnabled()) {
- builder = threadLocalBuilder.get();
+ builder = debugLoggingBuilder.get();
builder.setLength(0);
builder.append("[");
}
boolean badTarget = false;
+ DatanodeDescriptor firstChosen = null;
while(numOfReplicas > 0 && numOfAvailableNodes > 0) {
DatanodeDescriptor chosenNode =
- (DatanodeDescriptor)(clusterMap.chooseRandom(nodes));
+ (DatanodeDescriptor)clusterMap.chooseRandom(scope);
Node oldNode = excludedNodes.put(chosenNode, chosenNode);
if (oldNode == null) {
numOfAvailableNodes--;
- if (isGoodTarget(chosenNode, blocksize,
- maxNodesPerRack, results, avoidStaleNodes)) {
+ int newExcludedNodes = addIfIsGoodTarget(chosenNode, excludedNodes,
+ blocksize, maxNodesPerRack, considerLoad, results, avoidStaleNodes);
+ if (newExcludedNodes >= 0) {
numOfReplicas--;
- results.add(chosenNode);
- // add chosenNode and related nodes to excludedNode
- int newExcludedNodes = addToExcludedNodes(chosenNode, excludedNodes);
+ if (firstChosen == null) {
+ firstChosen = chosenNode;
+ }
numOfAvailableNodes -= newExcludedNodes;
- adjustExcludedNodes(excludedNodes, chosenNode);
} else {
badTarget = true;
}
@@ -544,34 +500,44 @@ public class BlockPlacementPolicyDefault
}
throw new NotEnoughReplicasException(detail);
}
+
+ return firstChosen;
}
-
+
/**
- * After choosing a node to place replica, adjust excluded nodes accordingly.
- * It should do nothing here as chosenNode is already put into exlcudeNodes,
- * but it can be overridden in subclass to put more related nodes into
- * excludedNodes.
- *
- * @param excludedNodes
- * @param chosenNode
- */
- protected void adjustExcludedNodes(HashMap<Node, Node> excludedNodes,
- Node chosenNode) {
- // do nothing here.
+ * If the given node is a good target, add it to the result list and
+ * update the excluded node map.
+ * @return -1 if the given is not a good target;
+ * otherwise, return the number of excluded nodes added to the map.
+ */
+ int addIfIsGoodTarget(DatanodeDescriptor node,
+ Map<Node, Node> excludedNodes,
+ long blockSize,
+ int maxNodesPerRack,
+ boolean considerLoad,
+ List<DatanodeDescriptor> results,
+ boolean avoidStaleNodes) {
+ if (isGoodTarget(node, blockSize, maxNodesPerRack, considerLoad,
+ results, avoidStaleNodes)) {
+ results.add(node);
+ // add node and related nodes to excludedNode
+ return addToExcludedNodes(node, excludedNodes);
+ } else {
+ return -1;
+ }
}
- /* judge if a node is a good target.
- * return true if <i>node</i> has enough space,
- * does not have too much load, and the rack does not have too many nodes
- */
- private boolean isGoodTarget(DatanodeDescriptor node,
- long blockSize, int maxTargetPerRack,
- List<DatanodeDescriptor> results,
- boolean avoidStaleNodes) {
- return isGoodTarget(node, blockSize, maxTargetPerRack, this.considerLoad,
- results, avoidStaleNodes);
+ private static void logNodeIsNotChosen(DatanodeDescriptor node, String reason) {
+ if (LOG.isDebugEnabled()) {
+ // build the error message for later use.
+ debugLoggingBuilder.get()
+ .append(node).append(": ")
+ .append("Node ").append(NodeBase.getPath(node))
+ .append(" is not chosen because ")
+ .append(reason);
+ }
}
-
+
/**
* Determine if a node is a good target.
*
@@ -588,28 +554,20 @@ public class BlockPlacementPolicyDefault
* does not have too much load,
* and the rack does not have too many nodes.
*/
- protected boolean isGoodTarget(DatanodeDescriptor node,
+ private boolean isGoodTarget(DatanodeDescriptor node,
long blockSize, int maxTargetPerRack,
boolean considerLoad,
List<DatanodeDescriptor> results,
boolean avoidStaleNodes) {
// check if the node is (being) decommissed
if (node.isDecommissionInProgress() || node.isDecommissioned()) {
- if(LOG.isDebugEnabled()) {
- threadLocalBuilder.get().append(node.toString()).append(": ")
- .append("Node ").append(NodeBase.getPath(node))
- .append(" is not chosen because the node is (being) decommissioned ");
- }
+ logNodeIsNotChosen(node, "the node is (being) decommissioned ");
return false;
}
if (avoidStaleNodes) {
if (node.isStale(this.staleInterval)) {
- if (LOG.isDebugEnabled()) {
- threadLocalBuilder.get().append(node.toString()).append(": ")
- .append("Node ").append(NodeBase.getPath(node))
- .append(" is not chosen because the node is stale ");
- }
+ logNodeIsNotChosen(node, "the node is stale ");
return false;
}
}
@@ -618,11 +576,7 @@ public class BlockPlacementPolicyDefault
(node.getBlocksScheduled() * blockSize);
// check the remaining capacity of the target machine
if (blockSize* HdfsConstants.MIN_BLOCKS_FOR_WRITE>remaining) {
- if(LOG.isDebugEnabled()) {
- threadLocalBuilder.get().append(node.toString()).append(": ")
- .append("Node ").append(NodeBase.getPath(node))
- .append(" is not chosen because the node does not have enough space ");
- }
+ logNodeIsNotChosen(node, "the node does not have enough space ");
return false;
}
@@ -634,11 +588,7 @@ public class BlockPlacementPolicyDefault
avgLoad = (double)stats.getTotalLoad()/size;
}
if (node.getXceiverCount() > (2.0 * avgLoad)) {
- if(LOG.isDebugEnabled()) {
- threadLocalBuilder.get().append(node.toString()).append(": ")
- .append("Node ").append(NodeBase.getPath(node))
- .append(" is not chosen because the node is too busy ");
- }
+ logNodeIsNotChosen(node, "the node is too busy ");
return false;
}
}
@@ -646,31 +596,25 @@ public class BlockPlacementPolicyDefault
// check if the target rack has chosen too many nodes
String rackname = node.getNetworkLocation();
int counter=1;
- for(Iterator<DatanodeDescriptor> iter = results.iterator();
- iter.hasNext();) {
- Node result = iter.next();
+ for(Node result : results) {
if (rackname.equals(result.getNetworkLocation())) {
counter++;
}
}
if (counter>maxTargetPerRack) {
- if(LOG.isDebugEnabled()) {
- threadLocalBuilder.get().append(node.toString()).append(": ")
- .append("Node ").append(NodeBase.getPath(node))
- .append(" is not chosen because the rack has too many chosen nodes ");
- }
+ logNodeIsNotChosen(node, "the rack has too many chosen nodes ");
return false;
}
return true;
}
- /* Return a pipeline of nodes.
+ /**
+ * Return a pipeline of nodes.
* The pipeline is formed finding a shortest path that
* starts from the writer and traverses all <i>nodes</i>
* This is basically a traveling salesman problem.
*/
- private DatanodeDescriptor[] getPipeline(
- DatanodeDescriptor writer,
+ private DatanodeDescriptor[] getPipeline(DatanodeDescriptor writer,
DatanodeDescriptor[] nodes) {
if (nodes.length==0) return nodes;
@@ -709,7 +653,7 @@ public class BlockPlacementPolicyDefault
int minRacks) {
DatanodeInfo[] locs = lBlk.getLocations();
if (locs == null)
- locs = new DatanodeInfo[0];
+ locs = DatanodeDescriptor.EMPTY_ARRAY;
int numRacks = clusterMap.getNumOfRacks();
if(numRacks <= 1) // only one rack
return 0;
@@ -724,24 +668,18 @@ public class BlockPlacementPolicyDefault
@Override
public DatanodeDescriptor chooseReplicaToDelete(BlockCollection bc,
- Block block,
- short replicationFactor,
- Collection<DatanodeDescriptor> first,
- Collection<DatanodeDescriptor> second) {
+ Block block, short replicationFactor,
+ Collection<DatanodeDescriptor> first,
+ Collection<DatanodeDescriptor> second) {
long oldestHeartbeat =
now() - heartbeatInterval * tolerateHeartbeatMultiplier;
DatanodeDescriptor oldestHeartbeatNode = null;
long minSpace = Long.MAX_VALUE;
DatanodeDescriptor minSpaceNode = null;
- // pick replica from the first Set. If first is empty, then pick replicas
- // from second set.
- Iterator<DatanodeDescriptor> iter = pickupReplicaSet(first, second);
-
// Pick the node with the oldest heartbeat or with the least free space,
// if all hearbeats are within the tolerable heartbeat interval
- while (iter.hasNext() ) {
- DatanodeDescriptor node = iter.next();
+ for(DatanodeDescriptor node : pickupReplicaSet(first, second)) {
long free = node.getRemaining();
long lastHeartbeat = node.getLastUpdate();
if(lastHeartbeat < oldestHeartbeat) {
@@ -762,12 +700,10 @@ public class BlockPlacementPolicyDefault
* replica while second set contains remaining replica nodes.
* So pick up first set if not empty. If first is empty, then pick second.
*/
- protected Iterator<DatanodeDescriptor> pickupReplicaSet(
+ protected Collection<DatanodeDescriptor> pickupReplicaSet(
Collection<DatanodeDescriptor> first,
Collection<DatanodeDescriptor> second) {
- Iterator<DatanodeDescriptor> iter =
- first.isEmpty() ? second.iterator() : first.iterator();
- return iter;
+ return first.isEmpty() ? second : first;
}
@VisibleForTesting
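
The new logNodeIsNotChosen(..) helper above centralizes a pattern the old code repeated inline: rejection reasons accumulate in a per-thread StringBuilder (debugLoggingBuilder) and only become a log message if target selection ultimately fails with NotEnoughReplicasException. A simplified, generic sketch of that per-thread buffer, not the class's exact code:

  public class ThreadLocalDebugBuffer {
    private static final ThreadLocal<StringBuilder> BUF =
        new ThreadLocal<StringBuilder>() {
          @Override
          protected StringBuilder initialValue() {
            return new StringBuilder();
          }
        };

    // Record why a candidate was rejected; no locking, and no allocation
    // after the first call on each thread.
    static void reject(String node, String reason) {
      BUF.get().append(node).append(" not chosen: ").append(reason).append("; ");
    }

    // Turn the accumulated reasons into a message and reset the buffer,
    // as chooseRandom(..) does when it finally gives up.
    static String drain() {
      StringBuilder b = BUF.get();
      String detail = b.toString();
      b.setLength(0);
      return detail;
    }
  }
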
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java?rev=1523402&r1=1523401&r2=1523402&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java Sun Sep 15 04:32:39 2013
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.bl
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
-import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -65,7 +64,7 @@ public class BlockPlacementPolicyWithNod
*/
@Override
protected DatanodeDescriptor chooseLocalNode(DatanodeDescriptor localMachine,
- HashMap<Node, Node> excludedNodes, long blocksize, int maxNodesPerRack,
+ Map<Node, Node> excludedNodes, long blocksize, int maxNodesPerRack,
List<DatanodeDescriptor> results, boolean avoidStaleNodes)
throws NotEnoughReplicasException {
// if no local machine, randomly choose one node
@@ -76,12 +75,8 @@ public class BlockPlacementPolicyWithNod
// otherwise try local machine first
Node oldNode = excludedNodes.put(localMachine, localMachine);
if (oldNode == null) { // was not in the excluded list
- if (isGoodTarget(localMachine, blocksize,
- maxNodesPerRack, false, results, avoidStaleNodes)) {
- results.add(localMachine);
- // Nodes under same nodegroup should be excluded.
- addNodeGroupToExcludedNodes(excludedNodes,
- localMachine.getNetworkLocation());
+ if (addIfIsGoodTarget(localMachine, excludedNodes, blocksize,
+ maxNodesPerRack, false, results, avoidStaleNodes) >= 0) {
return localMachine;
}
}
@@ -98,26 +93,10 @@ public class BlockPlacementPolicyWithNod
blocksize, maxNodesPerRack, results, avoidStaleNodes);
}
- @Override
- protected void adjustExcludedNodes(HashMap<Node, Node> excludedNodes,
- Node chosenNode) {
- // as node-group aware implementation, it should make sure no two replica
- // are placing on the same node group.
- addNodeGroupToExcludedNodes(excludedNodes, chosenNode.getNetworkLocation());
- }
- // add all nodes under specific nodegroup to excludedNodes.
- private void addNodeGroupToExcludedNodes(HashMap<Node, Node> excludedNodes,
- String nodeGroup) {
- List<Node> leafNodes = clusterMap.getLeaves(nodeGroup);
- for (Node node : leafNodes) {
- excludedNodes.put(node, node);
- }
- }
-
@Override
protected DatanodeDescriptor chooseLocalRack(DatanodeDescriptor localMachine,
- HashMap<Node, Node> excludedNodes, long blocksize, int maxNodesPerRack,
+ Map<Node, Node> excludedNodes, long blocksize, int maxNodesPerRack,
List<DatanodeDescriptor> results, boolean avoidStaleNodes)
throws NotEnoughReplicasException {
// no local machine, so choose a random machine
@@ -137,9 +116,7 @@ public class BlockPlacementPolicyWithNod
} catch (NotEnoughReplicasException e1) {
// find the second replica
DatanodeDescriptor newLocal=null;
- for(Iterator<DatanodeDescriptor> iter=results.iterator();
- iter.hasNext();) {
- DatanodeDescriptor nextNode = iter.next();
+ for(DatanodeDescriptor nextNode : results) {
if (nextNode != localMachine) {
newLocal = nextNode;
break;
@@ -165,7 +142,7 @@ public class BlockPlacementPolicyWithNod
@Override
protected void chooseRemoteRack(int numOfReplicas,
- DatanodeDescriptor localMachine, HashMap<Node, Node> excludedNodes,
+ DatanodeDescriptor localMachine, Map<Node, Node> excludedNodes,
long blocksize, int maxReplicasPerRack, List<DatanodeDescriptor> results,
boolean avoidStaleNodes) throws NotEnoughReplicasException {
int oldNumOfReplicas = results.size();
@@ -192,7 +169,7 @@ public class BlockPlacementPolicyWithNod
*/
private DatanodeDescriptor chooseLocalNodeGroup(
NetworkTopologyWithNodeGroup clusterMap, DatanodeDescriptor localMachine,
- HashMap<Node, Node> excludedNodes, long blocksize, int maxNodesPerRack,
+ Map<Node, Node> excludedNodes, long blocksize, int maxNodesPerRack,
List<DatanodeDescriptor> results, boolean avoidStaleNodes)
throws NotEnoughReplicasException {
// no local machine, so choose a random machine
@@ -209,9 +186,7 @@ public class BlockPlacementPolicyWithNod
} catch (NotEnoughReplicasException e1) {
// find the second replica
DatanodeDescriptor newLocal=null;
- for(Iterator<DatanodeDescriptor> iter=results.iterator();
- iter.hasNext();) {
- DatanodeDescriptor nextNode = iter.next();
+ for(DatanodeDescriptor nextNode : results) {
if (nextNode != localMachine) {
newLocal = nextNode;
break;
@@ -248,10 +223,11 @@ public class BlockPlacementPolicyWithNod
* within the same nodegroup
* @return number of new excluded nodes
*/
- protected int addToExcludedNodes(DatanodeDescriptor localMachine,
- HashMap<Node, Node> excludedNodes) {
+ @Override
+ protected int addToExcludedNodes(DatanodeDescriptor chosenNode,
+ Map<Node, Node> excludedNodes) {
int countOfExcludedNodes = 0;
- String nodeGroupScope = localMachine.getNetworkLocation();
+ String nodeGroupScope = chosenNode.getNetworkLocation();
List<Node> leafNodes = clusterMap.getLeaves(nodeGroupScope);
for (Node leafNode : leafNodes) {
Node node = excludedNodes.put(leafNode, leafNode);
@@ -274,12 +250,12 @@ public class BlockPlacementPolicyWithNod
* If first is empty, then pick second.
*/
@Override
- public Iterator<DatanodeDescriptor> pickupReplicaSet(
+ public Collection<DatanodeDescriptor> pickupReplicaSet(
Collection<DatanodeDescriptor> first,
Collection<DatanodeDescriptor> second) {
// If no replica within same rack, return directly.
if (first.isEmpty()) {
- return second.iterator();
+ return second;
}
// Split data nodes in the first set into two sets,
// moreThanOne contains nodes on nodegroup with more than one replica
@@ -312,9 +288,7 @@ public class BlockPlacementPolicyWithNod
}
}
- Iterator<DatanodeDescriptor> iter =
- moreThanOne.isEmpty() ? exactlyOne.iterator() : moreThanOne.iterator();
- return iter;
+ return moreThanOne.isEmpty()? exactlyOne : moreThanOne;
}
}
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java?rev=1523402&r1=1523401&r2=1523402&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java Sun Sep 15 04:32:39 2013
@@ -43,7 +43,8 @@ import org.apache.hadoop.util.Time;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DatanodeDescriptor extends DatanodeInfo {
-
+ public static final DatanodeDescriptor[] EMPTY_ARRAY = {};
+
// Stores status of decommissioning.
// If node is not decommissioning, do not use this object for anything.
public DecommissioningStatus decommissioningStatus = new DecommissioningStatus();
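
DatanodeDescriptor.EMPTY_ARRAY backs the zero-result returns changed above (the empty chooseTarget(..) result and the null-locations fallback) without allocating a fresh DatanodeDescriptor[0] each time; an empty array is immutable, so one shared instance is safe. A minimal sketch of the idiom:

  public class Registry {
    // One shared instance is enough: a zero-length array has no elements,
    // so callers can never observe each other through it.
    public static final String[] EMPTY_ARRAY = {};

    private String[] entries = EMPTY_ARRAY;

    public String[] list() {
      return entries.length == 0 ? EMPTY_ARRAY : entries.clone();
    }
  }
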
Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1523109-1523401
Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1523109-1523401
Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1523109-1523401
Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1523109-1523401
Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1523109-1523401
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java?rev=1523402&r1=1523401&r2=1523402&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java Sun Sep 15 04:32:39 2013
@@ -31,6 +31,7 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -39,9 +40,11 @@ import org.apache.hadoop.hdfs.NameNodePr
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup;
import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.net.NetworkTopologyWithNodeGroup;
+import org.junit.Assert;
import org.junit.Test;
-import junit.framework.Assert;
/**
* This class tests if a balancer schedules tasks correctly.
@@ -75,10 +78,9 @@ public class TestBalancerWithNodeGroup {
Configuration conf = new HdfsConfiguration();
TestBalancer.initConf(conf);
conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
- "org.apache.hadoop.net.NetworkTopologyWithNodeGroup");
- conf.set("dfs.block.replicator.classname",
- "org.apache.hadoop.hdfs.server.blockmanagement." +
- "BlockPlacementPolicyWithNodeGroup");
+ NetworkTopologyWithNodeGroup.class.getName());
+ conf.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
+ BlockPlacementPolicyWithNodeGroup.class.getName());
return conf;
}
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java?rev=1523402&r1=1523401&r2=1523402&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java Sun Sep 15 04:32:39 2013
@@ -157,8 +157,8 @@ public class TestRBWBlockInvalidation {
// in the context of the test, whereas a random one is closer to what is
// seen in real clusters (nodes have random amounts of free space)
- conf.setClass("dfs.block.replicator.classname", RandomDeleterPolicy.class,
- BlockPlacementPolicy.class);
+ conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
+ RandomDeleterPolicy.class, BlockPlacementPolicy.class);
// Speed up the test a bit with faster heartbeats.
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
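
Note: this file adopts the same key constant through Configuration.setClass,
which also verifies that the supplied class is assignable to the given
interface, so a wrong policy class fails at configuration time rather than
when the block manager instantiates it. A brief sketch; the helper method
name is illustrative, and RandomDeleterPolicy is the test-local policy
defined in this file:

static void useRandomDeleterPolicy(Configuration conf) {
  // Stores the class name under the key and checks that
  // RandomDeleterPolicy is a BlockPlacementPolicy subtype.
  conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
      RandomDeleterPolicy.class, BlockPlacementPolicy.class);
}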
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1523402&r1=1523401&r2=1523402&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Sun Sep 15 04:32:39 2013
@@ -138,30 +138,25 @@ public class TestReplicationPolicy {
HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 4, 0); // overloaded
DatanodeDescriptor[] targets;
- targets = replicator.chooseTarget(filename, 0, dataNodes[0],
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(0);
assertEquals(targets.length, 0);
- targets = replicator.chooseTarget(filename, 1, dataNodes[0],
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(1);
assertEquals(targets.length, 1);
assertEquals(targets[0], dataNodes[0]);
- targets = replicator.chooseTarget(filename,
- 2, dataNodes[0], new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(2);
assertEquals(targets.length, 2);
assertEquals(targets[0], dataNodes[0]);
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
- targets = replicator.chooseTarget(filename, 3, dataNodes[0],
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(3);
assertEquals(targets.length, 3);
assertEquals(targets[0], dataNodes[0]);
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
- targets = replicator.chooseTarget(filename, 4, dataNodes[0],
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(4);
assertEquals(targets.length, 4);
assertEquals(targets[0], dataNodes[0]);
assertTrue(cluster.isOnSameRack(targets[1], targets[2]) ||
@@ -173,15 +168,38 @@ public class TestReplicationPolicy {
HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
}
+ private static DatanodeDescriptor[] chooseTarget(int numOfReplicas) {
+ return chooseTarget(numOfReplicas, dataNodes[0]);
+ }
+
+ private static DatanodeDescriptor[] chooseTarget(int numOfReplicas,
+ DatanodeDescriptor writer) {
+ return chooseTarget(numOfReplicas, writer,
+ new ArrayList<DatanodeDescriptor>());
+ }
+
+ private static DatanodeDescriptor[] chooseTarget(int numOfReplicas,
+ List<DatanodeDescriptor> chosenNodes) {
+ return chooseTarget(numOfReplicas, dataNodes[0], chosenNodes);
+ }
+
+ private static DatanodeDescriptor[] chooseTarget(int numOfReplicas,
+ DatanodeDescriptor writer, List<DatanodeDescriptor> chosenNodes) {
+ return chooseTarget(numOfReplicas, writer, chosenNodes, null);
+ }
+
+ private static DatanodeDescriptor[] chooseTarget(int numOfReplicas,
+ List<DatanodeDescriptor> chosenNodes, Map<Node, Node> excludedNodes) {
+ return chooseTarget(numOfReplicas, dataNodes[0], chosenNodes, excludedNodes);
+ }
+
private static DatanodeDescriptor[] chooseTarget(
- BlockPlacementPolicyDefault policy,
int numOfReplicas,
DatanodeDescriptor writer,
List<DatanodeDescriptor> chosenNodes,
- HashMap<Node, Node> excludedNodes,
- long blocksize) {
- return policy.chooseTarget(numOfReplicas, writer, chosenNodes, false,
- excludedNodes, blocksize);
+ Map<Node, Node> excludedNodes) {
+ return replicator.chooseTarget(filename, numOfReplicas, writer, chosenNodes,
+ false, excludedNodes, BLOCK_SIZE);
}
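
Note: the telescoping chooseTarget helpers above all bottom out in the
single replicator call in the last overload, so each call site below varies
only the parameters it cares about. Reading the expansions off the helper
bodies above, for example:

// chooseTarget(2) expands to:
replicator.chooseTarget(filename, 2, dataNodes[0],
    new ArrayList<DatanodeDescriptor>(), false, null, BLOCK_SIZE);

// chooseTarget(1, chosenNodes, excludedNodes) expands to:
replicator.chooseTarget(filename, 1, dataNodes[0], chosenNodes,
    false, excludedNodes, BLOCK_SIZE);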
/**
@@ -196,28 +214,24 @@ public class TestReplicationPolicy {
public void testChooseTarget2() throws Exception {
HashMap<Node, Node> excludedNodes;
DatanodeDescriptor[] targets;
- BlockPlacementPolicyDefault repl = (BlockPlacementPolicyDefault)replicator;
List<DatanodeDescriptor> chosenNodes = new ArrayList<DatanodeDescriptor>();
excludedNodes = new HashMap<Node, Node>();
excludedNodes.put(dataNodes[1], dataNodes[1]);
- targets = chooseTarget(repl, 0, dataNodes[0], chosenNodes, excludedNodes,
- BLOCK_SIZE);
+ targets = chooseTarget(0, chosenNodes, excludedNodes);
assertEquals(targets.length, 0);
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.put(dataNodes[1], dataNodes[1]);
- targets = chooseTarget(repl, 1, dataNodes[0], chosenNodes, excludedNodes,
- BLOCK_SIZE);
+ targets = chooseTarget(1, chosenNodes, excludedNodes);
assertEquals(targets.length, 1);
assertEquals(targets[0], dataNodes[0]);
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.put(dataNodes[1], dataNodes[1]);
- targets = chooseTarget(repl, 2, dataNodes[0], chosenNodes, excludedNodes,
- BLOCK_SIZE);
+ targets = chooseTarget(2, chosenNodes, excludedNodes);
assertEquals(targets.length, 2);
assertEquals(targets[0], dataNodes[0]);
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
@@ -225,8 +239,7 @@ public class TestReplicationPolicy {
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.put(dataNodes[1], dataNodes[1]);
- targets = chooseTarget(repl, 3, dataNodes[0], chosenNodes, excludedNodes,
- BLOCK_SIZE);
+ targets = chooseTarget(3, chosenNodes, excludedNodes);
assertEquals(targets.length, 3);
assertEquals(targets[0], dataNodes[0]);
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
@@ -235,8 +248,7 @@ public class TestReplicationPolicy {
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.put(dataNodes[1], dataNodes[1]);
- targets = chooseTarget(repl, 4, dataNodes[0], chosenNodes, excludedNodes,
- BLOCK_SIZE);
+ targets = chooseTarget(4, chosenNodes, excludedNodes);
assertEquals(targets.length, 4);
assertEquals(targets[0], dataNodes[0]);
for(int i=1; i<4; i++) {
@@ -250,7 +262,7 @@ public class TestReplicationPolicy {
chosenNodes.clear();
excludedNodes.put(dataNodes[1], dataNodes[1]);
chosenNodes.add(dataNodes[2]);
- targets = repl.chooseTarget(1, dataNodes[0], chosenNodes, true,
+ targets = replicator.chooseTarget(filename, 1, dataNodes[0], chosenNodes, true,
excludedNodes, BLOCK_SIZE);
System.out.println("targets=" + Arrays.asList(targets));
assertEquals(2, targets.length);
@@ -276,30 +288,25 @@ public class TestReplicationPolicy {
(HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0); // no space
DatanodeDescriptor[] targets;
- targets = replicator.chooseTarget(filename, 0, dataNodes[0],
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(0);
assertEquals(targets.length, 0);
- targets = replicator.chooseTarget(filename, 1, dataNodes[0],
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(1);
assertEquals(targets.length, 1);
assertEquals(targets[0], dataNodes[1]);
- targets = replicator.chooseTarget(filename, 2, dataNodes[0],
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(2);
assertEquals(targets.length, 2);
assertEquals(targets[0], dataNodes[1]);
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
- targets = replicator.chooseTarget(filename, 3, dataNodes[0],
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(3);
assertEquals(targets.length, 3);
assertEquals(targets[0], dataNodes[1]);
assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
- targets = replicator.chooseTarget(filename, 4, dataNodes[0],
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(4);
assertEquals(targets.length, 4);
assertEquals(targets[0], dataNodes[1]);
for(int i=1; i<4; i++) {
@@ -332,23 +339,19 @@ public class TestReplicationPolicy {
}
DatanodeDescriptor[] targets;
- targets = replicator.chooseTarget(filename, 0, dataNodes[0],
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(0);
assertEquals(targets.length, 0);
- targets = replicator.chooseTarget(filename, 1, dataNodes[0],
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(1);
assertEquals(targets.length, 1);
assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
- targets = replicator.chooseTarget(filename, 2, dataNodes[0],
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(2);
assertEquals(targets.length, 2);
assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
- targets = replicator.chooseTarget(filename, 3, dataNodes[0],
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(3);
assertEquals(targets.length, 3);
for(int i=0; i<3; i++) {
assertFalse(cluster.isOnSameRack(targets[i], dataNodes[0]));
@@ -377,21 +380,17 @@ public class TestReplicationPolicy {
DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r4");
DatanodeDescriptor[] targets;
- targets = replicator.chooseTarget(filename, 0, writerDesc,
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(0, writerDesc);
assertEquals(targets.length, 0);
- targets = replicator.chooseTarget(filename, 1, writerDesc,
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(1, writerDesc);
assertEquals(targets.length, 1);
- targets = replicator.chooseTarget(filename, 2, writerDesc,
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(2, writerDesc);
assertEquals(targets.length, 2);
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
- targets = replicator.chooseTarget(filename, 3, writerDesc,
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(3, writerDesc);
assertEquals(targets.length, 3);
assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
@@ -435,9 +434,7 @@ public class TestReplicationPolicy {
// try to choose NUM_OF_DATANODES targets, which is more than the number
// of nodes actually available.
- DatanodeDescriptor[] targets = replicator.chooseTarget(filename,
- NUM_OF_DATANODES, dataNodes[0], new ArrayList<DatanodeDescriptor>(),
- BLOCK_SIZE);
+ DatanodeDescriptor[] targets = chooseTarget(NUM_OF_DATANODES);
assertEquals(targets.length, NUM_OF_DATANODES - 2);
final List<LoggingEvent> log = appender.getLog();
@@ -480,17 +477,14 @@ public class TestReplicationPolicy {
DatanodeDescriptor[] targets;
// We set datanode[0] as stale, so the policy should choose datanode[1],
// since datanode[1] is on the same rack as datanode[0] (the writer)
- targets = replicator.chooseTarget(filename, 1, dataNodes[0],
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(1);
assertEquals(targets.length, 1);
assertEquals(targets[0], dataNodes[1]);
HashMap<Node, Node> excludedNodes = new HashMap<Node, Node>();
excludedNodes.put(dataNodes[1], dataNodes[1]);
List<DatanodeDescriptor> chosenNodes = new ArrayList<DatanodeDescriptor>();
- BlockPlacementPolicyDefault repl = (BlockPlacementPolicyDefault)replicator;
- targets = chooseTarget(repl, 1, dataNodes[0], chosenNodes, excludedNodes,
- BLOCK_SIZE);
+ targets = chooseTarget(1, chosenNodes, excludedNodes);
assertEquals(targets.length, 1);
assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
@@ -517,33 +511,27 @@ public class TestReplicationPolicy {
namenode.getNamesystem().getBlockManager()
.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
- DatanodeDescriptor[] targets;
- targets = replicator.chooseTarget(filename, 0, dataNodes[0],
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ DatanodeDescriptor[] targets = chooseTarget(0);
assertEquals(targets.length, 0);
// Since we have 6 datanodes total, stale nodes should
// not be returned until we ask for more than 3 targets
- targets = replicator.chooseTarget(filename, 1, dataNodes[0],
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(1);
assertEquals(targets.length, 1);
assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2));
- targets = replicator.chooseTarget(filename, 2, dataNodes[0],
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(2);
assertEquals(targets.length, 2);
assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2));
assertFalse(containsWithinRange(targets[1], dataNodes, 0, 2));
- targets = replicator.chooseTarget(filename, 3, dataNodes[0],
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(3);
assertEquals(targets.length, 3);
assertTrue(containsWithinRange(targets[0], dataNodes, 3, 5));
assertTrue(containsWithinRange(targets[1], dataNodes, 3, 5));
assertTrue(containsWithinRange(targets[2], dataNodes, 3, 5));
- targets = replicator.chooseTarget(filename, 4, dataNodes[0],
- new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(4);
assertEquals(targets.length, 4);
assertTrue(containsWithinRange(dataNodes[3], targets, 0, 3));
assertTrue(containsWithinRange(dataNodes[4], targets, 0, 3));
@@ -596,7 +584,8 @@ public class TestReplicationPolicy {
BlockPlacementPolicy replicator = miniCluster.getNameNode()
.getNamesystem().getBlockManager().getBlockPlacementPolicy();
DatanodeDescriptor[] targets = replicator.chooseTarget(filename, 3,
- staleNodeInfo, new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ staleNodeInfo, new ArrayList<DatanodeDescriptor>(), false, null, BLOCK_SIZE);
+
assertEquals(targets.length, 3);
assertFalse(cluster.isOnSameRack(targets[0], staleNodeInfo));
@@ -620,7 +609,7 @@ public class TestReplicationPolicy {
.getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
// Call chooseTarget
targets = replicator.chooseTarget(filename, 3,
- staleNodeInfo, new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ staleNodeInfo, new ArrayList<DatanodeDescriptor>(), false, null, BLOCK_SIZE);
assertEquals(targets.length, 3);
assertTrue(cluster.isOnSameRack(targets[0], staleNodeInfo));
@@ -642,8 +631,7 @@ public class TestReplicationPolicy {
assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager()
.getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
// Call chooseTarget
- targets = replicator.chooseTarget(filename, 3,
- staleNodeInfo, new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+ targets = chooseTarget(3, staleNodeInfo);
assertEquals(targets.length, 3);
assertFalse(cluster.isOnSameRack(targets[0], staleNodeInfo));
} finally {
@@ -664,23 +652,19 @@ public class TestReplicationPolicy {
chosenNodes.add(dataNodes[0]);
DatanodeDescriptor[] targets;
- targets = replicator.chooseTarget(filename,
- 0, dataNodes[0], chosenNodes, BLOCK_SIZE);
+ targets = chooseTarget(0, chosenNodes);
assertEquals(targets.length, 0);
- targets = replicator.chooseTarget(filename,
- 1, dataNodes[0], chosenNodes, BLOCK_SIZE);
+ targets = chooseTarget(1, chosenNodes);
assertEquals(targets.length, 1);
assertFalse(cluster.isOnSameRack(dataNodes[0], targets[0]));
- targets = replicator.chooseTarget(filename,
- 2, dataNodes[0], chosenNodes, BLOCK_SIZE);
+ targets = chooseTarget(2, chosenNodes);
assertEquals(targets.length, 2);
assertTrue(cluster.isOnSameRack(dataNodes[0], targets[0]));
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
- targets = replicator.chooseTarget(filename,
- 3, dataNodes[0], chosenNodes, BLOCK_SIZE);
+ targets = chooseTarget(3, chosenNodes);
assertEquals(targets.length, 3);
assertTrue(cluster.isOnSameRack(dataNodes[0], targets[0]));
assertFalse(cluster.isOnSameRack(targets[0], targets[2]));
@@ -700,17 +684,14 @@ public class TestReplicationPolicy {
chosenNodes.add(dataNodes[1]);
DatanodeDescriptor[] targets;
- targets = replicator.chooseTarget(filename,
- 0, dataNodes[0], chosenNodes, BLOCK_SIZE);
+ targets = chooseTarget(0, chosenNodes);
assertEquals(targets.length, 0);
- targets = replicator.chooseTarget(filename,
- 1, dataNodes[0], chosenNodes, BLOCK_SIZE);
+ targets = chooseTarget(1, chosenNodes);
assertEquals(targets.length, 1);
assertFalse(cluster.isOnSameRack(dataNodes[0], targets[0]));
- targets = replicator.chooseTarget(filename,
- 2, dataNodes[0], chosenNodes, BLOCK_SIZE);
+ targets = chooseTarget(2, chosenNodes);
assertEquals(targets.length, 2);
assertFalse(cluster.isOnSameRack(dataNodes[0], targets[0]));
assertFalse(cluster.isOnSameRack(dataNodes[0], targets[1]));
@@ -730,29 +711,24 @@ public class TestReplicationPolicy {
chosenNodes.add(dataNodes[2]);
DatanodeDescriptor[] targets;
- targets = replicator.chooseTarget(filename,
- 0, dataNodes[0], chosenNodes, BLOCK_SIZE);
+ targets = chooseTarget(0, chosenNodes);
assertEquals(targets.length, 0);
- targets = replicator.chooseTarget(filename,
- 1, dataNodes[0], chosenNodes, BLOCK_SIZE);
+ targets = chooseTarget(1, chosenNodes);
assertEquals(targets.length, 1);
assertTrue(cluster.isOnSameRack(dataNodes[0], targets[0]));
assertFalse(cluster.isOnSameRack(dataNodes[2], targets[0]));
- targets = replicator.chooseTarget(filename,
- 1, dataNodes[2], chosenNodes, BLOCK_SIZE);
+ targets = chooseTarget(1, dataNodes[2], chosenNodes);
assertEquals(targets.length, 1);
assertTrue(cluster.isOnSameRack(dataNodes[2], targets[0]));
assertFalse(cluster.isOnSameRack(dataNodes[0], targets[0]));
- targets = replicator.chooseTarget(filename,
- 2, dataNodes[0], chosenNodes, BLOCK_SIZE);
+ targets = chooseTarget(2, chosenNodes);
assertEquals(targets.length, 2);
assertTrue(cluster.isOnSameRack(dataNodes[0], targets[0]));
- targets = replicator.chooseTarget(filename,
- 2, dataNodes[2], chosenNodes, BLOCK_SIZE);
+ targets = chooseTarget(2, dataNodes[2], chosenNodes);
assertEquals(targets.length, 2);
assertTrue(cluster.isOnSameRack(dataNodes[2], targets[0]));
}