Posted to common-commits@hadoop.apache.org by ha...@apache.org on 2009/02/02 20:10:32 UTC
svn commit: r740081 - in /hadoop/core/branches/branch-0.19: ./
src/hdfs/org/apache/hadoop/hdfs/server/datanode/
src/hdfs/org/apache/hadoop/hdfs/server/namenode/
src/hdfs/org/apache/hadoop/hdfs/server/protocol/
src/test/org/apache/hadoop/hdfs/server/nam...
Author: hairong
Date: Mon Feb 2 19:10:30 2009
New Revision: 740081
URL: http://svn.apache.org/viewvc?rev=740081&view=rev
Log:
Merge -r 740076:740077 to move the change of HADOOP-5034 from main to branch 0.19.
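In short: the heartbeat reply changes from a single DatanodeCommand to an array of them, so one reply can carry a replication request and a deletion request at the same time. A minimal before/after sketch of the affected protocol method (parameter tail reconstructed from the diffs below):

    // Before (DatanodeProtocol versionID 18): at most one command per reply.
    public DatanodeCommand sendHeartbeat(DatanodeRegistration registration,
        long capacity, long dfsUsed, long remaining,
        int xmitsInProgress, int xceiverCount) throws IOException;

    // After (versionID 19): zero or more commands; null means nothing to do.
    public DatanodeCommand[] sendHeartbeat(DatanodeRegistration registration,
        long capacity, long dfsUsed, long remaining,
        int xmitsInProgress, int xceiverCount) throws IOException;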
Added:
hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java
- copied unchanged from r740077, hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java
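The test is copied unchanged from trunk, so its contents do not appear in this diff. Purely as an illustration of the behavior it presumably pins down: a heartbeat answered while both a pending replication and a pending block invalidation are queued for the node should now yield both commands in one reply. A hypothetical assertion sketch, not the actual test (DNA_TRANSFER and DNA_INVALIDATE are the existing action codes in DatanodeProtocol):

    // Hypothetical sketch only -- not the contents of TestHeartbeatHandling.
    DatanodeCommand[] cmds = namesystem.handleHeartbeat(nodeReg,
        capacity, dfsUsed, remaining, xceiverCount, xmitsInProgress);
    assertEquals(2, cmds.length);  // replication and deletion arrive together
    assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
    assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());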
Modified:
hadoop/core/branches/branch-0.19/ (props changed)
hadoop/core/branches/branch-0.19/CHANGES.txt (contents, props changed)
hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
Propchange: hadoop/core/branches/branch-0.19/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Feb 2 19:10:30 2009
@@ -1 +1 @@
-/hadoop/core/trunk:697306,698176,699056,699098,699415,699424,699444,699490,699517,700163,700628,700923,701273,701398,703923,704203,704261,704701,704703,704707,704712,704732,704748,704989,705391,705420,705430,705762,706350,706707,706719,706796,706802,707258,707262,708623,708641,708710,709040,709303,712881,713888,720602,723013,723460,723831,723918,724883,727117,727212,727217,727228,727869,732572,732777,733887,734870,736426,738697
+/hadoop/core/trunk:697306,698176,699056,699098,699415,699424,699444,699490,699517,700163,700628,700923,701273,701398,703923,704203,704261,704701,704703,704707,704712,704732,704748,704989,705391,705420,705430,705762,706350,706707,706719,706796,706802,707258,707262,708623,708641,708710,709040,709303,712881,713888,720602,723013,723460,723831,723918,724883,727117,727212,727217,727228,727869,732572,732777,733887,734870,736426,738697,740077
Modified: hadoop/core/branches/branch-0.19/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/CHANGES.txt?rev=740081&r1=740080&r2=740081&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.19/CHANGES.txt Mon Feb 2 19:10:30 2009
@@ -77,6 +77,9 @@
HADOOP-4862. Minor : HADOOP-3678 did not remove all the cases of
spurious IOExceptions logged by DataNode. (Raghu Angadi)
+ HADOOP-5034. NameNode should send both replication and deletion requests
+ to DataNode in one reply to a heartbeat. (hairong)
+
Release 0.19.0 - 2008-11-18
INCOMPATIBLE CHANGES
Propchange: hadoop/core/branches/branch-0.19/CHANGES.txt
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Feb 2 19:10:30 2009
@@ -1,2 +1,2 @@
/hadoop/core/branches/branch-0.18/CHANGES.txt:727226
-/hadoop/core/trunk/CHANGES.txt:697306,698176,699056,699098,699415,699424,699444,699490,699517,700163,700628,700923,701273,701398,703923,704203,704261,704701,704703,704707,704712,704732,704748,704989,705391,705420,705430,705762,706350,706707,706719,706796,706802,707258,707262,708623,708641,708710,708723,709040,709303,711717,712881,713888,720602,723013,723460,723831,723918,724883,727117,727212,727217,727228,727869,732572,732777,733887,734870,735082,736426,738697
+/hadoop/core/trunk/CHANGES.txt:697306,698176,699056,699098,699415,699424,699444,699490,699517,700163,700628,700923,701273,701398,703923,704203,704261,704701,704703,704707,704712,704732,704748,704989,705391,705420,705430,705762,706350,706707,706719,706796,706802,707258,707262,708623,708641,708710,708723,709040,709303,711717,712881,713888,720602,723013,723460,723831,723918,724883,727117,727212,727217,727228,727869,732572,732777,733887,734870,735082,736426,738697,740077
Modified: hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=740081&r1=740080&r2=740081&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java Mon Feb 2 19:10:30 2009
@@ -673,7 +673,7 @@
// -- Bytes remaining
//
lastHeartbeat = startTime;
- DatanodeCommand cmd = namenode.sendHeartbeat(dnRegistration,
+ DatanodeCommand[] cmds = namenode.sendHeartbeat(dnRegistration,
data.getCapacity(),
data.getDfsUsed(),
data.getRemaining(),
@@ -681,7 +681,7 @@
getXceiverCount());
myMetrics.heartbeats.inc(now() - startTime);
//LOG.info("Just sent heartbeat, with name " + localName);
- if (!processCommand(cmd))
+ if (!processCommand(cmds))
continue;
}
@@ -791,6 +791,27 @@
} // while (shouldRun)
} // offerService
+ /**
+ * Process an array of datanode commands
+ *
+ * @param cmds an array of datanode commands
+ * @return true if further processing may be required or false otherwise.
+ */
+ private boolean processCommand(DatanodeCommand[] cmds) {
+ if (cmds != null) {
+ for (DatanodeCommand cmd : cmds) {
+ try {
+ if (processCommand(cmd) == false) {
+ return false;
+ }
+ } catch (IOException ioe) {
+ LOG.warn("Error processing datanode Command", ioe);
+ }
+ }
+ }
+ return true;
+ }
+
/**
*
* @param cmd
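The new array overload keeps the existing single-command processCommand as the unit of work: commands are handled in order, a false return short-circuits the rest of the array (the pre-existing handler returns false for commands such as DNA_SHUTDOWN that should abort the current pass of offerService), and an IOException from one command is logged rather than allowed to block the commands after it. The same dispatch pattern as a standalone sketch (simplified names, not the Hadoop classes themselves):

    import java.io.IOException;

    /** Stand-in for DatanodeCommand plus its handler (illustrative only). */
    interface Command {
      /** @return false if the remaining commands should be skipped. */
      boolean apply() throws IOException;
    }

    class CommandRunner {
      static boolean processAll(Command[] cmds) {
        if (cmds == null) {
          return true;                 // no commands: nothing to do
        }
        for (Command c : cmds) {
          try {
            if (!c.apply()) {
              return false;            // e.g. a shutdown: skip the rest
            }
          } catch (IOException ioe) {
            // One failing command must not starve the ones after it.
            System.err.println("Error processing command: " + ioe);
          }
        }
        return true;
      }
    }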
Modified: hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=740081&r1=740080&r2=740081&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon Feb 2 19:10:30 2009
@@ -2191,10 +2191,10 @@
* If a substantial amount of time passed since the last datanode
* heartbeat then request an immediate block report.
*
- * @return a datanode command
+ * @return an array of datanode commands
* @throws IOException
*/
- DatanodeCommand handleHeartbeat(DatanodeRegistration nodeReg,
+ DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
long capacity, long dfsUsed, long remaining,
int xceiverCount, int xmitsInProgress) throws IOException {
DatanodeCommand cmd = null;
@@ -2204,7 +2204,7 @@
try {
nodeinfo = getDatanode(nodeReg);
} catch(UnregisteredDatanodeException e) {
- return DatanodeCommand.REGISTER;
+ return new DatanodeCommand[]{DatanodeCommand.REGISTER};
}
// Check if this datanode should actually be shutdown instead.
@@ -2214,7 +2214,7 @@
}
if (nodeinfo == null || !nodeinfo.isAlive) {
- return DatanodeCommand.REGISTER;
+ return new DatanodeCommand[]{DatanodeCommand.REGISTER};
}
updateStats(nodeinfo, false);
@@ -2222,26 +2222,35 @@
updateStats(nodeinfo, true);
//check lease recovery
- if (cmd == null) {
- cmd = nodeinfo.getLeaseRecoveryCommand(Integer.MAX_VALUE);
+ cmd = nodeinfo.getLeaseRecoveryCommand(Integer.MAX_VALUE);
+ if (cmd != null) {
+ return new DatanodeCommand[] {cmd};
}
+
+ ArrayList<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>(2);
//check pending replication
- if (cmd == null) {
- cmd = nodeinfo.getReplicationCommand(
+ cmd = nodeinfo.getReplicationCommand(
maxReplicationStreams - xmitsInProgress);
+ if (cmd != null) {
+ cmds.add(cmd);
}
//check block invalidation
- if (cmd == null) {
- cmd = nodeinfo.getInvalidateBlocks(blockInvalidateLimit);
+ cmd = nodeinfo.getInvalidateBlocks(blockInvalidateLimit);
+ if (cmd != null) {
+ cmds.add(cmd);
+ }
+ if (!cmds.isEmpty()) {
+ return cmds.toArray(new DatanodeCommand[cmds.size()]);
}
}
}
//check distributed upgrade
- if (cmd == null) {
- cmd = getDistributedUpgradeCommand();
+ cmd = getDistributedUpgradeCommand();
+ if (cmd != null) {
+ return new DatanodeCommand[] {cmd};
}
- return cmd;
+ return null;
}
private void updateStats(DatanodeDescriptor node, boolean isAdded) {
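Pieced together from the hunks above, the reply-building order is now: a lease-recovery command preempts everything and travels alone; replication and deletion commands are batched into one reply, which is the point of HADOOP-5034 and why the ArrayList is sized to 2; a distributed-upgrade command goes out only when nothing else is pending; and null still means the DataNode has nothing to do. As one consolidated sketch, with the synchronization and heartbeat bookkeeping elided (the wrapper method itself is illustrative; the DatanodeDescriptor calls are the ones in the diff):

    // Illustrative consolidation of the new ordering -- not a verbatim extract.
    private DatanodeCommand[] buildReply(DatanodeDescriptor nodeinfo,
        int xmitsInProgress) throws IOException {
      // 1. Lease recovery preempts everything and is sent alone.
      DatanodeCommand cmd = nodeinfo.getLeaseRecoveryCommand(Integer.MAX_VALUE);
      if (cmd != null) {
        return new DatanodeCommand[] {cmd};
      }
      // 2. Replication and deletion may now share one reply.
      ArrayList<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>(2);
      cmd = nodeinfo.getReplicationCommand(maxReplicationStreams - xmitsInProgress);
      if (cmd != null) {
        cmds.add(cmd);
      }
      cmd = nodeinfo.getInvalidateBlocks(blockInvalidateLimit);
      if (cmd != null) {
        cmds.add(cmd);
      }
      if (!cmds.isEmpty()) {
        return cmds.toArray(new DatanodeCommand[cmds.size()]);
      }
      // 3. Upgrade commands go out only when nothing else is pending;
      //    null still means "nothing to do".
      cmd = getDistributedUpgradeCommand();
      return cmd == null ? null : new DatanodeCommand[] {cmd};
    }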
Modified: hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=740081&r1=740080&r2=740081&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java Mon Feb 2 19:10:30 2009
@@ -612,10 +612,10 @@
/**
* Data node notifies the name node that it is alive
- * Return a block-oriented command for the datanode to execute.
+ * Return an array of block-oriented commands for the datanode to execute.
* This will be either a transfer or a delete operation.
*/
- public DatanodeCommand sendHeartbeat(DatanodeRegistration nodeReg,
+ public DatanodeCommand[] sendHeartbeat(DatanodeRegistration nodeReg,
long capacity,
long dfsUsed,
long remaining,
Modified: hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java?rev=740081&r1=740080&r2=740081&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java (original)
+++ hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java Mon Feb 2 19:10:30 2009
@@ -35,15 +35,10 @@
**********************************************************************/
public interface DatanodeProtocol extends VersionedProtocol {
/**
- * 18: In sendHeartbeat, the capacity parameter reported was sum of
- * the filesystem disk space of all the data directories. This is
- * changed to exclude the reserved capacity defined by
- * dfs.datanode.du.reserved.
- *
- * The new capacity reported is sum of the filesystem disk space of
- * all the data directories minus the reserved capacity.
+ * 19: sendHeartbeat returns an array of DatanodeCommand objects
+ * instead of a single DatanodeCommand object.
*/
- public static final long versionID = 18L;
+ public static final long versionID = 19L;
// error code
final static int NOTIFY = 0;
@@ -77,11 +72,12 @@
/**
* sendHeartbeat() tells the NameNode that the DataNode is still
* alive and well. Includes some status info, too.
- * It also gives the NameNode a chance to return a "DatanodeCommand" object.
+ * It also gives the NameNode a chance to return
+ * an array of "DatanodeCommand" objects.
* A DatanodeCommand tells the DataNode to invalidate local block(s),
* or to copy them to other DataNodes, etc.
*/
- public DatanodeCommand sendHeartbeat(DatanodeRegistration registration,
+ public DatanodeCommand[] sendHeartbeat(DatanodeRegistration registration,
long capacity,
long dfsUsed, long remaining,
int xmitsInProgress,
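Because versionID moves from 18L to 19L, the two sides of this change cannot be mixed: the DataNode asks for its NameNode proxy at the protocol version it was compiled against, roughly as below, and Hadoop's RPC layer rejects the connection on a version mismatch. The RPC.waitForProxy form shown is recalled from the 0.19 client code and is not part of this diff:

    // Why the bump matters (sketch): a 19L DataNode cannot heartbeat an
    // 18L NameNode, and vice versa, because the proxy is requested at the
    // compiled-in version.
    DatanodeProtocol namenode = (DatanodeProtocol) RPC.waitForProxy(
        DatanodeProtocol.class,
        DatanodeProtocol.versionID,  // 19L after this change
        nameNodeAddr,                // java.net.InetSocketAddress of the NameNode
        conf);                       // org.apache.hadoop.conf.Configuration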