You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ha...@apache.org on 2009/02/02 20:09:13 UTC

svn commit: r740080 - in /hadoop/core/branches/branch-0.20: ./ src/hdfs/org/apache/hadoop/hdfs/server/datanode/ src/hdfs/org/apache/hadoop/hdfs/server/namenode/ src/hdfs/org/apache/hadoop/hdfs/server/protocol/ src/test/org/apache/hadoop/hdfs/server/nam...

Author: hairong
Date: Mon Feb  2 19:09:09 2009
New Revision: 740080

URL: http://svn.apache.org/viewvc?rev=740080&view=rev
Log:
Merge -r 740076:740077 to move the change of HADOOP-5034 from main to branch 0.20.

Added:
    hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java
      - copied unchanged from r740077, hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java
Modified:
    hadoop/core/branches/branch-0.20/   (props changed)
    hadoop/core/branches/branch-0.20/CHANGES.txt   (contents, props changed)
    hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
    hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java

Propchange: hadoop/core/branches/branch-0.20/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Feb  2 19:09:09 2009
@@ -1,2 +1,2 @@
 /hadoop/core/branches/branch-0.19:713112
-/hadoop/core/trunk:727001,727117,727191,727212,727217,727228,727255,727869,728187,729052,729987,732385,732572,732777,732838,732869,733887,734870,734916,736426,738328,738697
+/hadoop/core/trunk:727001,727117,727191,727212,727217,727228,727255,727869,728187,729052,729987,732385,732572,732777,732838,732869,733887,734870,734916,736426,738328,738697,740077

Modified: hadoop/core/branches/branch-0.20/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/CHANGES.txt?rev=740080&r1=740079&r2=740080&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.20/CHANGES.txt Mon Feb  2 19:09:09 2009
@@ -653,6 +653,9 @@
     HADOOP-4862. Minor : HADOOP-3678 did not remove all the cases of 
     spurious IOExceptions logged by DataNode. (Raghu Angadi) 
 
+    HADOOP-5034. NameNode should send both replication and deletion requests
+    to DataNode in one reply to a heartbeat. (hairong)
+
 Release 0.19.0 - 2008-11-18
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/core/branches/branch-0.20/CHANGES.txt
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Feb  2 19:09:09 2009
@@ -1,3 +1,3 @@
 /hadoop/core/branches/branch-0.18/CHANGES.txt:727226
 /hadoop/core/branches/branch-0.19/CHANGES.txt:713112
-/hadoop/core/trunk/CHANGES.txt:727001,727117,727191,727212,727228,727255,727869,728187,729052,729987,732385,732572,732777,732838,732869,733887,734870,734916,735082,736426,738602,738697,739416
+/hadoop/core/trunk/CHANGES.txt:727001,727117,727191,727212,727228,727255,727869,728187,729052,729987,732385,732572,732777,732838,732869,733887,734870,734916,735082,736426,738602,738697,739416,740077

Modified: hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=740080&r1=740079&r2=740080&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java Mon Feb  2 19:09:09 2009
@@ -694,7 +694,7 @@
           // -- Bytes remaining
           //
           lastHeartbeat = startTime;
-          DatanodeCommand cmd = namenode.sendHeartbeat(dnRegistration,
+          DatanodeCommand[] cmds = namenode.sendHeartbeat(dnRegistration,
                                                        data.getCapacity(),
                                                        data.getDfsUsed(),
                                                        data.getRemaining(),
@@ -702,7 +702,7 @@
                                                        getXceiverCount());
           myMetrics.heartbeats.inc(now() - startTime);
           //LOG.info("Just sent heartbeat, with name " + localName);
-          if (!processCommand(cmd))
+          if (!processCommand(cmds))
             continue;
         }
             
@@ -812,6 +812,27 @@
     } // while (shouldRun)
   } // offerService
 
+  /**
+   * Process an array of datanode commands
+   * 
+   * @param cmds an array of datanode commands
+   * @return true if further processing may be required or false otherwise. 
+   */
+  private boolean processCommand(DatanodeCommand[] cmds) {
+    if (cmds != null) {
+      for (DatanodeCommand cmd : cmds) {
+        try {
+          if (processCommand(cmd) == false) {
+            return false;
+          }
+        } catch (IOException ioe) {
+          LOG.warn("Error processing datanode Command", ioe);
+        }
+      }
+    }
+    return true;
+  }
+  
     /**
      * 
      * @param cmd

Modified: hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=740080&r1=740079&r2=740080&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon Feb  2 19:09:09 2009
@@ -2135,10 +2135,10 @@
    * If a substantial amount of time passed since the last datanode 
    * heartbeat then request an immediate block report.  
    * 
-   * @return a datanode command 
+   * @return an array of datanode commands 
    * @throws IOException
    */
-  DatanodeCommand handleHeartbeat(DatanodeRegistration nodeReg,
+  DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
       long capacity, long dfsUsed, long remaining,
       int xceiverCount, int xmitsInProgress) throws IOException {
     DatanodeCommand cmd = null;
@@ -2148,7 +2148,7 @@
         try {
           nodeinfo = getDatanode(nodeReg);
         } catch(UnregisteredDatanodeException e) {
-          return DatanodeCommand.REGISTER;
+          return new DatanodeCommand[]{DatanodeCommand.REGISTER};
         }
           
         // Check if this datanode should actually be shutdown instead. 
@@ -2158,7 +2158,7 @@
         }
 
         if (nodeinfo == null || !nodeinfo.isAlive) {
-          return DatanodeCommand.REGISTER;
+          return new DatanodeCommand[]{DatanodeCommand.REGISTER};
         }
 
         updateStats(nodeinfo, false);
@@ -2166,26 +2166,35 @@
         updateStats(nodeinfo, true);
         
         //check lease recovery
-        if (cmd == null) {
-          cmd = nodeinfo.getLeaseRecoveryCommand(Integer.MAX_VALUE);
+        cmd = nodeinfo.getLeaseRecoveryCommand(Integer.MAX_VALUE);
+        if (cmd != null) {
+          return new DatanodeCommand[] {cmd};
         }
+      
+        ArrayList<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>(2);
         //check pending replication
-        if (cmd == null) {
-          cmd = nodeinfo.getReplicationCommand(
+        cmd = nodeinfo.getReplicationCommand(
               maxReplicationStreams - xmitsInProgress);
+        if (cmd != null) {
+          cmds.add(cmd);
         }
         //check block invalidation
-        if (cmd == null) {
-          cmd = nodeinfo.getInvalidateBlocks(blockInvalidateLimit);
+        cmd = nodeinfo.getInvalidateBlocks(blockInvalidateLimit);
+        if (cmd != null) {
+          cmds.add(cmd);
+        }
+        if (!cmds.isEmpty()) {
+          return cmds.toArray(new DatanodeCommand[cmds.size()]);
         }
       }
     }
 
     //check distributed upgrade
-    if (cmd == null) {
-      cmd = getDistributedUpgradeCommand();
+    cmd = getDistributedUpgradeCommand();
+    if (cmd != null) {
+      return new DatanodeCommand[] {cmd};
     }
-    return cmd;
+    return null;
   }
 
   private void updateStats(DatanodeDescriptor node, boolean isAdded) {

Modified: hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=740080&r1=740079&r2=740080&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java Mon Feb  2 19:09:09 2009
@@ -690,10 +690,10 @@
 
   /**
    * Data node notify the name node that it is alive 
-   * Return a block-oriented command for the datanode to execute.
+   * Return an array of block-oriented commands for the datanode to execute.
    * This will be either a transfer or a delete operation.
    */
-  public DatanodeCommand sendHeartbeat(DatanodeRegistration nodeReg,
+  public DatanodeCommand[] sendHeartbeat(DatanodeRegistration nodeReg,
                                        long capacity,
                                        long dfsUsed,
                                        long remaining,

Modified: hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java?rev=740080&r1=740079&r2=740080&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java (original)
+++ hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java Mon Feb  2 19:09:09 2009
@@ -35,15 +35,10 @@
  **********************************************************************/
 public interface DatanodeProtocol extends VersionedProtocol {
   /**
-   * 18: In sendHeartbeat, the capacity parameter reported was sum of 
-   *     the filesystem disk space of all the data directories. This is 
-   *     changed to exclude the reserved capacity defined by 
-   *     dfs.datanode.du.reserved. 
-   *
-   *     The new capacity reported is sum of the filesystem disk space of 
-   *     all the data directories minus the reserved capacity.
+   * 19: SendHeartbeat returns an array of DatanodeCommand objects
+   *     instead of a DatanodeCommand object.
    */
-  public static final long versionID = 18L;
+  public static final long versionID = 19L;
   
   // error code
   final static int NOTIFY = 0;
@@ -77,11 +72,12 @@
   /**
    * sendHeartbeat() tells the NameNode that the DataNode is still
    * alive and well.  Includes some status info, too. 
-   * It also gives the NameNode a chance to return a "DatanodeCommand" object.
+   * It also gives the NameNode a chance to return 
+   * an array of "DatanodeCommand" objects.
    * A DatanodeCommand tells the DataNode to invalidate local block(s), 
    * or to copy them to other DataNodes, etc.
    */
-  public DatanodeCommand sendHeartbeat(DatanodeRegistration registration,
+  public DatanodeCommand[] sendHeartbeat(DatanodeRegistration registration,
                                        long capacity,
                                        long dfsUsed, long remaining,
                                        int xmitsInProgress,

Modified: hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=740080&r1=740079&r2=740080&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Mon Feb  2 19:09:09 2009
@@ -724,10 +724,13 @@
      */
     void sendHeartbeat() throws IOException {
       // register datanode
-      DatanodeCommand cmd = nameNode.sendHeartbeat(
+      DatanodeCommand[] cmds = nameNode.sendHeartbeat(
           dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, 0, 0);
-      if(cmd != null)
-        LOG.debug("sendHeartbeat Name-node reply: " + cmd.getAction());
+      if(cmds != null) {
+        for (DatanodeCommand cmd : cmds ) {
+          LOG.debug("sendHeartbeat Name-node reply: " + cmd.getAction());
+        }
+      }
     }
 
     boolean addBlock(Block blk) {
@@ -755,13 +758,18 @@
      */
     int replicateBlocks() throws IOException {
       // register datanode
-      DatanodeCommand cmd = nameNode.sendHeartbeat(
+      DatanodeCommand[] cmds = nameNode.sendHeartbeat(
           dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, 0, 0);
-      if(cmd == null || cmd.getAction() != DatanodeProtocol.DNA_TRANSFER)
-        return 0;
-      // Send a copy of a block to another datanode
-      BlockCommand bcmd = (BlockCommand)cmd;
-      return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
+      if (cmds != null) {
+        for (DatanodeCommand cmd : cmds) {
+          if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
+            // Send a copy of a block to another datanode
+            BlockCommand bcmd = (BlockCommand)cmd;
+            return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
+          }
+        }
+      }
+      return 0;
     }
 
     /**