Posted to common-commits@hadoop.apache.org by om...@apache.org on 2008/08/06 20:34:37 UTC

svn commit: r683361 - in /hadoop/core/trunk: CHANGES.txt src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java

Author: omalley
Date: Wed Aug  6 11:34:37 2008
New Revision: 683361

URL: http://svn.apache.org/viewvc?rev=683361&view=rev
Log:
HADOOP-3859. Make the maximum number of xceivers in the data node
configurable. Contributed by Johan Oskarsson.
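
With this change the old compiled-in ceiling of 256 becomes only the
default for a new configuration key, dfs.datanode.max.xcievers (note the
historical spelling of the key, which matches the code below). As a
minimal standalone sketch, assuming the hypothetical class name
ShowXceiverLimit, the key is read through the ordinary
org.apache.hadoop.conf.Configuration API:

    import org.apache.hadoop.conf.Configuration;

    public class ShowXceiverLimit {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // getInt falls back to the supplied default when the key is not
        // set, mirroring the conf.getInt(...) call added to DataNode.java.
        int limit = conf.getInt("dfs.datanode.max.xcievers", 256);
        System.out.println("xceiver limit = " + limit);
      }
    }

Operators can thus raise the ceiling by setting dfs.datanode.max.xcievers
in the site configuration instead of recompiling the data node.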

Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=683361&r1=683360&r2=683361&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Wed Aug  6 11:34:37 2008
@@ -1043,6 +1043,9 @@
     FileSystem.delete rather than the FileUtil.fullyDelete. (Amareshwari
     Sri Ramadasu via acmurthy)  
 
+    HADOOP-3859. Make the maximum number of xceivers in the data node
+    configurable. (Johan Oskarsson via omalley)
+
 Release 0.17.1 - 2008-06-23
 
   INCOMPATIBLE CHANGES

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=683361&r1=683360&r2=683361&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed Aug  6 11:34:37 2008
@@ -161,6 +161,14 @@
   private static final Random R = new Random();
 
   /**
+   * Maximal number of concurrent xceivers per node.
+   * Enforcing the limit is required in order to avoid data-node
+   * running out of memory.
+   */
+  private static final int MAX_XCEIVER_COUNT = 256;
+  private int maxXceiverCount = MAX_XCEIVER_COUNT;
+  
+  /**
    * We need an estimate for block size to check if the disk partition has
    * enough space. For now we set it to be the default block size set
    * in the server side configuration, which is not ideal because the
@@ -304,6 +312,7 @@
     this.dnRegistration.setName(machineName + ":" + tmpPort);
     LOG.info("Opened info server at " + tmpPort);
       
+    this.maxXceiverCount = conf.getInt("dfs.datanode.max.xcievers", MAX_XCEIVER_COUNT);
     this.threadGroup = new ThreadGroup("dataXceiveServer");
     this.dataXceiveServer = new Daemon(threadGroup, new DataXceiveServer(ss));
     this.threadGroup.setDaemon(true); // auto destroy when empty
@@ -632,13 +641,6 @@
     shutdown();
   }
     
-  /**
-   * Maximal number of concurrent xceivers per node.
-   * Enforcing the limit is required in order to avoid data-node
-   * running out of memory.
-   */
-  private final static int MAX_XCEIVER_COUNT = 256;
-
   /** Number of concurrent xceivers per node. */
   int getXceiverCount() {
     return threadGroup == null ? 0 : threadGroup.activeCount();
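
As the surrounding context shows, getXceiverCount() simply asks the
xceiver ThreadGroup for its activeCount(), which the JDK documents as an
estimate of the number of live threads in the group. A toy sketch of that
counting mechanism, independent of the actual DataNode code:

    public class XceiverCountSketch {
      public static void main(String[] args) {
        ThreadGroup group = new ThreadGroup("dataXceiveServer");
        Thread worker = new Thread(group, new Runnable() {
          public void run() {
            try { Thread.sleep(1000); } catch (InterruptedException e) { }
          }
        });
        worker.start();
        // An estimate of the live threads in the group; this is how the
        // data node measures how many xceivers are currently running.
        System.out.println("active xceivers ~ " + group.activeCount());
      }
    }
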
@@ -1040,10 +1042,10 @@
         byte op = in.readByte();
         // Make sure the xciver count is not exceeded
         int curXceiverCount = getXceiverCount();
-        if(curXceiverCount > MAX_XCEIVER_COUNT) {
+        if (curXceiverCount > maxXceiverCount) {
           throw new IOException("xceiverCount " + curXceiverCount
                                 + " exceeds the limit of concurrent xcievers "
-                                + MAX_XCEIVER_COUNT);
+                                + maxXceiverCount);
         }
         long startTime = now();
         switch ( op ) {
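
Taken together, the admission check at the top of the request loop is:
measure the live xceiver threads, compare against the configured ceiling,
and reject the new request with an IOException when over it. A
self-contained sketch of the same pattern under hypothetical names
(XceiverThrottle, admit), not the actual DataNode code:

    import java.io.IOException;

    public class XceiverThrottle {
      private final ThreadGroup group = new ThreadGroup("dataXceiveServer");
      private final int maxXceiverCount;

      public XceiverThrottle(int maxXceiverCount) {
        // In the real data node this value now comes from
        // conf.getInt("dfs.datanode.max.xcievers", 256).
        this.maxXceiverCount = maxXceiverCount;
      }

      // Refuse work beyond the ceiling so a burst of clients cannot make
      // the server spawn an unbounded number of xceiver threads.
      void admit() throws IOException {
        int cur = group.activeCount();
        if (cur > maxXceiverCount) {
          throw new IOException("xceiverCount " + cur
              + " exceeds the limit of concurrent xcievers " + maxXceiverCount);
        }
      }
    }

Each xceiver runs as a JVM thread with its own stack and buffers, so an
unbounded count can exhaust the data node's memory; the limit trades the
occasional rejected request for a bounded footprint.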