Posted to common-commits@hadoop.apache.org by om...@apache.org on 2008/08/06 20:52:21 UTC
svn commit: r683366 - in /hadoop/core/branches/branch-0.17: CHANGES.txt src/java/org/apache/hadoop/dfs/DataNode.java
Author: omalley
Date: Wed Aug 6 11:52:20 2008
New Revision: 683366
URL: http://svn.apache.org/viewvc?rev=683366&view=rev
Log:
HADOOP-3859 Merge -r 683362:683365 from branch-0.18 to branch-0.17.
Modified:
hadoop/core/branches/branch-0.17/CHANGES.txt
hadoop/core/branches/branch-0.17/src/java/org/apache/hadoop/dfs/DataNode.java
Modified: hadoop/core/branches/branch-0.17/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.17/CHANGES.txt?rev=683366&r1=683365&r2=683366&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.17/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.17/CHANGES.txt Wed Aug 6 11:52:20 2008
@@ -35,6 +35,9 @@
     FileSystem.delete rather than the FileUtil.fullyDelete. (Amareshwari
     Sri Ramadasu via acmurthy)
 
+    HADOOP-3859. Allow the maximum number of xceivers in the data node to
+    be configurable. (Johan Oskarsson via omalley)
+
 Release 0.17.1 - 2008-06-23
 
   INCOMPATIBLE CHANGES
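The entry above adds a new knob read from Hadoop's Configuration under the key "dfs.datanode.max.xcievers" (note the historical misspelling, which the code preserves). A minimal Java sketch of how that lookup behaves; the class name and the 1024 override are illustrative, not part of this commit:

import org.apache.hadoop.conf.Configuration;

public class XceiverLimitDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Illustrative override; in a deployment this would typically live in hadoop-site.xml.
    conf.setInt("dfs.datanode.max.xcievers", 1024);
    // Same lookup the DataNode performs below: fall back to 256 when the key is unset.
    int max = conf.getInt("dfs.datanode.max.xcievers", 256);
    System.out.println("effective xceiver limit: " + max);
  }
}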
Modified: hadoop/core/branches/branch-0.17/src/java/org/apache/hadoop/dfs/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.17/src/java/org/apache/hadoop/dfs/DataNode.java?rev=683366&r1=683365&r2=683366&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.17/src/java/org/apache/hadoop/dfs/DataNode.java (original)
+++ hadoop/core/branches/branch-0.17/src/java/org/apache/hadoop/dfs/DataNode.java Wed Aug 6 11:52:20 2008
@@ -128,6 +128,14 @@
   private static final Random R = new Random();
 
   /**
+   * Maximal number of concurrent xceivers per node.
+   * Enforcing the limit is required in order to avoid data-node
+   * running out of memory.
+   */
+  private static final int MAX_XCEIVER_COUNT = 256;
+  private int maxXceiverCount = MAX_XCEIVER_COUNT;
+
+  /**
    * We need an estimate for block size to check if the disk partition has
    * enough space. For now we set it to be the default block size set
    * in the server side configuration, which is not ideal because the
@@ -267,6 +275,7 @@
     this.dnRegistration.setName(machineName + ":" + tmpPort);
     LOG.info("Opened server at " + tmpPort);
 
+    this.maxXceiverCount = conf.getInt("dfs.datanode.max.xcievers", MAX_XCEIVER_COUNT);
     this.threadGroup = new ThreadGroup("dataXceiveServer");
     this.dataXceiveServer = new Daemon(threadGroup, new DataXceiveServer(ss));
     this.threadGroup.setDaemon(true); // auto destroy when empty
@@ -569,13 +578,6 @@
     shutdown();
   }
 
-  /**
-   * Maximal number of concurrent xceivers per node.
-   * Enforcing the limit is required in order to avoid data-node
-   * running out of memory.
-   */
-  private final static int MAX_XCEIVER_COUNT = 256;
-
   /** Number of concurrent xceivers per node. */
   int getXceiverCount() {
     return threadGroup == null ? 0 : threadGroup.activeCount();
@@ -973,10 +975,10 @@
       byte op = in.readByte();
       // Make sure the xciver count is not exceeded
       int curXceiverCount = getXceiverCount();
-      if(curXceiverCount > MAX_XCEIVER_COUNT) {
+      if (curXceiverCount > maxXceiverCount) {
        throw new IOException("xceiverCount " + curXceiverCount
                              + " exceeds the limit of concurrent xcievers "
-                              + MAX_XCEIVER_COUNT);
+                              + maxXceiverCount);
       }
       long startTime = now();
       switch ( op ) {
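The last hunk completes the pattern: the DataNode estimates concurrent xceivers from the live threads in a ThreadGroup and rejects a new request once the (now configurable) cap is exceeded. A self-contained sketch of the same idea, using hypothetical class and method names rather than Hadoop's actual ones:

import java.io.IOException;

public class BoundedWorkerPool {
  private final ThreadGroup workers = new ThreadGroup("dataXceiveServer");
  private final int maxXceiverCount;

  public BoundedWorkerPool(int maxXceiverCount) {
    // In the DataNode this value comes from conf.getInt("dfs.datanode.max.xcievers", 256).
    this.maxXceiverCount = maxXceiverCount;
  }

  /** Estimate of concurrent workers: live threads in the group. */
  int getXceiverCount() {
    return workers.activeCount();
  }

  /** Admit one request, mirroring the guard in DataNode.java above. */
  void handle(Runnable request) throws IOException {
    int cur = getXceiverCount();
    if (cur > maxXceiverCount) {
      throw new IOException("xceiverCount " + cur
                            + " exceeds the limit of concurrent xcievers "
                            + maxXceiverCount);
    }
    new Thread(workers, request).start();
  }
}

Note that ThreadGroup.activeCount() returns only an estimate, so the limit acts as a soft cap rather than an exact admission count.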