You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2010/06/02 18:14:34 UTC
svn commit: r950618 - in /hadoop/hdfs/trunk: ./ src/java/
src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/
src/java/org/apache/hadoop/hdfs/server/datanode/
src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/
Author: tomwhite
Date: Wed Jun 2 16:14:34 2010
New Revision: 950618
URL: http://svn.apache.org/viewvc?rev=950618&view=rev
Log:
HDFS-1161. Make DN minimum valid volumes configurable. Contributed by Eli Collins.
Modified:
hadoop/hdfs/trunk/CHANGES.txt
hadoop/hdfs/trunk/src/java/hdfs-default.xml
hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=950618&r1=950617&r2=950618&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Wed Jun 2 16:14:34 2010
@@ -560,6 +560,9 @@ Release 0.21.0 - Unreleased
HDFS-995. Replace usage of FileStatus#isDir(). (Eli Collins via
tomwhite)
+ HDFS-1161. Make DN minimum valid volumes configurable.
+ (Eli Collins via tomwhite)
+
OPTIMIZATIONS
HDFS-946. NameNode should not return full path name when listing a
Modified: hadoop/hdfs/trunk/src/java/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/hdfs-default.xml?rev=950618&r1=950617&r2=950618&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/hdfs-default.xml (original)
+++ hadoop/hdfs/trunk/src/java/hdfs-default.xml Wed Jun 2 16:14:34 2010
@@ -510,4 +510,13 @@ creations/deletions), or "all".</descrip
</description>
</property>
+<property>
+ <name>dfs.datanode.failed.volumes.tolerated</name>
+ <value>0</value>
+  <description>The number of volumes that are allowed to
+  fail before a datanode stops offering service. By default
+  any volume failure will cause a datanode to shut down.
+  </description>
+</property>
+
</configuration>
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=950618&r1=950617&r2=950618&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Wed Jun 2 16:14:34 2010
@@ -89,6 +89,8 @@ public class DFSConfigKeys extends Commo
public static final boolean DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT = false;
public static final String DFS_LIST_LIMIT = "dfs.ls.limit";
public static final int DFS_LIST_LIMIT_DEFAULT = 1000;
+ public static final String DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY = "dfs.datanode.failed.volumes.tolerated";
+ public static final int DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT = 0;
//Delegation token related keys
public static final String DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY = "dfs.namenode.delegation.key.update-interval";
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java?rev=950618&r1=950617&r2=950618&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java Wed Jun 2 16:14:34 2010
@@ -60,7 +60,6 @@ public interface FSConstants {
public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
public static final int SIZE_OF_INTEGER = Integer.SIZE / Byte.SIZE;
- public static final int MIN_NUM_OF_VALID_VOLUMES = 1;// for a DN to run
// SafeMode actions
public enum SafeModeAction{ SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET; }
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=950618&r1=950617&r2=950618&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Wed Jun 2 16:14:34 2010
@@ -836,6 +836,7 @@ public class FSDataset implements FSCons
ReplicasMap volumeMap = new ReplicasMap();
static Random random = new Random();
FSDatasetAsyncDiskService asyncDiskService;
+ private int validVolsRequired;
// Used for synchronizing access to usage stats
private Object statsLock = new Object();
@@ -849,6 +850,17 @@ public class FSDataset implements FSCons
this.maxBlocksPerDir = conf.getInt("dfs.datanode.numblocks", 64);
this.supportAppends = conf.getBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
+ // The number of volumes required for operation is the total number
+ // of volumes minus the number of failed volumes we can tolerate.
+ final int volFailuresTolerated =
+ conf.getInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
+ DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);
+ this.validVolsRequired = storage.getNumStorageDirs() - volFailuresTolerated;
+ if (validVolsRequired < 1 ||
+ validVolsRequired > storage.getNumStorageDirs()) {
+ DataNode.LOG.error("Invalid value " + volFailuresTolerated + " for " +
+ DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY);
+ }
FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()];
for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(), conf);
@@ -871,12 +883,12 @@ public class FSDataset implements FSCons
return volumes.getDfsUsed();
}
}
+
/**
- * Return true - if there are still valid volumes
- * on the DataNode
+ * Return true - if there are still valid volumes on the DataNode.
*/
- public boolean hasEnoughResource(){
- return volumes.numberOfVolumes() >= MIN_NUM_OF_VALID_VOLUMES;
+ public boolean hasEnoughResource() {
+ return volumes.numberOfVolumes() >= validVolsRequired;
}
/**
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=950618&r1=950617&r2=950618&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Wed Jun 2 16:14:34 2010
@@ -71,6 +71,8 @@ public class TestDataNodeVolumeFailure e
// bring up a cluster of 2
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, block_size);
+ // Allow a single volume failure (there are two volumes)
+ conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
cluster = new MiniDFSCluster(conf, dn_num, true, null);
cluster.waitActive();
}