Posted to hdfs-commits@hadoop.apache.org by el...@apache.org on 2013/01/11 01:09:37 UTC
svn commit: r1431753 - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/
src/main/java/org/apache/hadoop/hdfs/server/datanode/
Author: eli
Date: Fri Jan 11 00:09:37 2013
New Revision: 1431753
URL: http://svn.apache.org/viewvc?rev=1431753&view=rev
Log:
HDFS-4377. Some trivial DN comment cleanup. Contributed by Eli Collins
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1431753&r1=1431752&r2=1431753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Jan 11 00:09:37 2013
@@ -478,6 +478,8 @@ Release 2.0.3-alpha - Unreleased
HDFS-4363. Combine PBHelper and HdfsProtoUtil and remove redundant
methods. (suresh)
+ HDFS-4377. Some trivial DN comment cleanup. (eli)
+
OPTIMIZATIONS
BUG FIXES
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1431753&r1=1431752&r2=1431753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Fri Jan 11 00:09:37 2013
@@ -171,20 +171,19 @@ public class BlockManager {
*/
private final Set<Block> postponedMisreplicatedBlocks = Sets.newHashSet();
- //
- // Keeps a TreeSet for every named node. Each treeset contains
- // a list of the blocks that are "extra" at that location. We'll
- // eventually remove these extras.
- // Mapping: StorageID -> TreeSet<Block>
- //
+ /**
+ * Maps a StorageID to the set of blocks that are "extra" for this
+ * DataNode. We'll eventually remove these extras.
+ */
public final Map<String, LightWeightLinkedSet<Block>> excessReplicateMap =
new TreeMap<String, LightWeightLinkedSet<Block>>();
- //
- // Store set of Blocks that need to be replicated 1 or more times.
- // We also store pending replication-orders.
- //
+ /**
+ * Store set of Blocks that need to be replicated 1 or more times.
+ * We also store pending replication-orders.
+ */
public final UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks();
+
@VisibleForTesting
final PendingReplicationBlocks pendingReplications;
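
For context on the data structure the new Javadoc describes, here is a minimal, self-contained sketch (not part of this commit, and not the actual BlockManager code) of how a StorageID-to-block-set map like excessReplicateMap is typically used. It substitutes java.util.LinkedHashSet and Long block IDs for HDFS's LightWeightLinkedSet and Block; the class and method names are illustrative only.

import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

/**
 * Simplified stand-in for BlockManager's excessReplicateMap: for each
 * DataNode storage ID, track the block IDs that are "extra" at that
 * location until the extra replicas are actually deleted.
 */
public class ExcessReplicaTracker {
  // StorageID -> set of excess block IDs, kept sorted by storage ID
  private final Map<String, Set<Long>> excessReplicateMap =
      new TreeMap<String, Set<Long>>();

  /** Record that blockId has an excess replica on the given storage. */
  public void addExcess(String storageId, long blockId) {
    Set<Long> excess = excessReplicateMap.get(storageId);
    if (excess == null) {
      excess = new LinkedHashSet<Long>();
      excessReplicateMap.put(storageId, excess);
    }
    excess.add(blockId);
  }

  /** Clear the record once the extra replica has been removed. */
  public boolean removeExcess(String storageId, long blockId) {
    Set<Long> excess = excessReplicateMap.get(storageId);
    return excess != null && excess.remove(blockId);
  }
}

As in the diff above, a TreeMap keeps the storage IDs sorted and the per-storage sets are created lazily on first insert.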
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1431753&r1=1431752&r2=1431753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri Jan 11 00:09:37 2013
@@ -970,29 +970,27 @@ public class DataNode extends Configured
dnId.setStorageID(createNewStorageId(dnId.getXferPort()));
}
+ /**
+ * @return a unique storage ID of form "DS-randInt-ipaddr-port-timestamp"
+ */
static String createNewStorageId(int port) {
- /* Return
- * "DS-randInt-ipaddr-currentTimeMillis"
- * It is considered extermely rare for all these numbers to match
- * on a different machine accidentally for the following
- * a) SecureRandom(INT_MAX) is pretty much random (1 in 2 billion), and
- * b) Good chance ip address would be different, and
- * c) Even on the same machine, Datanode is designed to use different ports.
- * d) Good chance that these are started at different times.
- * For a confict to occur all the 4 above have to match!.
- * The format of this string can be changed anytime in future without
- * affecting its functionality.
- */
+ // It is unlikely that we will create a non-unique storage ID
+ // for the following reasons:
+ // a) SecureRandom is a cryptographically strong random number generator
+ // b) IP addresses will likely differ on different hosts
+ // c) DataNode xfer ports will differ on the same host
+ // d) StorageIDs will likely be generated at different times (in ms)
+ // A conflict requires that all four conditions are violated.
+ // NB: The format of this string can be changed in the future without
+ // requiring that old StorageIDs be updated.
String ip = "unknownIP";
try {
ip = DNS.getDefaultIP("default");
} catch (UnknownHostException ignored) {
- LOG.warn("Could not find ip address of \"default\" inteface.");
+ LOG.warn("Could not find an IP address for the \"default\" inteface.");
}
-
int rand = DFSUtil.getSecureRandom().nextInt(Integer.MAX_VALUE);
- return "DS-" + rand + "-" + ip + "-" + port + "-"
- + Time.now();
+ return "DS-" + rand + "-" + ip + "-" + port + "-" + Time.now();
}
/** Ensure the authentication method is kerberos */
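
The rewritten comment above summarizes why the generated ID is effectively unique: a SecureRandom integer, the host IP, the xfer port, and a millisecond timestamp would all have to collide. As an illustration only (not part of this commit), a standalone sketch of the "DS-randInt-ipaddr-port-timestamp" scheme using plain JDK classes in place of HDFS's DFSUtil, DNS, and Time helpers might look like this; the class name and the fallback behavior are assumptions.

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.security.SecureRandom;

public class StorageIdSketch {
  private static final SecureRandom RANDOM = new SecureRandom();

  /** Build an ID of the form "DS-randInt-ipaddr-port-timestamp". */
  static String createNewStorageId(int port) {
    String ip = "unknownIP";
    try {
      // The real code resolves the "default" interface via Hadoop's DNS
      // utility; InetAddress is used here only to keep the sketch standalone.
      ip = InetAddress.getLocalHost().getHostAddress();
    } catch (UnknownHostException ignored) {
      // Fall back to the placeholder; the random component and timestamp
      // still make a collision very unlikely.
    }
    int rand = RANDOM.nextInt(Integer.MAX_VALUE);
    return "DS-" + rand + "-" + ip + "-" + port + "-" + System.currentTimeMillis();
  }

  public static void main(String[] args) {
    System.out.println(createNewStorageId(50010));  // e.g. a DataNode xfer port
  }
}

Running main prints something like DS-1804289383-192.168.1.10-50010-1357862977000; the values differ per host and per run, which is exactly what makes the ID usable as a unique storage identifier.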
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1431753&r1=1431752&r2=1431753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Fri Jan 11 00:09:37 2013
@@ -62,7 +62,7 @@ import org.apache.hadoop.util.DiskChecke
*/
@InterfaceAudience.Private
public class DataStorage extends Storage {
- // Constants
+
public final static String BLOCK_SUBDIR_PREFIX = "subdir";
final static String BLOCK_FILE_PREFIX = "blk_";
final static String COPY_FILE_PREFIX = "dncp_";
@@ -71,13 +71,13 @@ public class DataStorage extends Storage
public final static String STORAGE_DIR_FINALIZED = "finalized";
public final static String STORAGE_DIR_TMP = "tmp";
- /** Access to this variable is guarded by "this" */
+ /** Unique storage ID. See {@link DataNode#createNewStorageId(int)} for details. */
private String storageID;
- // flag to ensure initialzing storage occurs only once
- private boolean initilized = false;
+ // Flag to ensure we only initialize storage once
+ private boolean initialized = false;
- // BlockPoolStorage is map of <Block pool Id, BlockPoolStorage>
+ // Maps block pool IDs to block pool storage
private Map<String, BlockPoolSliceStorage> bpStorageMap
= Collections.synchronizedMap(new HashMap<String, BlockPoolSliceStorage>());
@@ -130,7 +130,7 @@ public class DataStorage extends Storage
synchronized void recoverTransitionRead(DataNode datanode,
NamespaceInfo nsInfo, Collection<File> dataDirs, StartupOption startOpt)
throws IOException {
- if (initilized) {
+ if (initialized) {
// DN storage has been initialized, no need to do anything
return;
}
@@ -200,7 +200,7 @@ public class DataStorage extends Storage
this.writeAll();
// 4. mark DN storage is initilized
- this.initilized = true;
+ this.initialized = true;
}
/**
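
The DataStorage changes above fix the spelling of the initialized flag and clarify that bpStorageMap maps block pool IDs to their storage. As a rough, illustrative sketch (not the actual DataStorage code), the underlying pattern is a guard flag that makes initialization idempotent plus a synchronized map for per-block-pool state; the class and method names below are hypothetical, and Object stands in for BlockPoolSliceStorage.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

/**
 * Rough sketch of DataStorage's initialization pattern: a guard flag makes
 * the recoverTransitionRead-style setup idempotent, and a synchronized map
 * holds per-block-pool storage state.
 */
public class StorageInitSketch {
  // Flag to ensure we only initialize storage once
  private boolean initialized = false;

  // Maps block pool IDs to block pool storage
  private final Map<String, Object> bpStorageMap =
      Collections.synchronizedMap(new HashMap<String, Object>());

  /** Runs the (expensive) storage setup at most once. */
  synchronized void initializeIfNeeded() {
    if (initialized) {
      return;  // storage has already been set up, nothing to do
    }
    // ... analyze and recover the storage directories here ...
    initialized = true;
  }

  /** Register storage for a block pool; the map itself is thread-safe. */
  void addBlockPool(String bpId, Object bpStorage) {
    bpStorageMap.put(bpId, bpStorage);
  }
}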