You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/10/22 20:30:16 UTC
svn commit: r1400986 - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt
src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
Author: szetszwo
Date: Mon Oct 22 18:30:16 2012
New Revision: 1400986
URL: http://svn.apache.org/viewvc?rev=1400986&view=rev
Log:
HDFS-4099. Clean up replication code and add more javadoc.
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1400986&r1=1400985&r2=1400986&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon Oct 22 18:30:16 2012
@@ -408,6 +408,8 @@ Release 2.0.3-alpha - Unreleased
HDFS-4088. Remove "throws QuotaExceededException" from an
INodeDirectoryWithQuota constructor. (szetszwo)
+ HDFS-4099. Clean up replication code and add more javadoc. (szetszwo)
+
OPTIMIZATIONS
BUG FIXES
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1400986&r1=1400985&r2=1400986&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Mon Oct 22 18:30:16 2012
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
@@ -49,14 +51,11 @@ import org.apache.hadoop.hdfs.protocol.E
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
-import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
+import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo;
-
-import static org.apache.hadoop.util.ExitUtil.terminate;
-
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
@@ -2833,28 +2832,32 @@ assert storedBlock.findDatanode(dn) < 0
}
}
- public void checkReplication(Block block, short numExpectedReplicas) {
- // filter out containingNodes that are marked for decommission.
- NumberReplicas number = countNodes(block);
- if (isNeededReplication(block, numExpectedReplicas, number.liveReplicas())) {
- neededReplications.add(block,
- number.liveReplicas(),
- number.decommissionedReplicas(),
- numExpectedReplicas);
- return;
- }
- if (number.liveReplicas() > numExpectedReplicas) {
- processOverReplicatedBlock(block, numExpectedReplicas, null, null);
+ /**
+ * Check replication of the blocks in the collection.
+ * If any block needs replication, insert it into the replication queue.
+ * Otherwise, if any block has more replicas than the expected replication
+ * factor, process it as an over-replicated block.
+ */
+ public void checkReplication(BlockCollection bc) {
+ final short expected = bc.getBlockReplication();
+ for (Block block : bc.getBlocks()) {
+ final NumberReplicas n = countNodes(block);
+ if (isNeededReplication(block, expected, n.liveReplicas())) {
+ neededReplications.add(block, n.liveReplicas(),
+ n.decommissionedReplicas(), expected);
+ } else if (n.liveReplicas() > expected) {
+ processOverReplicatedBlock(block, expected, null, null);
+ }
}
}
- /* get replication factor of a block */
+ /**
+ * @return 0 if the block is not found;
+ * otherwise, return the replication factor of the block.
+ */
private int getReplication(Block block) {
- BlockCollection bc = blocksMap.getBlockCollection(block);
- if (bc == null) { // block does not belong to any file
- return 0;
- }
- return bc.getBlockReplication();
+ final BlockCollection bc = blocksMap.getBlockCollection(block);
+ return bc == null? 0: bc.getBlockReplication();
}
@@ -2929,12 +2932,12 @@ assert storedBlock.findDatanode(dn) < 0
return enoughRacks;
}
- boolean isNeededReplication(Block b, int expectedReplication, int curReplicas) {
- if ((curReplicas >= expectedReplication) && (blockHasEnoughRacks(b))) {
- return false;
- } else {
- return true;
- }
+ /**
+ * A block needs replication if the number of replicas is less than expected
+ * or if it does not have enough racks.
+ */
+ private boolean isNeededReplication(Block b, int expected, int current) {
+ return current < expected || !blockHasEnoughRacks(b);
}
public long getMissingBlocksCount() {
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1400986&r1=1400985&r2=1400986&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon Oct 22 18:30:16 2012
@@ -2428,21 +2428,6 @@ public class FSNamesystem implements Nam
return true;
}
- /**
- * Check all blocks of a file. If any blocks are lower than their intended
- * replication factor, then insert them into neededReplication and if
- * the blocks are more than the intended replication factor then insert
- * them into invalidateBlocks.
- */
- private void checkReplicationFactor(INodeFile file) {
- short numExpectedReplicas = file.getBlockReplication();
- Block[] pendingBlocks = file.getBlocks();
- int nrBlocks = pendingBlocks.length;
- for (int i = 0; i < nrBlocks; i++) {
- blockManager.checkReplication(pendingBlocks[i], numExpectedReplicas);
- }
- }
-
/**
* Allocate a block at the given pending filename
*
@@ -3175,7 +3160,7 @@ public class FSNamesystem implements Nam
// close file and persist block allocations for this file
dir.closeFile(src, newFile);
- checkReplicationFactor(newFile);
+ blockManager.checkReplication(newFile);
}
void commitBlockSynchronization(ExtendedBlock lastblock,