Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2016/03/02 03:43:19 UTC

hadoop git commit: HDFS-9876. shouldProcessOverReplicated should not count number of pending replicas. Contributed by Jing Zhao.

Repository: hadoop
Updated Branches:
  refs/heads/trunk 4abb2fa68 -> f2ba7da4f


HDFS-9876. shouldProcessOverReplicated should not count number of pending replicas. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2ba7da4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2ba7da4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2ba7da4

Branch: refs/heads/trunk
Commit: f2ba7da4f0df6cf0fc245093aeb4500158e6ee0b
Parents: 4abb2fa
Author: Jing Zhao <ji...@apache.org>
Authored: Tue Mar 1 18:41:57 2016 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Tue Mar 1 18:41:57 2016 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../server/blockmanagement/BlockManager.java    | 38 ++++++++++----------
 2 files changed, 23 insertions(+), 18 deletions(-)
----------------------------------------------------------------------
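Before the full diff, a minimal illustration of the behavioral change. As the BlockManager.java hunks below show, shouldProcessOverReplicated now compares only the live replica count against the expected replication; the pending count is no longer added in. The sketch below is a standalone approximation, not the actual BlockManager code, with the NumberReplicas argument flattened into plain ints for readability:

    // Minimal standalone approximation of the patched check; illustrative only.
    class OverReplicationCheckSketch {

      // Mirrors the patched logic: only live replicas are compared against the
      // expected replication; pending replicas are no longer added to the count.
      static boolean shouldProcessOverReplicated(int liveReplicas,
          int redundantInternalBlocks, int expectedNum) {
        final int numCurrent = liveReplicas;
        return numCurrent > expectedNum
            || (numCurrent == expectedNum && redundantInternalBlocks > 0);
      }

      public static void main(String[] args) {
        // 3 live replicas, expected 3, one redundant internal block (striped case): true.
        System.out.println(shouldProcessOverReplicated(3, 1, 3));
        // 2 live replicas, expected 3: false, regardless of how many replicas are pending.
        System.out.println(shouldProcessOverReplicated(2, 0, 3));
      }
    }

Under the old signature, the second case with, say, two pending replicas would have been evaluated as 2 + 2 = 4 > 3 and handed to processOverReplicatedBlock even though only two replicas were actually live.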


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2ba7da4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 80bd4d57..421ee68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -455,6 +455,9 @@ Trunk (Unreleased)
     HDFS-9867. Missing block exception should carry locatedBlocks information.
     (Mingliang Liu via jing9)
 
+    HDFS-9876. shouldProcessOverReplicated should not count number of pending
+    replicas. (jing9)
+
     BREAKDOWN OF HDFS-7285 SUBTASKS AND RELATED JIRAS
 
       HDFS-7347. Configurable erasure coding policy for individual files and

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2ba7da4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 77eea0a..5175c13 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2893,7 +2893,7 @@ public class BlockManager implements BlockStatsMXBean {
     } else {
       updateNeededReplications(storedBlock, curReplicaDelta, 0);
     }
-    if (shouldProcessOverReplicated(num, pendingNum, fileReplication)) {
+    if (shouldProcessOverReplicated(num, fileReplication)) {
       processOverReplicatedBlock(storedBlock, fileReplication, node, delNodeHint);
     }
     // If the file replication has reached desired value
@@ -2912,8 +2912,8 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   private boolean shouldProcessOverReplicated(NumberReplicas num,
-      int pendingNum, int expectedNum) {
-    int numCurrent = num.liveReplicas() + pendingNum;
+      int expectedNum) {
+    final int numCurrent = num.liveReplicas();
     return numCurrent > expectedNum ||
         (numCurrent == expectedNum && num.redundantInternalBlocks() > 0);
   }
@@ -3131,7 +3131,7 @@ public class BlockManager implements BlockStatsMXBean {
       }
     }
 
-    if (shouldProcessOverReplicated(num, 0, expectedReplication)) {
+    if (shouldProcessOverReplicated(num, expectedReplication)) {
       if (num.replicasOnStaleNodes() > 0) {
         // If any of the replicas of this block are on nodes that are
         // considered "stale", then these replicas may in fact have
@@ -3268,7 +3268,6 @@ public class BlockManager implements BlockStatsMXBean {
     assert storedBlock instanceof BlockInfoStriped;
     BlockInfoStriped sblk = (BlockInfoStriped) storedBlock;
     short groupSize = sblk.getTotalBlockNum();
-    BlockPlacementPolicy placementPolicy = placementPolicies.getPolicy(true);
 
     // find all duplicated indices
     BitSet found = new BitSet(groupSize); //indices found
@@ -3283,14 +3282,6 @@ public class BlockManager implements BlockStatsMXBean {
       found.set(index);
       storage2index.put(storage, index);
     }
-    // the number of target left replicas equals to the of number of the found
-    // indices.
-    int numOfTarget = found.cardinality();
-
-    final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(
-        bc.getStoragePolicyID());
-    final List<StorageType> excessTypes = storagePolicy.chooseExcess(
-        (short)numOfTarget, DatanodeStorageInfo.toStorageTypes(nonExcess));
 
     // use delHint only if delHint is duplicated
     final DatanodeStorageInfo delStorageHint =
@@ -3302,6 +3293,19 @@ public class BlockManager implements BlockStatsMXBean {
       }
     }
 
+    // cardinality of found indicates the expected number of internal blocks
+    final int numOfTarget = found.cardinality();
+    final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(
+        bc.getStoragePolicyID());
+    final List<StorageType> excessTypes = storagePolicy.chooseExcess(
+        (short) numOfTarget, DatanodeStorageInfo.toStorageTypes(nonExcess));
+    if (excessTypes.isEmpty()) {
+      LOG.warn("excess types chosen for block {} among storages {} is empty",
+          storedBlock, nonExcess);
+      return;
+    }
+
+    BlockPlacementPolicy placementPolicy = placementPolicies.getPolicy(true);
     // for each duplicated index, delete some replicas until only one left
     for (int targetIndex = duplicated.nextSetBit(0); targetIndex >= 0;
          targetIndex = duplicated.nextSetBit(targetIndex + 1)) {
@@ -3312,9 +3316,7 @@ public class BlockManager implements BlockStatsMXBean {
           candidates.add(storage);
         }
       }
-      Block internalBlock = new Block(storedBlock);
-      internalBlock.setBlockId(storedBlock.getBlockId() + targetIndex);
-      while (candidates.size() > 1) {
+      if (candidates.size() > 1) {
         List<DatanodeStorageInfo> replicasToDelete = placementPolicy
             .chooseReplicasToDelete(nonExcess, candidates, (short) 1,
                 excessTypes, null, null);
@@ -3749,7 +3751,7 @@ public class BlockManager implements BlockStatsMXBean {
       final BlockInfo block = it.next();
       int expectedReplication = this.getReplication(block);
       NumberReplicas num = countNodes(block);
-      if (shouldProcessOverReplicated(num, 0, expectedReplication)) {
+      if (shouldProcessOverReplicated(num, expectedReplication)) {
         // over-replicated block
         processOverReplicatedBlock(block, (short) expectedReplication, null,
             null);
@@ -3886,7 +3888,7 @@ public class BlockManager implements BlockStatsMXBean {
         neededReplications.add(block, n.liveReplicas() + pending,
             n.readOnlyReplicas(),
             n.decommissionedAndDecommissioning(), expected);
-      } else if (shouldProcessOverReplicated(n, 0, expected)) {
+      } else if (shouldProcessOverReplicated(n, expected)) {
         processOverReplicatedBlock(block, expected, null, null);
       }
     }
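For context on the second half of the patch: the storage-type selection for striped blocks is now computed after the delete-hint bookkeeping, the method returns early when chooseExcess yields no storage types, and each duplicated internal-block index triggers a single chooseReplicasToDelete call (if) rather than a loop (while). The following is a simplified, self-contained sketch of that control flow; StorageSketch, chooseExcessTypes, and the trivial placement policy are hypothetical stand-ins, not the real HDFS classes:

    import java.util.ArrayList;
    import java.util.BitSet;
    import java.util.List;

    // Hypothetical stand-in for a datanode storage holding one internal block.
    class StorageSketch {
      final String node;
      final int blockIndex; // internal block index within the striped group
      StorageSketch(String node, int blockIndex) {
        this.node = node;
        this.blockIndex = blockIndex;
      }
    }

    class StripedExcessSketch {
      // Pretend placement policy: simply drop the last candidate.
      static List<StorageSketch> chooseReplicasToDelete(List<StorageSketch> candidates) {
        return List.of(candidates.get(candidates.size() - 1));
      }

      // Pretend storage-policy hook; an empty result models the new early-return case.
      static List<String> chooseExcessTypes(int numOfTarget) {
        if (numOfTarget > 0) {
          return List.of("DISK");
        }
        return List.of();
      }

      static void processExcess(List<StorageSketch> nonExcess, int groupSize) {
        // Find all duplicated internal-block indices, as in the original method.
        BitSet found = new BitSet(groupSize);
        BitSet duplicated = new BitSet(groupSize);
        for (StorageSketch s : nonExcess) {
          if (found.get(s.blockIndex)) {
            duplicated.set(s.blockIndex);
          }
          found.set(s.blockIndex);
        }

        // As in the patch: compute excess storage types from the number of found
        // indices, and bail out early if nothing can be chosen.
        List<String> excessTypes = chooseExcessTypes(found.cardinality());
        if (excessTypes.isEmpty()) {
          System.out.println("no excess storage types chosen, skipping");
          return;
        }

        // For each duplicated index, ask the policy once (if, not while) which
        // replicas to drop.
        for (int idx = duplicated.nextSetBit(0); idx >= 0;
             idx = duplicated.nextSetBit(idx + 1)) {
          List<StorageSketch> candidates = new ArrayList<>();
          for (StorageSketch s : nonExcess) {
            if (s.blockIndex == idx) {
              candidates.add(s);
            }
          }
          if (candidates.size() > 1) {
            for (StorageSketch toDelete : chooseReplicasToDelete(candidates)) {
              System.out.println("would delete replica on " + toDelete.node
                  + " for internal block " + toDelete.blockIndex);
            }
          }
        }
      }

      public static void main(String[] args) {
        List<StorageSketch> nonExcess = List.of(
            new StorageSketch("dn1", 0), new StorageSketch("dn2", 0),
            new StorageSketch("dn3", 1));
        processExcess(nonExcess, 9);
      }
    }

The early return corresponds to the new warning in the real patch: when no excess storage types can be chosen, the method now logs and exits before the placement policy is consulted, instead of proceeding with an empty excess-type list.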