Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2016/02/03 22:30:18 UTC

hadoop git commit: HDFS-9748. Avoid duplication in pendingReplications when addExpectedReplicasToPending is called twice. Contributed by Walter Su.

Repository: hadoop
Updated Branches:
  refs/heads/trunk fa328e2d3 -> 7badf1560


HDFS-9748. Avoid duplication in pendingReplications when addExpectedReplicasToPending is called twice. Contributed by Walter Su.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7badf156
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7badf156
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7badf156

Branch: refs/heads/trunk
Commit: 7badf156049b78cabf8537fff9846a0f9924a090
Parents: fa328e2
Author: Jing Zhao <ji...@apache.org>
Authored: Wed Feb 3 13:30:09 2016 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Wed Feb 3 13:30:09 2016 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                 | 3 +++
 .../server/blockmanagement/PendingReplicationBlocks.java    | 9 ++++++---
 .../hdfs/server/blockmanagement/TestPendingReplication.java | 9 ++++++++-
 3 files changed, 17 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7badf156/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1c01978..103edad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2693,6 +2693,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9739. DatanodeStorage.isValidStorageId() is broken
     (Mingliang Liu via vinayakumarb)
 
+    HDFS-9748. Avoid duplication in pendingReplications when
+    addExpectedReplicasToPending is called twice. (Walter Su via jing9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7badf156/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
index 04232cf..71939de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
@@ -23,7 +23,6 @@ import java.io.PrintWriter;
 import java.sql.Time;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -77,7 +76,7 @@ class PendingReplicationBlocks {
    * @param block The corresponding block
    * @param targets The DataNodes where replicas of the block should be placed
    */
-  void increment(BlockInfo block, DatanodeDescriptor[] targets) {
+  void increment(BlockInfo block, DatanodeDescriptor... targets) {
     synchronized (pendingReplications) {
       PendingBlockInfo found = pendingReplications.get(block);
       if (found == null) {
@@ -193,7 +192,11 @@ class PendingReplicationBlocks {
 
     void incrementReplicas(DatanodeDescriptor... newTargets) {
       if (newTargets != null) {
-        Collections.addAll(targets, newTargets);
+        for (DatanodeDescriptor newTarget : newTargets) {
+          if (!targets.contains(newTarget)) {
+            targets.add(newTarget);
+          }
+        }
       }
     }
 

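For reference, the following is a minimal, self-contained sketch of the duplicate-free target handling that the patched incrementReplicas() now performs. The class, method, and DataNode names below are illustrative stand-ins only, not the actual Hadoop blockmanagement types.

  import java.util.ArrayList;
  import java.util.List;

  // Hypothetical stand-in for the per-block target list kept by
  // PendingReplicationBlocks; not the real Hadoop class.
  public class PendingTargetsSketch {

    // DataNodes that still owe a replica of one block.
    private final List<String> targets = new ArrayList<>();

    // Mirrors the patched incrementReplicas(): a target already in the
    // list is skipped, so repeated calls cannot inflate the count.
    void incrementReplicas(String... newTargets) {
      if (newTargets != null) {
        for (String target : newTargets) {
          if (!targets.contains(target)) {
            targets.add(target);
          }
        }
      }
    }

    int getNumReplicas() {
      return targets.size();
    }

    public static void main(String[] args) {
      PendingTargetsSketch pending = new PendingTargetsSketch();
      pending.incrementReplicas("dn1", "dn2");
      pending.incrementReplicas("dn1");             // duplicate, ignored
      System.out.println(pending.getNumReplicas()); // prints 2
    }
  }

Re-adding a target that is already tracked leaves the replica count unchanged, which is the behaviour asserted by the new test case in TestPendingReplication below.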
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7badf156/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
index b5b0cf2..18f28d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
@@ -80,13 +80,20 @@ public class TestPendingReplication {
 
 
     //
-    // remove one item and reinsert it
+    // remove one item
     //
     BlockInfo blk = genBlockInfo(8, 8, 0);
     pendingReplications.decrement(blk, storages[7].getDatanodeDescriptor()); // removes one replica
     assertEquals("pendingReplications.getNumReplicas ",
                  7, pendingReplications.getNumReplicas(blk));
 
+    //
+    // inserting the same item twice should be counted only once
+    //
+    pendingReplications.increment(blk, storages[0].getDatanodeDescriptor());
+    assertEquals("pendingReplications.getNumReplicas ",
+        7, pendingReplications.getNumReplicas(blk));
+
     for (int i = 0; i < 7; i++) {
       // removes all replicas
       pendingReplications.decrement(blk, storages[i].getDatanodeDescriptor());