You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by br...@apache.org on 2017/05/16 05:22:38 UTC
hadoop git commit: HDFS-10987. Make Decommission less expensive when
lot of blocks present. Contributed By Brahma Reddy Battula.
Repository: hadoop
Updated Branches:
refs/heads/branch-2.7 3252064ea -> b95d63cda
HDFS-10987. Make Decommission less expensive when lot of blocks present. Contributed By Brahma Reddy Battula.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b95d63cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b95d63cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b95d63cd
Branch: refs/heads/branch-2.7
Commit: b95d63cdac4b35b2b47152121b18cde8b3b92eaa
Parents: 3252064
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Tue May 16 10:50:51 2017 +0530
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Tue May 16 10:50:51 2017 +0530
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../blockmanagement/DecommissionManager.java | 31 ++++++++++++++++++--
2 files changed, 31 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b95d63cd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index da64e6d..beecb58 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -280,6 +280,9 @@ Release 2.7.4 - UNRELEASED
HDFS-11674. reserveSpaceForReplicas is not released if append request failed
due to mirror down and replica recovered (vinayakumarb)
+
+ HDFS-10987. Make Decommission less expensive when lot of blocks present.
+ (Brahma Reddy Battula)
Release 2.7.3 - 2016-08-25
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b95d63cd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 437d99a..a4715ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -348,6 +348,10 @@ public class DecommissionManager {
*/
private int numBlocksChecked = 0;
/**
+ * The number of blocks checked since (re)acquiring the lock.
+ */
+ private int numBlocksCheckedPerLock = 0;
+ /**
* The number of nodes that have been checked on this tick. Used for
* testing.
*/
@@ -385,6 +389,7 @@ public class DecommissionManager {
}
// Reset the checked count at beginning of each iteration
numBlocksChecked = 0;
+ numBlocksCheckedPerLock = 0;
numNodesChecked = 0;
// Check decom progress
namesystem.writeLock();
@@ -417,9 +422,8 @@ public class DecommissionManager {
it = new CyclicIteration<>(decomNodeBlocks, iterkey).iterator();
final LinkedList<DatanodeDescriptor> toRemove = new LinkedList<>();
- while (it.hasNext()
- && !exceededNumBlocksPerCheck()
- && !exceededNumNodesPerCheck()) {
+ while (it.hasNext() && !exceededNumBlocksPerCheck()
+ && !exceededNumNodesPerCheck() && namesystem.isRunning()) {
numNodesChecked++;
final Map.Entry<DatanodeDescriptor, AbstractList<BlockInfoContiguous>>
entry = it.next();
@@ -544,7 +548,28 @@ public class DecommissionManager {
int decommissionOnlyReplicas = 0;
int underReplicatedInOpenFiles = 0;
while (it.hasNext()) {
+ if (insufficientlyReplicated == null
+ && numBlocksCheckedPerLock >= numBlocksPerCheck) {
+ // During a full scan, insufficientlyReplicated will NOT be null and the
+ // iterator will be the DN's own iterator, so the lock must not be
+ // yielded; otherwise a ConcurrentModificationException could occur.
+ // Once the full scan is done, the iterator is a copy, so the lock can
+ // safely be yielded.
+ // Yielding is required when the number of blocks is greater than the
+ // configured per-iteration limit.
+ namesystem.writeUnlock();
+ try {
+ LOG.debug("Yielded lock during decommission check");
+ Thread.sleep(0, 500);
+ } catch (InterruptedException ignored) {
+ return;
+ }
+ //reset
+ numBlocksCheckedPerLock = 0;
+ namesystem.writeLock();
+ }
numBlocksChecked++;
+ numBlocksCheckedPerLock++;
final BlockInfoContiguous block = it.next();
// Remove the block from the list if it's no longer in the block map,
// e.g. the containing file has been deleted
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org