You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2017/02/24 23:47:09 UTC
[1/2] hadoop git commit: HDFS-11295. Check storage remaining instead
of node remaining in BlockPlacementPolicyDefault.chooseReplicaToDelete().
Contributed by Marton Elek.
Repository: hadoop
Updated Branches:
refs/heads/branch-2 5c509f5f0 -> fa86ec99a
refs/heads/trunk 289bc50e6 -> d2b3ba9b8
HDFS-11295. Check storage remaining instead of node remaining in BlockPlacementPolicyDefault.chooseReplicaToDelete(). Contributed by Marton Elek.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2b3ba9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2b3ba9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2b3ba9b
Branch: refs/heads/trunk
Commit: d2b3ba9b8fb76753fa1b51661dacbde74aa5c6df
Parents: 289bc50
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Feb 24 15:44:11 2017 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Feb 24 15:44:11 2017 -0800
----------------------------------------------------------------------
.../BlockPlacementPolicyDefault.java | 2 +-
.../blockmanagement/DatanodeStorageInfo.java | 5 +++
.../blockmanagement/TestReplicationPolicy.java | 35 ++++++++++++++------
.../TestReplicationPolicyWithNodeGroup.java | 23 ++++++++++---
4 files changed, 49 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2b3ba9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index eb54667..7676334 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -968,7 +968,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
}
final DatanodeDescriptor node = storage.getDatanodeDescriptor();
- long free = node.getRemaining();
+ long free = storage.getRemaining();
long lastHeartbeat = node.getLastUpdateMonotonic();
if (lastHeartbeat < oldestHeartbeat) {
oldestHeartbeat = lastHeartbeat;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2b3ba9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index b4c8aaa..ab666b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -388,6 +388,11 @@ public class DatanodeStorageInfo {
return null;
}
+ @VisibleForTesting
+ void setRemainingForTests(int remaining) {
+ this.remaining = remaining;
+ }
+
static enum AddBlockResult {
ADDED, REPLACED, ALREADY_EXIST
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2b3ba9b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 1af013d..27dcbf1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -950,24 +950,31 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
List<DatanodeStorageInfo> replicaList = new ArrayList<>();
final Map<String, List<DatanodeStorageInfo>> rackMap
= new HashMap<String, List<DatanodeStorageInfo>>();
-
- dataNodes[0].setRemaining(4*1024*1024);
+
+ storages[0].setRemainingForTests(4*1024*1024);
+ dataNodes[0].setRemaining(calculateRemaining(dataNodes[0]));
replicaList.add(storages[0]);
-
- dataNodes[1].setRemaining(3*1024*1024);
+
+ storages[1].setRemainingForTests(3*1024*1024);
+ dataNodes[1].setRemaining(calculateRemaining(dataNodes[1]));
replicaList.add(storages[1]);
-
- dataNodes[2].setRemaining(2*1024*1024);
+
+ storages[2].setRemainingForTests(2*1024*1024);
+ dataNodes[2].setRemaining(calculateRemaining(dataNodes[2]));
replicaList.add(storages[2]);
-
- dataNodes[5].setRemaining(1*1024*1024);
+
+ //Even though this node has the most total space, because storages[5] has
+ //the lowest remaining space it should be chosen for block deletion.
+ storages[4].setRemainingForTests(100 * 1024 * 1024);
+ storages[5].setRemainingForTests(512 * 1024);
+ dataNodes[5].setRemaining(calculateRemaining(dataNodes[5]));
replicaList.add(storages[5]);
-
+
// Refresh the last update time for all the datanodes
for (int i = 0; i < dataNodes.length; i++) {
DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[i], 0);
}
-
+
List<DatanodeStorageInfo> first = new ArrayList<>();
List<DatanodeStorageInfo> second = new ArrayList<>();
replicator.splitNodesWithRack(replicaList, replicaList, rackMap, first,
@@ -999,6 +1006,14 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
assertEquals(chosen, storages[1]);
}
+ private long calculateRemaining(DatanodeDescriptor dataNode) {
+ long sum = 0;
+ for (DatanodeStorageInfo storageInfo: dataNode.getStorageInfos()){
+ sum += storageInfo.getRemaining();
+ }
+ return sum;
+ }
+
@Test
public void testChooseReplicasToDelete() throws Exception {
Collection<DatanodeStorageInfo> nonExcess = new ArrayList<>();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2b3ba9b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
index 2f184bb..ebd4b81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
@@ -625,16 +625,21 @@ public class TestReplicationPolicyWithNodeGroup extends BaseReplicationPolicyTes
public void testChooseReplicaToDelete() throws Exception {
List<DatanodeStorageInfo> replicaList = new ArrayList<>();
final Map<String, List<DatanodeStorageInfo>> rackMap = new HashMap<>();
- dataNodes[0].setRemaining(4*1024*1024);
+ storages[0].setRemainingForTests(4*1024*1024);
+ dataNodes[0].setRemaining(calculateRemaining(dataNodes[0]));
replicaList.add(storages[0]);
- dataNodes[1].setRemaining(3*1024*1024);
+ storages[1].setRemainingForTests(3*1024*1024);
+ dataNodes[1].setRemaining(calculateRemaining(dataNodes[1]));
replicaList.add(storages[1]);
- dataNodes[2].setRemaining(2*1024*1024);
+ storages[2].setRemainingForTests(2*1024*1024);
+ dataNodes[2].setRemaining(calculateRemaining(dataNodes[2]));
replicaList.add(storages[2]);
- dataNodes[5].setRemaining(1*1024*1024);
+ storages[4].setRemainingForTests(100 * 1024 * 1024);
+ storages[5].setRemainingForTests(512 * 1024);
+ dataNodes[5].setRemaining(calculateRemaining(dataNodes[5]));
replicaList.add(storages[5]);
List<DatanodeStorageInfo> first = new ArrayList<>();
@@ -671,7 +676,15 @@ public class TestReplicationPolicyWithNodeGroup extends BaseReplicationPolicyTes
first, second, excessTypes, rackMap);
assertEquals(chosen, storages[5]);
}
-
+
+ private long calculateRemaining(DatanodeDescriptor dataNode) {
+ long sum = 0;
+ for (DatanodeStorageInfo storageInfo: dataNode.getStorageInfos()){
+ sum += storageInfo.getRemaining();
+ }
+ return sum;
+ }
+
/**
* Test replica placement policy in case of boundary topology.
* Rack 2 has only 1 node group & can't be placed with two replicas
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[2/2] hadoop git commit: HDFS-11295. Check storage remaining instead
of node remaining in BlockPlacementPolicyDefault.chooseReplicaToDelete().
Contributed by Marton Elek.
Posted by ar...@apache.org.
HDFS-11295. Check storage remaining instead of node remaining in BlockPlacementPolicyDefault.chooseReplicaToDelete(). Contributed by Marton Elek.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa86ec99
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa86ec99
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa86ec99
Branch: refs/heads/branch-2
Commit: fa86ec99a1ff0bef4fb322a82a68667ebd733926
Parents: 5c509f5
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Feb 24 15:44:11 2017 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Feb 24 15:44:19 2017 -0800
----------------------------------------------------------------------
.../BlockPlacementPolicyDefault.java | 2 +-
.../blockmanagement/DatanodeStorageInfo.java | 5 +++
.../blockmanagement/TestReplicationPolicy.java | 35 ++++++++++++++------
.../TestReplicationPolicyWithNodeGroup.java | 23 ++++++++++---
4 files changed, 49 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa86ec99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index ec2e4ba..6fab722 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -966,7 +966,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
}
final DatanodeDescriptor node = storage.getDatanodeDescriptor();
- long free = node.getRemaining();
+ long free = storage.getRemaining();
long lastHeartbeat = node.getLastUpdateMonotonic();
if (lastHeartbeat < oldestHeartbeat) {
oldestHeartbeat = lastHeartbeat;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa86ec99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index 862b1bf..6474e3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -382,6 +382,11 @@ public class DatanodeStorageInfo {
return null;
}
+ @VisibleForTesting
+ void setRemainingForTests(int remaining) {
+ this.remaining = remaining;
+ }
+
static enum AddBlockResult {
ADDED, REPLACED, ALREADY_EXIST
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa86ec99/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 21839c6..d2cd919 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -950,24 +950,31 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
List<DatanodeStorageInfo> replicaList = new ArrayList<>();
final Map<String, List<DatanodeStorageInfo>> rackMap
= new HashMap<String, List<DatanodeStorageInfo>>();
-
- dataNodes[0].setRemaining(4*1024*1024);
+
+ storages[0].setRemainingForTests(4*1024*1024);
+ dataNodes[0].setRemaining(calculateRemaining(dataNodes[0]));
replicaList.add(storages[0]);
-
- dataNodes[1].setRemaining(3*1024*1024);
+
+ storages[1].setRemainingForTests(3*1024*1024);
+ dataNodes[1].setRemaining(calculateRemaining(dataNodes[1]));
replicaList.add(storages[1]);
-
- dataNodes[2].setRemaining(2*1024*1024);
+
+ storages[2].setRemainingForTests(2*1024*1024);
+ dataNodes[2].setRemaining(calculateRemaining(dataNodes[2]));
replicaList.add(storages[2]);
-
- dataNodes[5].setRemaining(1*1024*1024);
+
+ //Even though this node has the most total space, because storages[5] has
+ //the lowest remaining space it should be chosen for block deletion.
+ storages[4].setRemainingForTests(100 * 1024 * 1024);
+ storages[5].setRemainingForTests(512 * 1024);
+ dataNodes[5].setRemaining(calculateRemaining(dataNodes[5]));
replicaList.add(storages[5]);
-
+
// Refresh the last update time for all the datanodes
for (int i = 0; i < dataNodes.length; i++) {
DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[i], 0);
}
-
+
List<DatanodeStorageInfo> first = new ArrayList<>();
List<DatanodeStorageInfo> second = new ArrayList<>();
replicator.splitNodesWithRack(replicaList, rackMap, first, second);
@@ -998,6 +1005,14 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
assertEquals(chosen, storages[1]);
}
+ private long calculateRemaining(DatanodeDescriptor dataNode) {
+ long sum = 0;
+ for (DatanodeStorageInfo storageInfo: dataNode.getStorageInfos()){
+ sum += storageInfo.getRemaining();
+ }
+ return sum;
+ }
+
@Test
public void testChooseReplicasToDelete() throws Exception {
Collection<DatanodeStorageInfo> nonExcess = new ArrayList<>();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa86ec99/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
index 1fb46f9..e5ffd56 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
@@ -625,16 +625,21 @@ public class TestReplicationPolicyWithNodeGroup extends BaseReplicationPolicyTes
public void testChooseReplicaToDelete() throws Exception {
List<DatanodeStorageInfo> replicaList = new ArrayList<>();
final Map<String, List<DatanodeStorageInfo>> rackMap = new HashMap<>();
- dataNodes[0].setRemaining(4*1024*1024);
+ storages[0].setRemainingForTests(4*1024*1024);
+ dataNodes[0].setRemaining(calculateRemaining(dataNodes[0]));
replicaList.add(storages[0]);
- dataNodes[1].setRemaining(3*1024*1024);
+ storages[1].setRemainingForTests(3*1024*1024);
+ dataNodes[1].setRemaining(calculateRemaining(dataNodes[1]));
replicaList.add(storages[1]);
- dataNodes[2].setRemaining(2*1024*1024);
+ storages[2].setRemainingForTests(2*1024*1024);
+ dataNodes[2].setRemaining(calculateRemaining(dataNodes[2]));
replicaList.add(storages[2]);
- dataNodes[5].setRemaining(1*1024*1024);
+ storages[4].setRemainingForTests(100 * 1024 * 1024);
+ storages[5].setRemainingForTests(512 * 1024);
+ dataNodes[5].setRemaining(calculateRemaining(dataNodes[5]));
replicaList.add(storages[5]);
List<DatanodeStorageInfo> first = new ArrayList<>();
@@ -671,7 +676,15 @@ public class TestReplicationPolicyWithNodeGroup extends BaseReplicationPolicyTes
first, second, excessTypes, rackMap);
assertEquals(chosen, storages[5]);
}
-
+
+ private long calculateRemaining(DatanodeDescriptor dataNode) {
+ long sum = 0;
+ for (DatanodeStorageInfo storageInfo: dataNode.getStorageInfos()){
+ sum += storageInfo.getRemaining();
+ }
+ return sum;
+ }
+
/**
* Test replica placement policy in case of boundary topology.
* Rack 2 has only 1 node group & can't be placed with two replicas
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org