You are viewing a plain text version of this content. The canonical link for it was a hyperlink in the original message that was lost in plain-text conversion.
Posted to common-commits@hadoop.apache.org by ka...@apache.org on 2016/01/25 18:25:45 UTC
[10/50] [abbrv] hadoop git commit: YARN-4557. Fix improper Queues
sorting in PartitionedQueueComparator when accessible-node-labels=*.
(Naganarasimha G R via wangda)
YARN-4557. Fix improper Queues sorting in PartitionedQueueComparator when accessible-node-labels=*. (Naganarasimha G R via wangda)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ff5f673
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ff5f673
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ff5f673
Branch: refs/heads/YARN-1011
Commit: 5ff5f67332b527acaca7a69ac421930a02ca55b3
Parents: 1708a4c
Author: Wangda Tan <wa...@apache.org>
Authored: Thu Jan 21 11:15:04 2016 +0800
Committer: Wangda Tan <wa...@apache.org>
Committed: Thu Jan 21 11:21:06 2016 +0800
----------------------------------------------------------------------
hadoop-yarn-project/CHANGES.txt | 3 +
.../scheduler/AppSchedulingInfo.java | 2 +-
.../capacity/PartitionedQueueComparator.java | 10 ++-
.../TestNodeLabelContainerAllocation.java | 77 +++++++++++++++++++-
4 files changed, 87 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ff5f673/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 212ffa1..bd467ea 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1289,6 +1289,9 @@ Release 2.8.0 - UNRELEASED
YARN-4565. Fix a bug that leads to AM resource limit not hornored when
sizeBasedWeight enabled for FairOrderingPolicy. (wtan via jianhe)
+ YARN-4557. Fix improper Queues sorting in PartitionedQueueComparator
+ when accessible-node-labels=*. (Naganarasimha G R via wangda)
+
Release 2.7.3 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ff5f673/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
index 631b418..07f3d8b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
@@ -57,7 +57,7 @@ import org.apache.hadoop.yarn.util.resource.Resources;
public class AppSchedulingInfo {
private static final Log LOG = LogFactory.getLog(AppSchedulingInfo.class);
- private static final Comparator COMPARATOR =
+ private static final Comparator<Priority> COMPARATOR =
new org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.Comparator();
private static final int EPOCH_BIT_SHIFT = 40;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ff5f673/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/PartitionedQueueComparator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/PartitionedQueueComparator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/PartitionedQueueComparator.java
index ddcc719..477c615 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/PartitionedQueueComparator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/PartitionedQueueComparator.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import java.util.Comparator;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+
public class PartitionedQueueComparator implements Comparator<CSQueue> {
private String partitionToLookAt = null;
@@ -35,15 +37,17 @@ public class PartitionedQueueComparator implements Comparator<CSQueue> {
* the other not, accessible queue goes first.
*/
boolean q1Accessible =
- q1.getAccessibleNodeLabels().contains(partitionToLookAt);
+ q1.getAccessibleNodeLabels().contains(partitionToLookAt)
+ || q1.getAccessibleNodeLabels().contains(RMNodeLabelsManager.ANY);
boolean q2Accessible =
- q2.getAccessibleNodeLabels().contains(partitionToLookAt);
+ q2.getAccessibleNodeLabels().contains(partitionToLookAt)
+ || q2.getAccessibleNodeLabels().contains(RMNodeLabelsManager.ANY);
if (q1Accessible && !q2Accessible) {
return -1;
} else if (!q1Accessible && q2Accessible) {
return 1;
}
-
+
/*
*
* 2. When two queue has same accessibility, check who will go first:
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ff5f673/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
index dff82ca..bbf6e43 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
@@ -502,7 +503,6 @@ public class TestNodeLabelContainerAllocation {
};
rm1.getRMContext().setNodeLabelManager(mgr);
rm1.start();
- MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB); // no label
MockNM nm2 = rm1.registerNode("h2:1234", 40 * GB); // label = y
// launch an app to queue b1 (label = y), AM container should be launched in
// nm2
@@ -1470,9 +1470,11 @@ public class TestNodeLabelContainerAllocation {
csConf.setCapacityByLabel(B, "x", 70);
final String C = CapacitySchedulerConfiguration.ROOT + ".c";
+ csConf.setAccessibleNodeLabels(C, Collections.<String> emptySet());
csConf.setCapacity(C, 25);
final String D = CapacitySchedulerConfiguration.ROOT + ".d";
+ csConf.setAccessibleNodeLabels(D, Collections.<String> emptySet());
csConf.setCapacity(D, 25);
// set node -> label
@@ -1601,4 +1603,77 @@ public class TestNodeLabelContainerAllocation {
cs.getApplicationAttempt(am4.getApplicationAttemptId()));
}
+
+ @Test
+ public void testOrderOfAllocationOnPartitionsWhenAccessibilityIsAll()
+ throws Exception {
+ /**
+ * Test case: have a following queue structure:
+ *
+ * <pre>
+ * root
+ * __________
+ * / \
+ * a (*) b (x)
+ * </pre>
+ *
+ * Both queues a/b can access x, we need to verify whether * accessibility
+ * is considered in ordering of queues
+ */
+
+ CapacitySchedulerConfiguration csConf =
+ new CapacitySchedulerConfiguration(this.conf);
+
+ // Define top-level queues
+ csConf.setQueues(CapacitySchedulerConfiguration.ROOT,
+ new String[] { "a", "b" });
+ csConf.setCapacityByLabel(CapacitySchedulerConfiguration.ROOT, "x", 100);
+
+ final String A = CapacitySchedulerConfiguration.ROOT + ".a";
+ csConf.setCapacity(A, 25);
+ csConf.setAccessibleNodeLabels(A, toSet("*"));
+ csConf.setCapacityByLabel(A, "x", 60);
+
+ final String B = CapacitySchedulerConfiguration.ROOT + ".b";
+ csConf.setCapacity(B, 75);
+ csConf.setAccessibleNodeLabels(B, toSet("x"));
+ csConf.setCapacityByLabel(B, "x", 40);
+
+ // set node -> label
+ mgr.addToCluserNodeLabels(
+ ImmutableSet.of(NodeLabel.newInstance("x", false)));
+ mgr.addLabelsToNode(
+ ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
+
+ // inject node label manager
+ MockRM rm = new MockRM(csConf) {
+ @Override
+ public RMNodeLabelsManager createNodeLabelManager() {
+ return mgr;
+ }
+ };
+
+ rm.getRMContext().setNodeLabelManager(mgr);
+ rm.start();
+
+ CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+
+ MockNM nm1 = rm.registerNode("h1:1234", 10 * GB); // label = x
+
+ // app1 -> a
+ RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "a", "x");
+ MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
+
+ // app2 -> b
+ RMApp app2 = rm.submitApp(1 * GB, "app", "user", null, "b", "x");
+ MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm1);
+
+ // Both a/b has used_capacity(x) = 0, when doing exclusive allocation, a
+ // will go first since a has more capacity(x)
+ am1.allocate("*", 1 * GB, 1, new ArrayList<ContainerId>(), "x");
+ am2.allocate("*", 1 * GB, 1, new ArrayList<ContainerId>(), "x");
+ doNMHeartbeat(rm, nm1.getNodeId(), 1);
+ checkNumOfContainersInAnAppOnGivenNode(2, nm1.getNodeId(),
+ cs.getApplicationAttempt(am1.getApplicationAttemptId()));
+ }
}