Posted to common-commits@hadoop.apache.org by as...@apache.org on 2018/01/31 15:57:25 UTC

[01/32] hadoop git commit: YARN-7780. Documentation for Placement Constraints. (Konstantinos Karanasos via asuresh)

Repository: hadoop
Updated Branches:
  refs/heads/trunk 7288b0833 -> 8d1e2c640


YARN-7780. Documentation for Placement Constraints. (Konstantinos Karanasos via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8df7666f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8df7666f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8df7666f

Branch: refs/heads/trunk
Commit: 8df7666fe19f124e80bcc63c496607e085fcf804
Parents: add993e
Author: Arun Suresh <as...@apache.org>
Authored: Tue Jan 30 07:38:27 2018 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../yarn/api/resource/PlacementConstraints.java |  17 ++-
 .../hadoop/yarn/conf/YarnConfiguration.java     |  11 +-
 .../site/markdown/PlacementConstraints.md.vm    | 149 +++++++++++++++++++
 3 files changed, 164 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8df7666f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
index 70a8080..c1549c5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
@@ -96,8 +96,9 @@ public final class PlacementConstraints {
    * Creates a constraint that restricts the number of allocations within a
    * given scope (e.g., node or rack).
    *
-   * For example, {@code cardinality(NODE, 3, 10)}, restricts the number of
-   * allocations per node to be no less than 3 and no more than 10.
+   * For example, {@code cardinality(NODE, 3, 10, "zk")} is satisfied on nodes
+   * where there are no less than 3 allocations with tag "zk" and no more than
+   * 10.
    *
    * @param scope the scope of the constraint
    * @param minCardinality determines the minimum number of allocations within
@@ -132,7 +133,7 @@ public final class PlacementConstraints {
 
   /**
    * Similar to {@link #cardinality(String, int, int, String...)}, but
-   * determines only the maximum cardinality (the minimum can be as low as 0).
+   * determines only the maximum cardinality (the minimum cardinality is 0).
    *
    * @param scope the scope of the constraint
    * @param maxCardinality determines the maximum number of allocations within
@@ -150,7 +151,7 @@ public final class PlacementConstraints {
    *
    * Consider a set of nodes N that belongs to the scope specified in the
    * constraint. If the target expressions are satisfied at least minCardinality
-   * times and at most max-cardinality times in the node set N, then the
+   * times and at most maxCardinality times in the node set N, then the
    * constraint is satisfied.
    *
    * For example, {@code targetCardinality(RACK, 2, 10, allocationTag("zk"))},
@@ -197,7 +198,7 @@ public final class PlacementConstraints {
 
     /**
      * Constructs a target expression on a node partition. It is satisfied if
-     * the specified node partition has one of the specified nodePartitions
+     * the specified node partition has one of the specified nodePartitions.
      *
      * @param nodePartitions the set of values that the attribute should take
      *          values from
@@ -211,7 +212,7 @@ public final class PlacementConstraints {
 
     /**
      * Constructs a target expression on an allocation tag. It is satisfied if
-     * the there are allocations with one of the given tags.
+     * there are allocations with one of the given tags.
      *
      * @param allocationTags the set of tags that the attribute should take
      *          values from
@@ -224,8 +225,8 @@ public final class PlacementConstraints {
 
     /**
      * Constructs a target expression on an allocation tag. It is satisfied if
-     * the there are allocations with one of the given tags. Comparing to
-     * {@link PlacementTargets#allocationTag(String...)}, this only check tags
+     * there are allocations with one of the given tags. Comparing to
+     * {@link PlacementTargets#allocationTag(String...)}, this only checks tags
      * within the application.
      *
      * @param allocationTags the set of tags that the attribute should take

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8df7666f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index f5bb2c7..118f9fb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -532,6 +532,12 @@ public class YarnConfiguration extends Configuration {
   public static final String RM_SCHEDULER = 
     RM_PREFIX + "scheduler.class";
 
+  /** Enable rich placement constraints. */
+  public static final String RM_PLACEMENT_CONSTRAINTS_ENABLED =
+      RM_PREFIX + "placement-constraints.enabled";
+
+  public static final boolean DEFAULT_RM_PLACEMENT_CONSTRAINTS_ENABLED = false;
+
   /** Placement Algorithm. */
   public static final String RM_PLACEMENT_CONSTRAINTS_ALGORITHM_CLASS =
       RM_PREFIX + "placement-constraints.algorithm.class";
@@ -540,11 +546,6 @@ public class YarnConfiguration extends Configuration {
   public static final String RM_PLACEMENT_CONSTRAINTS_ALGORITHM_ITERATOR =
       RM_PREFIX + "placement-constraints.algorithm.iterator";
 
-  public static final String RM_PLACEMENT_CONSTRAINTS_ENABLED =
-      RM_PREFIX + "placement-constraints.enabled";
-
-  public static final boolean DEFAULT_RM_PLACEMENT_CONSTRAINTS_ENABLED = false;
-
   public static final String RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS =
       RM_PREFIX + "placement-constraints.retry-attempts";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8df7666f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
new file mode 100644
index 0000000..7926eab
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
@@ -0,0 +1,149 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+#set ( $H3 = '###' )
+#set ( $H4 = '####' )
+#set ( $H5 = '#####' )
+
+Placement Constraints
+=====================
+
+<!-- MACRO{toc|fromDepth=0|toDepth=3} -->
+
+
+Overview
+--------
+
+YARN allows applications to specify placement constraints in the form of data locality (preference to specific nodes or racks) or (non-overlapping) node labels. This document focuses on more expressive placement constraints in YARN. Such constraints can be crucial for the performance and resilience of applications, especially those that include long-running containers, such as services, machine-learning and streaming workloads.
+
+For example, it may be beneficial to co-locate the allocations of a job on the same rack (*affinity* constraints) to reduce network costs, spread allocations across machines (*anti-affinity* constraints) to minimize resource interference, or allow up to a specific number of allocations in a node group (*cardinality* constraints) to strike a balance between the two. Placement decisions also affect resilience. For example, allocations placed within the same cluster upgrade domain would go offline simultaneously.
+
+Applications can specify constraints without requiring knowledge of the underlying cluster topology (e.g., one does not need to name the specific node or rack where containers should be placed) or of the other applications deployed. Currently **intra-application** constraints are supported, but the design is generic and support for constraints across applications will be added soon. Moreover, all constraints are currently **hard**, that is, if the constraints for a container cannot be satisfied due to the current cluster condition or conflicting constraints, the container request gets rejected.
+
+Note that in this document we use the notion of “allocation” to refer to a unit of resources (e.g., CPU and memory) that gets allocated on a node. In the current implementation of YARN, an allocation corresponds to a single container. However, in case an application uses an allocation to spawn more than one container, an allocation could correspond to multiple containers.
+
+
+Quick Guide
+-----------
+
+We first describe how to enable scheduling with placement constraints and then provide examples of how to experiment with this feature using the distributed shell, an application that runs a given shell command on a set of containers.
+
+$H3 Enabling placement constraints
+
+To enable placement constraints, the following property has to be set to **true** in **conf/yarn-site.xml**:
+
+| Property | Description | Default value |
+|:-------- |:----------- |:------------- |
+| `yarn.resourcemanager.placement-constraints.enabled` | Enables rich placement constraints. | `false` |
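+
+For programmatic setups (e.g., in tests), the following is a minimal sketch of the equivalent configuration in Java, using the `YarnConfiguration` constant shown in the diff above:
+
+```
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+// Equivalent to setting yarn.resourcemanager.placement-constraints.enabled
+// to true in conf/yarn-site.xml.
+YarnConfiguration conf = new YarnConfiguration();
+conf.setBoolean(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ENABLED, true);
+```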
+
+
+Further, the user can choose between the following two alternatives for placing containers with constraints:
+
+* **Placement processor:** Following this approach, the placement of containers with constraints is determined as a pre-processing step before the capacity or the fair scheduler is called. Once the placement is decided, the capacity/fair scheduler is invoked to perform the actual allocation. The advantage of this approach is that it supports all constraint types (affinity, anti-affinity, cardinality). Moreover, it considers multiple containers at a time, which allows it to satisfy more constraints than a container-at-a-time approach can. As it sits outside the main scheduler, it can be used by both the capacity and fair schedulers. Note that at the moment it does not account for task priorities within an application, given that such priorities might conflict with the placement constraints.
+* **Placement allocator in capacity scheduler:** This approach places containers with constraints within the capacity scheduler. It currently supports anti-affinity constraints (no affinity or cardinality) and places one container at a time. However, it supports traditional task priorities within an application.
+
+The placement processor approach supports a wider range of constraints and can allow more containers to be placed, especially when applications have demanding constraints or the cluster is highly utilized (due to considering multiple containers at a time). However, if respecting task priority within an application is important for the user and the capacity scheduler is used, then the placement allocator in the capacity scheduler should be used instead.
+
+By default, the placement processor approach is enabled. To use the placement allocator in the capacity scheduler instead, the following parameter has to be set to **true** in **conf/capacity-scheduler.xml**:
+
+| Property | Description | Default value |
+|:-------- |:----------- |:------------- |
+| `yarn.scheduler.capacity.scheduling-request.allowed` | When set to false, the placement processor is used; when set to true, the allocator inside the capacity scheduler is used. | `false` |
+
+
+
+$H3 Experimenting with placement constraints using distributed shell
+
+Users can experiment with placement constraints by using the distributed shell application through the following command:
+
+```
+$ yarn org.apache.hadoop.yarn.applications.distributedshell.Client -jar share/hadoop/yarn/hadoop-yarn-applications-distributedshell-${project.version}.jar -shell_command sleep -shell_args 10 -placement_spec PlacementSpec
+```
+
+where **PlacementSpec** is of the form:
+
+```
+PlacementSpec => "" | KeyVal;PlacementSpec
+KeyVal        => SourceTag=Constraint
+SourceTag     => String
+Constraint    => NumContainers | NumContainers,"IN",Scope,TargetTag | NumContainers,"NOTIN",Scope,TargetTag | NumContainers,"CARDINALITY",Scope,TargetTag,MinCard,MaxCard
+NumContainers => int
+Scope         => "NODE" | "RACK"
+TargetTag     => String
+MinCard       => int
+MaxCard       => int
+```
+
+Note that when the `-placement_spec` argument is specified in the distributed shell command, the `-num_containers` argument should not be used. In case the `-num_containers` argument is used in conjunction with `-placement_spec`, the former is ignored. This is because in PlacementSpec, we determine the number of containers per tag, making `-num_containers` redundant and possibly conflicting. Moreover, if `-placement_spec` is used, all containers will be requested with GUARANTEED execution type.
+
+An example of PlacementSpec is the following:
+```
+zk=3,NOTIN,NODE,zk:hbase=5,IN,RACK,zk:spark=7,CARDINALITY,NODE,hbase,1,3
+```
+The above encodes three constraints:
+* place 3 containers with tag "zk" (standing for ZooKeeper) with node anti-affinity to each other, i.e., do not place more than one container per node (notice that in this first constraint, the SourceTag and the TargetTag of the constraint coincide);
+* place 5 containers with tag "hbase" with affinity to a rack on which containers with tag "zk" are running (i.e., an "hbase" container should be placed on a rack where a "zk" container is running, given that "zk" is the TargetTag of the second constraint);
+* place 7 containers with tag "spark" on nodes that have at least one, but no more than three, containers with tag "hbase".
+
+
+
+Defining Placement Constraints
+------------------------------
+
+$H3 Allocation tags
+
+Allocation tags are string tags that an application can associate with (groups of) its containers. Tags are used to identify components of applications. For example, an HBase Master allocation can be tagged with "hbase-m", and Region Servers with "hbase-rs". Other examples are "latency-critical" to refer to the more general demands of the allocation, or "app_0041" to denote the job ID. Allocation tags play a key role in constraints, as they make it possible to refer to multiple allocations that share a common tag.
+
+Note that instead of using the `ResourceRequest` object to define allocation tags, we use the new `SchedulingRequest` object. This has many similarities with the `ResourceRequest`, but better separates the sizing of the requested allocations (number and size of allocations, priority, execution type, etc.), and the constraints dictating how these allocations should be placed (resource name, relaxed locality). Applications can still use `ResourceRequest` objects, but in order to define allocation tags and constraints, they need to use the `SchedulingRequest` object. Within a single `AllocateRequest`, an application should use either the `ResourceRequest` or the `SchedulingRequest` objects, but not both of them.
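+
+For illustration, the following is a sketch of building such a `SchedulingRequest` (the builder methods are the ones added by this patch series, assuming the usual `org.apache.hadoop.yarn.api.records` imports; the tag, priority and sizing values are placeholders):
+
+```
+// Ask for 5 GUARANTEED allocations of 2GB / 1 vcore each, tagged "hbase-rs".
+SchedulingRequest request = SchedulingRequest.newBuilder()
+    .allocationRequestId(1)
+    .priority(Priority.newInstance(1))
+    .executionType(ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
+    .allocationTags(Collections.singleton("hbase-rs"))
+    .resourceSizing(
+        ResourceSizing.newInstance(5, Resource.newInstance(2048, 1)))
+    .build();
+```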
+
+$H4 Differences between node labels, node attributes and allocation tags
+
+The difference between allocation tags and node labels or node attributes (YARN-3409) is that allocation tags are attached to allocations and not to nodes. When an allocation gets allocated to a node by the scheduler, the set of tags of that allocation is automatically added to the node for the duration of the allocation. Hence, a node inherits the tags of the allocations that are currently allocated to it. Likewise, a rack inherits the tags of its nodes. Moreover, similar to node labels and unlike node attributes, allocation tags have no value attached to them. As we show below, our constraints can refer to allocation tags, as well as node labels and node attributes.
+
+
+$H3 Placement constraints API
+
+Applications can use the public API in the `PlacementConstraints` class to construct placement constraints. Before describing the methods for building constraints, we describe the methods of the `PlacementTargets` class that are used to construct the target expressions that will then be used in constraints:
+
+| Method | Description |
+|:------ |:----------- |
+| `allocationTag(String... allocationTags)` | Constructs a target expression on an allocation tag. It is satisfied if there are allocations with one of the given tags. |
+| `allocationTagToIntraApp(String... allocationTags)` | Similar to `allocationTag(String...)`, but targets only the containers of the application that will use this target (intra-application constraints). |
+| `nodePartition(String... nodePartitions)` | Constructs a target expression on a node partition. It is satisfied for nodes that belong to one of the `nodePartitions`. |
+| `nodeAttribute(String attributeKey, String... attributeValues)` | Constructs a target expression on a node attribute. It is satisfied if the specified node attribute has one of the specified values. |
+
+Note that the `nodeAttribute` method above is not yet functional, as it requires the node attributes feature, which is still under development.
+
+The methods of the `PlacementConstraints` class for building constraints are the following:
+
+| Method | Description |
+|:------ |:----------- |
+| `targetIn(String scope, TargetExpression... targetExpressions)` | Creates a constraint that requires allocations to be placed on nodes that satisfy all target expressions within the given scope (e.g., node or rack). For example, `targetIn(RACK, allocationTag("hbase-m"))` allows allocations on nodes that belong to a rack that has at least one allocation with tag "hbase-m". |
+| `targetNotIn(String scope, TargetExpression... targetExpressions)` | Creates a constraint that requires allocations to be placed on nodes that belong to a scope (e.g., node or rack) that does not satisfy any of the target expressions. |
+| `cardinality(String scope, int minCardinality, int maxCardinality, String... allocationTags)` | Creates a constraint that restricts the number of allocations within a given scope (e.g., node or rack). For example, `cardinality(NODE, 3, 10, "zk")` is satisfied on nodes where there are no less than 3 allocations with tag "zk" and no more than 10. |
+| `minCardinality(String scope, int minCardinality, String... allocationTags)` | Similar to `cardinality(String, int, int, String...)`, but determines only the minimum cardinality (the maximum cardinality is unbound). |
+| `maxCardinality(String scope, int maxCardinality, String... allocationTags)` | Similar to `cardinality(String, int, int, String...)`, but determines only the maximum cardinality (the minimum cardinality is 0). |
+| `targetCardinality(String scope, int minCardinality, int maxCardinality, String... allocationTags)` | This constraint generalizes the cardinality and target constraints. Consider a set of nodes N that belongs to the scope specified in the constraint. If the target expressions are satisfied at least minCardinality times and at most maxCardinality times in the node set N, then the constraint is satisfied. For example, `targetCardinality(RACK, 2, 10, allocationTag("zk"))` requires an allocation to be placed within a rack that has at least 2 and at most 10 other allocations with tag "zk". |
+
+The `PlacementConstraints` class also includes methods for building compound constraints (AND/OR expressions with multiple constraints). Adding support for compound constraints is work in progress.
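+
+To make the tables above concrete, the following sketch mirrors the distributed shell example from the Quick Guide; the factory methods are those listed in the tables, and `build(...)` is assumed to wrap a constraint expression into a `PlacementConstraint`:
+
+```
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.build;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.cardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
+
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+
+// Anti-affinity: at most one "zk" allocation per node.
+PlacementConstraint zkAntiAffinity = build(targetNotIn(NODE, allocationTag("zk")));
+
+// Affinity: place "hbase" allocations on racks that already host a "zk" allocation.
+PlacementConstraint hbaseAffinity = build(targetIn(RACK, allocationTag("zk")));
+
+// Cardinality: between 1 and 3 "hbase" allocations per node.
+PlacementConstraint hbaseNodeCardinality = build(cardinality(NODE, 1, 3, "hbase"));
+```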
+
+
+$H3 Specifying constraints in applications
+
+Applications have to specify the containers for which each constraint will be enabled. To this end, applications can provide a mapping from a set of allocation tags (source tags) to a placement constraint. For example, an entry of this mapping could be "hbase"->constraint1, which means that constraint1 will be applied when scheduling each allocation with tag "hbase".
+
+When using the placement processor approach (see [Enabling placement constraints](#Enabling_placement_constraints)), this constraint mapping is specified within the `RegisterApplicationMasterRequest`.
+
+When using the placement allocator in the capacity scheduler, the constraints can also be added to each `SchedulingRequest` object. Each such constraint is valid for the tag of that scheduling request. In case constraints are specified both at the `RegisterApplicationMasterRequest` and the scheduling requests, the latter override the former.
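+
+For the placement processor path, the following is a hedged sketch of registering the source-tag-to-constraint mapping; it assumes the `setPlacementConstraints(Map<Set<String>, PlacementConstraint>)` setter added to `RegisterApplicationMasterRequest` by this patch series, and the host and tracking URL are placeholders:
+
+```
+// Apply zkAntiAffinity to every allocation tagged "zk", and hbaseAffinity to
+// every allocation tagged "hbase" (constraints built as in the earlier sketch).
+Map<Set<String>, PlacementConstraint> constraints = new HashMap<>();
+constraints.put(Collections.singleton("zk"), zkAntiAffinity);
+constraints.put(Collections.singleton("hbase"), hbaseAffinity);
+
+RegisterApplicationMasterRequest register =
+    RegisterApplicationMasterRequest.newInstance("am-host", 8080, "http://am-host:8080/");
+register.setPlacementConstraints(constraints);
+```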
+




[08/32] hadoop git commit: YARN-7448. [API] Add SchedulingRequest to the AllocateRequest. (Panagiotis Garefalakis via asuresh)

Posted by as...@apache.org.
YARN-7448. [API] Add SchedulingRequest to the AllocateRequest. (Panagiotis Garefalakis via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69de9a1b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69de9a1b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69de9a1b

Branch: refs/heads/trunk
Commit: 69de9a1ba9a587c7e03ae7c7aeae93e04c36d665
Parents: db92855
Author: Arun Suresh <as...@apache.org>
Authored: Fri Nov 17 10:42:43 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../api/protocolrecords/AllocateRequest.java    | 42 ++++++++++
 .../hadoop/yarn/api/records/ResourceSizing.java | 27 +++++++
 .../yarn/api/records/SchedulingRequest.java     |  1 +
 .../src/main/proto/yarn_service_protos.proto    |  1 +
 .../impl/pb/AllocateRequestPBImpl.java          | 83 ++++++++++++++++++++
 .../records/impl/pb/ResourceSizingPBImpl.java   |  2 +-
 .../impl/pb/SchedulingRequestPBImpl.java        | 16 ++++
 .../hadoop/yarn/api/TestPBImplRecords.java      | 19 +++++
 8 files changed, 190 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69de9a1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
index ae0891e..d8d2347 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.api.protocolrecords;
 
+import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience.Public;
@@ -28,6 +29,7 @@ import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.util.Records;
 
@@ -212,6 +214,32 @@ public abstract class AllocateRequest {
   public abstract void setUpdateRequests(
       List<UpdateContainerRequest> updateRequests);
 
+  /**
+   * Get the list of Scheduling requests being sent by the
+   * <code>ApplicationMaster</code>.
+   * @return list of {@link SchedulingRequest} being sent by the
+   *         <code>ApplicationMaster</code>.
+   */
+  @Public
+  @Unstable
+  public List<SchedulingRequest> getSchedulingRequests() {
+    return Collections.EMPTY_LIST;
+  }
+
+  /**
+   * Set the list of Scheduling requests to inform the
+   * <code>ResourceManager</code> about the application's resource requirements
+   * (potentially including allocation tags & placement constraints).
+   * @param schedulingRequests list of <code>SchedulingRequest</code> to update
+   *          the <code>ResourceManager</code> about the application's resource
+   *          requirements.
+   */
+  @Public
+  @Unstable
+  public void setSchedulingRequests(
+      List<SchedulingRequest> schedulingRequests) {
+  }
+
   @Public
   @Unstable
   public static AllocateRequestBuilder newBuilder() {
@@ -314,6 +342,20 @@ public abstract class AllocateRequest {
     }
 
     /**
+     * Set the <code>schedulingRequests</code> of the request.
+     * @see AllocateRequest#setSchedulingRequests(List)
+     * @param schedulingRequests <code>SchedulingRequest</code> of the request
+     * @return {@link AllocateRequestBuilder}
+     */
+    @Public
+    @Unstable
+    public AllocateRequestBuilder schedulingRequests(
+        List<SchedulingRequest> schedulingRequests) {
+      allocateRequest.setSchedulingRequests(schedulingRequests);
+      return this;
+    }
+
+    /**
      * Return generated {@link AllocateRequest} object.
      * @return {@link AllocateRequest}
      */
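
As a usage note (not part of this patch), an ApplicationMaster could populate the new field roughly as follows; the builder methods are the ones shown in the diff above, while the SchedulingRequest contents are illustrative placeholders (assuming the usual org.apache.hadoop.yarn.api.records and protocolrecords imports):

    // One SchedulingRequest asking for 3 allocations of 1GB / 1 vcore, tagged "zk".
    List<SchedulingRequest> schedulingRequests = Collections.singletonList(
        SchedulingRequest.newBuilder()
            .allocationRequestId(1)
            .priority(Priority.newInstance(1))
            .allocationTags(Collections.singleton("zk"))
            .resourceSizing(
                ResourceSizing.newInstance(3, Resource.newInstance(1024, 1)))
            .build());

    // Attach them to the next allocate heartbeat.
    AllocateRequest allocateRequest = AllocateRequest.newBuilder()
        .responseId(0)
        .progress(0f)
        .schedulingRequests(schedulingRequests)
        .build();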

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69de9a1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java
index d82be11..8cdc63f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java
@@ -61,4 +61,31 @@ public abstract class ResourceSizing {
   @Public
   @Unstable
   public abstract void setResources(Resource resources);
+
+  @Override
+  public int hashCode() {
+    int result = getResources().hashCode();
+    result = 31 * result + getNumAllocations();
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if(obj == null || getClass() != obj.getClass()) {
+      return false;
+    }
+
+    ResourceSizing that = (ResourceSizing) obj;
+
+    if(getNumAllocations() != that.getNumAllocations()) {
+      return  false;
+    }
+    if(!getResources().equals(that.getResources())) {
+      return false;
+    }
+    return true;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69de9a1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
index 47a0697..e32dd24 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
@@ -49,6 +49,7 @@ public abstract class SchedulingRequest {
     return SchedulingRequest.newBuilder()
         .allocationRequestId(allocationRequestId).priority(priority)
         .executionType(executionType).allocationTags(allocationTags)
+        .resourceSizing(resourceSizing)
         .placementConstraintExpression(placementConstraintExpression).build();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69de9a1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index 68e585d..e49c4e3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -91,6 +91,7 @@ message AllocateRequestProto {
   optional int32 response_id = 4;
   optional float progress = 5;
   repeated UpdateContainerRequestProto update_requests = 7;
+  repeated SchedulingRequestProto scheduling_requests = 10;
 }
 
 message NMTokenProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69de9a1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
index 0f0f571..b460044 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
@@ -29,14 +29,17 @@ import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourceBlacklistRequestPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourceRequestPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.SchedulingRequestPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.UpdateContainerRequestPBImpl;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceBlacklistRequestProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.SchedulingRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.UpdateContainerRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProtoOrBuilder;
@@ -53,6 +56,7 @@ public class AllocateRequestPBImpl extends AllocateRequest {
   private List<ResourceRequest> ask = null;
   private List<ContainerId> release = null;
   private List<UpdateContainerRequest> updateRequests = null;
+  private List<SchedulingRequest> schedulingRequests = null;
   private ResourceBlacklistRequest blacklistRequest = null;
   
   public AllocateRequestPBImpl() {
@@ -101,6 +105,9 @@ public class AllocateRequestPBImpl extends AllocateRequest {
     if (this.updateRequests != null) {
       addUpdateRequestsToProto();
     }
+    if (this.schedulingRequests != null) {
+      addSchedulingRequestsToProto();
+    }
     if (this.blacklistRequest != null) {
       builder.setBlacklistRequest(convertToProtoFormat(this.blacklistRequest));
     }
@@ -178,6 +185,23 @@ public class AllocateRequestPBImpl extends AllocateRequest {
   }
 
   @Override
+  public List<SchedulingRequest> getSchedulingRequests() {
+    initSchedulingRequests();
+    return this.schedulingRequests;
+  }
+
+  @Override
+  public void setSchedulingRequests(
+      List<SchedulingRequest> schedulingRequests) {
+    if (schedulingRequests == null) {
+      return;
+    }
+    initSchedulingRequests();
+    this.schedulingRequests.clear();
+    this.schedulingRequests.addAll(schedulingRequests);
+  }
+
+  @Override
   public ResourceBlacklistRequest getResourceBlacklistRequest() {
     AllocateRequestProtoOrBuilder p = viaProto ? proto : builder;
     if (this.blacklistRequest != null) {
@@ -261,6 +285,20 @@ public class AllocateRequestPBImpl extends AllocateRequest {
     }
   }
 
+  private void initSchedulingRequests() {
+    if (this.schedulingRequests != null) {
+      return;
+    }
+    AllocateRequestProtoOrBuilder p = viaProto ? proto : builder;
+    List<SchedulingRequestProto> list =
+        p.getSchedulingRequestsList();
+    this.schedulingRequests = new ArrayList<>();
+
+    for (SchedulingRequestProto c : list) {
+      this.schedulingRequests.add(convertFromProtoFormat(c));
+    }
+  }
+
   private void addUpdateRequestsToProto() {
     maybeInitBuilder();
     builder.clearUpdateRequests();
@@ -297,6 +335,41 @@ public class AllocateRequestPBImpl extends AllocateRequest {
     builder.addAllUpdateRequests(iterable);
   }
 
+  private void addSchedulingRequestsToProto() {
+    maybeInitBuilder();
+    builder.clearSchedulingRequests();
+    if (schedulingRequests == null) {
+      return;
+    }
+    Iterable<SchedulingRequestProto> iterable =
+        new Iterable<SchedulingRequestProto>() {
+          @Override
+          public Iterator<SchedulingRequestProto> iterator() {
+            return new Iterator<SchedulingRequestProto>() {
+
+              private Iterator<SchedulingRequest> iter =
+                  schedulingRequests.iterator();
+
+              @Override
+              public boolean hasNext() {
+                return iter.hasNext();
+              }
+
+              @Override
+              public SchedulingRequestProto next() {
+                return convertToProtoFormat(iter.next());
+              }
+
+              @Override
+              public void remove() {
+                throw new UnsupportedOperationException();
+              }
+            };
+
+          }
+        };
+    builder.addAllSchedulingRequests(iterable);
+  }
   @Override
   public List<ContainerId> getReleaseList() {
     initReleases();
@@ -377,6 +450,16 @@ public class AllocateRequestPBImpl extends AllocateRequest {
     return ((UpdateContainerRequestPBImpl) t).getProto();
   }
 
+  private SchedulingRequestPBImpl convertFromProtoFormat(
+      SchedulingRequestProto p) {
+    return new SchedulingRequestPBImpl(p);
+  }
+
+  private SchedulingRequestProto convertToProtoFormat(
+      SchedulingRequest t) {
+    return ((SchedulingRequestPBImpl) t).getProto();
+  }
+
   private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
     return new ContainerIdPBImpl(p);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69de9a1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
index 05bb3bd..f98e488 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
@@ -112,6 +112,6 @@ public class ResourceSizingPBImpl extends ResourceSizing {
   }
 
   private ResourceProto convertToProtoFormat(Resource r) {
-    return ((ResourcePBImpl) r).getProto();
+    return ProtoUtils.convertToProtoFormat(r);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69de9a1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
index 7826b36..305856a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
@@ -263,4 +263,20 @@ public class SchedulingRequestPBImpl extends SchedulingRequest {
     this.allocationTags = new HashSet<>();
     this.allocationTags.addAll(p.getAllocationTagsList());
   }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null) {
+      return false;
+    }
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69de9a1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
index c5585c2..a0b907d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
@@ -149,8 +149,10 @@ import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
 import org.apache.hadoop.yarn.api.records.ResourceTypeInfo;
 import org.apache.hadoop.yarn.api.records.ResourceUtilization;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.SerializedException;
 import org.apache.hadoop.yarn.api.records.StrictPreemptionContract;
 import org.apache.hadoop.yarn.api.records.Token;
@@ -189,7 +191,9 @@ import org.apache.hadoop.yarn.api.records.impl.pb.ResourceBlacklistRequestPBImpl
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourceOptionPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourceRequestPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ResourceSizingPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourceTypeInfoPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.SchedulingRequestPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.SerializedExceptionPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.StrictPreemptionContractPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
@@ -225,6 +229,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ResourceBlacklistRequestProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceOptionProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceSizingProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.SchedulingRequestProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.SerializedExceptionProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.StrictPreemptionContractProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.URLProto;
@@ -428,6 +434,8 @@ public class TestPBImplRecords extends BasePBImplRecordsTest {
     generateByNewInstance(QueueConfigurations.class);
     generateByNewInstance(CollectorInfo.class);
     generateByNewInstance(ResourceTypeInfo.class);
+    generateByNewInstance(ResourceSizing.class);
+    generateByNewInstance(SchedulingRequest.class);
   }
 
   @Test
@@ -907,6 +915,17 @@ public class TestPBImplRecords extends BasePBImplRecordsTest {
   }
 
   @Test
+  public void testResourceSizingPBImpl() throws Exception {
+    validatePBImplRecord(ResourceSizingPBImpl.class, ResourceSizingProto.class);
+  }
+
+  @Test
+  public void testSchedulingRequestPBImpl() throws Exception {
+    validatePBImplRecord(SchedulingRequestPBImpl.class,
+        SchedulingRequestProto.class);
+  }
+
+  @Test
   public void testSerializedExceptionPBImpl() throws Exception {
     validatePBImplRecord(SerializedExceptionPBImpl.class,
         SerializedExceptionProto.class);




[32/32] hadoop git commit: Merge branch 'YARN-6592' into trunk

Posted by as...@apache.org.
Merge branch 'YARN-6592' into trunk


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d1e2c64
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d1e2c64
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d1e2c64

Branch: refs/heads/trunk
Commit: 8d1e2c6409a44f4515a1549ae82c7e2597e96467
Parents: 7288b08 8df7666
Author: Arun Suresh <as...@apache.org>
Authored: Wed Jan 31 01:42:42 2018 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:42:42 2018 -0800

----------------------------------------------------------------------
 .../v2/app/rm/TestRMContainerAllocator.java     |  15 +-
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |   6 +
 .../yarn/sls/scheduler/RMNodeWrapper.java       |   6 +
 .../sls/scheduler/SLSCapacityScheduler.java     |  15 +-
 .../yarn/sls/scheduler/SLSFairScheduler.java    |  12 +-
 .../dev-support/findbugs-exclude.xml            |   8 +
 .../yarn/ams/ApplicationMasterServiceUtils.java |  16 +
 .../api/protocolrecords/AllocateRequest.java    |  42 ++
 .../api/protocolrecords/AllocateResponse.java   |  23 +
 .../RegisterApplicationMasterRequest.java       |  42 +-
 .../hadoop/yarn/api/records/Container.java      |  15 +
 .../api/records/RejectedSchedulingRequest.java  |  70 ++
 .../yarn/api/records/RejectionReason.java       |  44 ++
 .../hadoop/yarn/api/records/ResourceSizing.java |  91 +++
 .../yarn/api/records/SchedulingRequest.java     | 206 +++++
 .../yarn/api/resource/PlacementConstraint.java  | 744 +++++++++++++++++++
 .../yarn/api/resource/PlacementConstraints.java | 320 ++++++++
 .../hadoop/yarn/api/resource/package-info.java  |  23 +
 .../hadoop/yarn/conf/YarnConfiguration.java     |  31 +
 ...SchedulerInvalidResoureRequestException.java |  47 ++
 .../src/main/proto/yarn_protos.proto            |  86 +++
 .../src/main/proto/yarn_service_protos.proto    |   3 +
 .../api/resource/TestPlacementConstraints.java  | 107 +++
 .../distributedshell/ApplicationMaster.java     | 124 +++-
 .../applications/distributedshell/Client.java   |  14 +
 .../distributedshell/PlacementSpec.java         | 137 ++++
 .../hadoop/yarn/client/api/AMRMClient.java      |  38 +-
 .../yarn/client/api/async/AMRMClientAsync.java  |  48 ++
 .../api/async/impl/AMRMClientAsyncImpl.java     |  49 +-
 .../yarn/client/api/impl/AMRMClientImpl.java    | 142 +++-
 .../client/api/impl/BaseAMRMClientTest.java     | 212 ++++++
 .../yarn/client/api/impl/TestAMRMClient.java    | 156 +---
 .../api/impl/TestAMRMClientOnRMRestart.java     |   9 +-
 .../TestAMRMClientPlacementConstraints.java     | 204 +++++
 .../PlacementConstraintFromProtoConverter.java  | 116 +++
 .../pb/PlacementConstraintToProtoConverter.java | 174 +++++
 .../apache/hadoop/yarn/api/pb/package-info.java |  23 +
 .../impl/pb/AllocateRequestPBImpl.java          |  84 +++
 .../impl/pb/AllocateResponsePBImpl.java         |  85 +++
 .../RegisterApplicationMasterRequestPBImpl.java | 106 ++-
 .../api/records/impl/pb/ContainerPBImpl.java    |  31 +
 .../yarn/api/records/impl/pb/ProtoUtils.java    |  43 ++
 .../pb/RejectedSchedulingRequestPBImpl.java     | 148 ++++
 .../records/impl/pb/ResourceSizingPBImpl.java   | 128 ++++
 .../impl/pb/SchedulingRequestPBImpl.java        | 296 ++++++++
 .../PlacementConstraintTransformations.java     | 200 +++++
 .../hadoop/yarn/api/resource/package-info.java  |  23 +
 .../yarn/security/ContainerTokenIdentifier.java |  69 +-
 .../src/main/proto/yarn_security_token.proto    |   1 +
 .../src/main/resources/yarn-default.xml         |  36 +
 .../hadoop/yarn/api/BasePBImplRecordsTest.java  |  11 +
 .../hadoop/yarn/api/TestPBImplRecords.java      |  21 +
 .../TestPlacementConstraintPBConversion.java    | 195 +++++
 .../TestPlacementConstraintTransformations.java | 166 +++++
 .../api/protocolrecords/NMContainerStatus.java  |  14 +
 .../impl/pb/NMContainerStatusPBImpl.java        |  33 +
 .../server/scheduler/SchedulerRequestKey.java   |  11 +
 .../yarn_server_common_service_protos.proto     |   1 +
 .../containermanager/ContainerManagerImpl.java  |   3 +-
 .../container/ContainerImpl.java                |  19 +-
 .../ApplicationMasterService.java               |  15 +
 .../resourcemanager/DefaultAMSProcessor.java    |  13 +-
 .../resourcemanager/RMActiveServiceContext.java |  30 +
 .../yarn/server/resourcemanager/RMContext.java  |  11 +
 .../server/resourcemanager/RMContextImpl.java   |  25 +
 .../server/resourcemanager/ResourceManager.java |  22 +
 .../rmapp/attempt/RMAppAttemptImpl.java         |   5 +-
 .../rmcontainer/RMContainer.java                |   8 +
 .../rmcontainer/RMContainerImpl.java            |  37 +-
 .../server/resourcemanager/rmnode/RMNode.java   |   7 +
 .../resourcemanager/rmnode/RMNodeImpl.java      |   6 +
 .../scheduler/AbstractYarnScheduler.java        |  21 +-
 .../scheduler/AppSchedulingInfo.java            | 214 ++++--
 .../ApplicationPlacementAllocatorFactory.java   |  68 ++
 .../scheduler/ApplicationPlacementFactory.java  |  63 --
 .../scheduler/ContainerUpdateContext.java       |   4 +-
 .../scheduler/ResourceScheduler.java            |  13 +
 .../scheduler/SchedulerApplicationAttempt.java  |  24 +-
 .../scheduler/SchedulerNode.java                |  20 +-
 .../scheduler/YarnScheduler.java                |  15 +-
 .../scheduler/capacity/CapacityScheduler.java   | 154 +++-
 .../CapacitySchedulerConfiguration.java         |   5 +
 .../allocator/RegularContainerAllocator.java    |   3 +-
 .../scheduler/common/ContainerRequest.java      |  12 +
 .../scheduler/common/PendingAsk.java            |   6 +
 .../common/ResourceAllocationCommitter.java     |  12 +-
 .../scheduler/common/fica/FiCaSchedulerApp.java |  43 +-
 .../constraint/AllocationTagsManager.java       | 564 ++++++++++++++
 .../InvalidAllocationTagsQueryException.java    |  35 +
 .../MemoryPlacementConstraintManager.java       | 282 +++++++
 .../constraint/PlacementConstraintManager.java  | 151 ++++
 .../PlacementConstraintManagerService.java      |  93 +++
 .../constraint/PlacementConstraintsUtil.java    | 218 ++++++
 .../constraint/algorithm/CircularIterator.java  |  86 +++
 .../algorithm/DefaultPlacementAlgorithm.java    | 254 +++++++
 .../algorithm/LocalAllocationTagsManager.java   | 167 +++++
 .../iterators/PopularTagsIterator.java          |  71 ++
 .../algorithm/iterators/SerialIterator.java     |  53 ++
 .../algorithm/iterators/package-info.java       |  29 +
 .../constraint/algorithm/package-info.java      |  29 +
 .../api/ConstraintPlacementAlgorithm.java       |  43 ++
 .../api/ConstraintPlacementAlgorithmInput.java  |  32 +
 .../api/ConstraintPlacementAlgorithmOutput.java |  58 ++
 ...traintPlacementAlgorithmOutputCollector.java |  32 +
 .../constraint/api/PlacedSchedulingRequest.java |  79 ++
 .../constraint/api/SchedulingResponse.java      |  70 ++
 .../scheduler/constraint/api/package-info.java  |  28 +
 .../scheduler/constraint/package-info.java      |  29 +
 .../constraint/processor/BatchedRequests.java   | 144 ++++
 .../processor/NodeCandidateSelector.java        |  38 +
 .../processor/PlacementDispatcher.java          | 145 ++++
 .../processor/PlacementProcessor.java           | 361 +++++++++
 .../constraint/processor/package-info.java      |  29 +
 .../scheduler/fair/FairScheduler.java           |  12 +-
 .../scheduler/fifo/FifoScheduler.java           |   7 +-
 .../placement/AppPlacementAllocator.java        |  68 +-
 .../LocalityAppPlacementAllocator.java          |  35 +-
 .../SingleConstraintAppPlacementAllocator.java  | 533 +++++++++++++
 .../security/RMContainerTokenSecretManager.java |  21 +-
 .../resourcemanager/webapp/NodesPage.java       |   3 +
 .../webapp/dao/AllocationTagInfo.java           |  56 ++
 .../webapp/dao/AllocationTagsInfo.java          |  59 ++
 .../resourcemanager/webapp/dao/NodeInfo.java    |  15 +
 .../server/resourcemanager/Application.java     |   9 +-
 .../yarn/server/resourcemanager/MockAM.java     |  77 ++
 .../yarn/server/resourcemanager/MockNodes.java  |   6 +
 .../yarn/server/resourcemanager/MockRM.java     |  14 +
 .../attempt/TestRMAppAttemptTransitions.java    |  10 +-
 .../rmcontainer/TestRMContainerImpl.java        | 151 +++-
 .../scheduler/TestAppSchedulingInfo.java        |   4 +-
 .../capacity/CapacitySchedulerTestBase.java     |  79 ++
 .../capacity/TestCapacityScheduler.java         |  91 +--
 .../TestCapacitySchedulerAsyncScheduling.java   |  12 +-
 .../TestCapacitySchedulerAutoQueueCreation.java |   2 +-
 ...apacitySchedulerSchedulingRequestUpdate.java | 262 +++++++
 .../capacity/TestContainerAllocation.java       |   5 +-
 .../capacity/TestIncreaseAllocationExpirer.java |   2 +-
 ...estSchedulingRequestContainerAllocation.java | 269 +++++++
 ...hedulingRequestContainerAllocationAsync.java | 138 ++++
 .../scheduler/capacity/TestUtils.java           |  11 +
 .../constraint/TestAllocationTagsManager.java   | 413 ++++++++++
 .../TestBatchedRequestsIterators.java           |  82 ++
 .../TestPlacementConstraintManagerService.java  | 182 +++++
 .../TestPlacementConstraintsUtil.java           | 309 ++++++++
 .../constraint/TestPlacementProcessor.java      | 642 ++++++++++++++++
 .../algorithm/TestCircularIterator.java         |  84 +++
 .../TestLocalAllocationTagsManager.java         | 139 ++++
 .../scheduler/fair/FairSchedulerTestBase.java   |   6 +-
 .../fair/TestContinuousScheduling.java          |  10 +-
 .../scheduler/fair/TestFairScheduler.java       |  30 +-
 .../scheduler/fifo/TestFifoScheduler.java       |  33 +-
 ...stSingleConstraintAppPlacementAllocator.java | 403 ++++++++++
 .../resourcemanager/webapp/TestNodesPage.java   |   4 +-
 .../webapp/TestRMWebServicesNodes.java          |  77 +-
 .../site/markdown/PlacementConstraints.md.vm    | 149 ++++
 155 files changed, 13063 insertions(+), 569 deletions(-)
----------------------------------------------------------------------





[14/32] hadoop git commit: YARN-6596. Introduce Placement Constraint Manager module. (Konstantinos Karanasos via asuresh)

Posted by as...@apache.org.
YARN-6596. Introduce Placement Constraint Manager module. (Konstantinos Karanasos via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1efb2b6f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1efb2b6f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1efb2b6f

Branch: refs/heads/trunk
Commit: 1efb2b6f250022f41fe5911c1bb3028ec15c5447
Parents: 37f1a7b
Author: Arun Suresh <as...@apache.org>
Authored: Fri Dec 22 13:26:30 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../resourcemanager/RMActiveServiceContext.java |  15 +
 .../yarn/server/resourcemanager/RMContext.java  |   6 +
 .../server/resourcemanager/RMContextImpl.java   |  13 +
 .../server/resourcemanager/ResourceManager.java |  13 +
 .../MemoryPlacementConstraintManager.java       | 282 +++++++++++++++++++
 .../constraint/PlacementConstraintManager.java  | 151 ++++++++++
 .../PlacementConstraintManagerService.java      |  93 ++++++
 .../scheduler/constraint/package-info.java      |  29 ++
 .../TestPlacementConstraintManagerService.java  | 182 ++++++++++++
 9 files changed, 784 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1efb2b6f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
index 4d0c230..06a1d00 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAlloca
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed.QueueLimitCalculator;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
@@ -109,6 +110,7 @@ public class RMActiveServiceContext {
   private RMAppLifetimeMonitor rmAppLifetimeMonitor;
   private QueueLimitCalculator queueLimitCalculator;
   private AllocationTagsManager allocationTagsManager;
+  private PlacementConstraintManager placementConstraintManager;
 
   public RMActiveServiceContext() {
     queuePlacementManager = new PlacementManager();
@@ -413,6 +415,19 @@ public class RMActiveServiceContext {
 
   @Private
   @Unstable
+  public PlacementConstraintManager getPlacementConstraintManager() {
+    return placementConstraintManager;
+  }
+
+  @Private
+  @Unstable
+  public void setPlacementConstraintManager(
+      PlacementConstraintManager placementConstraintManager) {
+    this.placementConstraintManager = placementConstraintManager;
+  }
+
+  @Private
+  @Unstable
   public RMDelegatedNodeLabelsUpdater getRMDelegatedNodeLabelsUpdater() {
     return rmDelegatedNodeLabelsUpdater;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1efb2b6f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
index 00da108..eb91a31 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAlloca
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed.QueueLimitCalculator;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
@@ -171,4 +172,9 @@ public interface RMContext extends ApplicationMasterServiceContext {
   AllocationTagsManager getAllocationTagsManager();
 
   void setAllocationTagsManager(AllocationTagsManager allocationTagsManager);
+
+  PlacementConstraintManager getPlacementConstraintManager();
+
+  void setPlacementConstraintManager(
+      PlacementConstraintManager placementConstraintManager);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1efb2b6f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
index da50ef8..0b6be72 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed.QueueLimitCalculator;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
@@ -516,6 +517,18 @@ public class RMContextImpl implements RMContext {
   }
 
   @Override
+  public PlacementConstraintManager getPlacementConstraintManager() {
+    return activeServiceContext.getPlacementConstraintManager();
+  }
+
+  @Override
+  public void setPlacementConstraintManager(
+      PlacementConstraintManager placementConstraintManager) {
+    activeServiceContext
+        .setPlacementConstraintManager(placementConstraintManager);
+  }
+
+  @Override
   public RMDelegatedNodeLabelsUpdater getRMDelegatedNodeLabelsUpdater() {
     return activeServiceContext.getRMDelegatedNodeLabelsUpdater();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1efb2b6f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 1d838f0..5140c9f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -97,6 +97,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.MemoryPlacementConstraintManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManagerService;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer;
@@ -498,6 +500,12 @@ public class ResourceManager extends CompositeService implements Recoverable {
   protected AllocationTagsManager createAllocationTagsManager() {
     return new AllocationTagsManager(this.rmContext);
   }
+
+  protected PlacementConstraintManagerService
+      createPlacementConstraintManager() {
+    // Use the in memory Placement Constraint Manager.
+    return new MemoryPlacementConstraintManager();
+  }
   
   protected DelegationTokenRenewer createDelegationTokenRenewer() {
     return new DelegationTokenRenewer();
@@ -628,6 +636,11 @@ public class ResourceManager extends CompositeService implements Recoverable {
           createAllocationTagsManager();
       rmContext.setAllocationTagsManager(allocationTagsManager);
 
+      PlacementConstraintManagerService placementConstraintManager =
+          createPlacementConstraintManager();
+      addService(placementConstraintManager);
+      rmContext.setPlacementConstraintManager(placementConstraintManager);
+
       RMDelegatedNodeLabelsUpdater delegatedNodeLabelsUpdater =
           createRMDelegatedNodeLabelsUpdater();
       if (delegatedNodeLabelsUpdater != null) {
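A side note on the hunk above: createPlacementConstraintManager() is a protected factory method, so an RM subclass can swap in a different constraint store. The sketch below only illustrates that extension point; CustomResourceManager is a hypothetical class, and the only implementation shipped in this patch is MemoryPlacementConstraintManager.

    import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.MemoryPlacementConstraintManager;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManagerService;

    /** Hypothetical RM subclass illustrating the new factory method. */
    public class CustomResourceManager extends ResourceManager {
      @Override
      protected PlacementConstraintManagerService createPlacementConstraintManager() {
        // Any PlacementConstraintManagerService implementation could be returned
        // here; the default RM uses the in-memory manager added by this patch.
        return new MemoryPlacementConstraintManager();
      }
    }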

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1efb2b6f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/MemoryPlacementConstraintManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/MemoryPlacementConstraintManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/MemoryPlacementConstraintManager.java
new file mode 100644
index 0000000..ceff6f6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/MemoryPlacementConstraintManager.java
@@ -0,0 +1,282 @@
+/*
+ * *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * In memory implementation of the {@link PlacementConstraintManagerService}.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class MemoryPlacementConstraintManager
+    extends PlacementConstraintManagerService {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(MemoryPlacementConstraintManager.class);
+
+  private ReentrantReadWriteLock.ReadLock readLock;
+  private ReentrantReadWriteLock.WriteLock writeLock;
+
+  /**
+   * Stores the global constraints that will be manipulated by the cluster
+   * admin. The key of each entry is the tag that will enable the corresponding
+   * constraint.
+   */
+  private Map<String, PlacementConstraint> globalConstraints;
+  /**
+   * Stores the constraints for each application, along with the allocation tags
+   * that will enable each of the constraints for a given application.
+   */
+  private Map<ApplicationId, Map<String, PlacementConstraint>> appConstraints;
+
+  public MemoryPlacementConstraintManager() {
+    this.globalConstraints = new HashMap<>();
+    this.appConstraints = new HashMap<>();
+    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+    readLock = lock.readLock();
+    writeLock = lock.writeLock();
+  }
+
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    super.serviceInit(conf);
+  }
+
+  @Override
+  public void registerApplication(ApplicationId appId,
+      Map<Set<String>, PlacementConstraint> constraintMap) {
+    // Check if app already exists. If not, prepare its constraint map.
+    Map<String, PlacementConstraint> constraintsForApp = new HashMap<>();
+    try {
+      readLock.lock();
+      if (appConstraints.get(appId) != null) {
+        LOG.warn("Application {} has already been registered.", appId);
+        return;
+      }
+      // Go over each sourceTag-constraint pair, validate it, and add it to the
+      // constraint map for this app.
+      for (Map.Entry<Set<String>, PlacementConstraint> entry : constraintMap
+          .entrySet()) {
+        Set<String> sourceTags = entry.getKey();
+        PlacementConstraint constraint = entry.getValue();
+        if (validateConstraint(sourceTags, constraint)) {
+          String sourceTag = getValidSourceTag(sourceTags);
+          constraintsForApp.put(sourceTag, constraint);
+        }
+      }
+    } finally {
+      readLock.unlock();
+    }
+
+    if (constraintsForApp.isEmpty()) {
+      LOG.info("Application {} was registered, but no constraints were added.",
+          appId);
+    }
+    // Update appConstraints.
+    try {
+      writeLock.lock();
+      appConstraints.put(appId, constraintsForApp);
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  @Override
+  public void addConstraint(ApplicationId appId, Set<String> sourceTags,
+      PlacementConstraint placementConstraint, boolean replace) {
+    try {
+      writeLock.lock();
+      Map<String, PlacementConstraint> constraintsForApp =
+          appConstraints.get(appId);
+      if (constraintsForApp == null) {
+        LOG.info("Cannot add constraint to application {}, as it has not "
+            + "been registered yet.", appId);
+        return;
+      }
+
+      addConstraintToMap(constraintsForApp, sourceTags, placementConstraint,
+          replace);
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  @Override
+  public void addGlobalConstraint(Set<String> sourceTags,
+      PlacementConstraint placementConstraint, boolean replace) {
+    try {
+      writeLock.lock();
+      addConstraintToMap(globalConstraints, sourceTags, placementConstraint,
+          replace);
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  /**
+   * Helper method that adds a constraint to a map for a given source tag.
+   * Assumes there is already a lock on the constraint map.
+   *
+   * @param constraintMap constraint map to which the constraint will be added
+   * @param sourceTags the source tags that will enable this constraint
+   * @param placementConstraint the new constraint to be added
+   * @param replace if true, an existing constraint for these sourceTags will be
+   *          replaced with the new one
+   */
+  private void addConstraintToMap(
+      Map<String, PlacementConstraint> constraintMap, Set<String> sourceTags,
+      PlacementConstraint placementConstraint, boolean replace) {
+    if (validateConstraint(sourceTags, placementConstraint)) {
+      String sourceTag = getValidSourceTag(sourceTags);
+      if (constraintMap.get(sourceTag) == null || replace) {
+        if (replace) {
+          LOG.info("Replacing the constraint associated with tag {} with {}.",
+              sourceTag, placementConstraint);
+        }
+        constraintMap.put(sourceTag, placementConstraint);
+      } else {
+        LOG.info("Constraint {} will not be added. There is already a "
+                + "constraint associated with tag {}.",
+            placementConstraint, sourceTag);
+      }
+    }
+  }
+
+  @Override
+  public Map<Set<String>, PlacementConstraint> getConstraints(
+      ApplicationId appId) {
+    try {
+      readLock.lock();
+      if (appConstraints.get(appId) == null) {
+        LOG.info("Application {} is not registered in the Placement "
+            + "Constraint Manager.", appId);
+        return null;
+      }
+
+      // Copy to a new map and return an unmodifiable version of it.
+      // Each key of the map is a set with a single source tag.
+      Map<Set<String>, PlacementConstraint> constraintMap =
+          appConstraints.get(appId).entrySet().stream()
+              .collect(Collectors.toMap(
+                  e -> Stream.of(e.getKey()).collect(Collectors.toSet()),
+                  e -> e.getValue()));
+
+      return Collections.unmodifiableMap(constraintMap);
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  @Override
+  public PlacementConstraint getConstraint(ApplicationId appId,
+      Set<String> sourceTags) {
+    if (!validateSourceTags(sourceTags)) {
+      return null;
+    }
+    String sourceTag = getValidSourceTag(sourceTags);
+    try {
+      readLock.lock();
+      if (appConstraints.get(appId) == null) {
+        LOG.info("Application {} is not registered in the Placement "
+            + "Constraint Manager.", appId);
+        return null;
+      }
+      // TODO: Merge this constraint with the global one for this tag, if one
+      // exists.
+      return appConstraints.get(appId).get(sourceTag);
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  @Override
+  public PlacementConstraint getGlobalConstraint(Set<String> sourceTags) {
+    if (!validateSourceTags(sourceTags)) {
+      return null;
+    }
+    String sourceTag = getValidSourceTag(sourceTags);
+    try {
+      readLock.lock();
+      return globalConstraints.get(sourceTag);
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  @Override
+  public void unregisterApplication(ApplicationId appId) {
+    try {
+      writeLock.lock();
+      appConstraints.remove(appId);
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  @Override
+  public void removeGlobalConstraint(Set<String> sourceTags) {
+    if (!validateSourceTags(sourceTags)) {
+      return;
+    }
+    String sourceTag = getValidSourceTag(sourceTags);
+    try {
+      writeLock.lock();
+      globalConstraints.remove(sourceTag);
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  @Override
+  public int getNumRegisteredApplications() {
+    try {
+      readLock.lock();
+      return appConstraints.size();
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  @Override
+  public int getNumGlobalConstraints() {
+    try {
+      readLock.lock();
+      return globalConstraints.size();
+    } finally {
+      readLock.unlock();
+    }
+  }
+}
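A rough usage sketch of the in-memory manager above, mirroring the unit test added later in this commit. The application id and the "hbase-rs" tag are arbitrary, and in the RM the manager is started as a service rather than instantiated directly:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.MemoryPlacementConstraintManager;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;

    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;

    public class PlacementConstraintManagerSketch {
      public static void main(String[] args) {
        PlacementConstraintManager pcm = new MemoryPlacementConstraintManager();
        ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);

        // Anti-affinity: do not place an "hbase-rs" container on a node that
        // already has an allocation tagged "hbase-rs".
        PlacementConstraint antiAffinity =
            PlacementConstraints.build(targetNotIn(NODE, allocationTag("hbase-rs")));

        // Register the constraint, keyed by its (single) source tag.
        Map<Set<String>, PlacementConstraint> constraints = new HashMap<>();
        constraints.put(Collections.singleton("hbase-rs"), antiAffinity);
        pcm.registerApplication(appId, constraints);

        // Later lookups use the same source tag.
        PlacementConstraint stored =
            pcm.getConstraint(appId, Collections.singleton("hbase-rs"));
        System.out.println("Stored constraint: " + stored);
      }
    }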

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1efb2b6f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintManager.java
new file mode 100644
index 0000000..7725d0d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintManager.java
@@ -0,0 +1,151 @@
+/*
+ * *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+
+/**
+ * Interface for storing and retrieving placement constraints (see
+ * {@link PlacementConstraint}).
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public interface PlacementConstraintManager {
+
+  /**
+   * Register all placement constraints of an application.
+   *
+   * @param appId the application ID
+   * @param constraintMap the map of allocation tags to constraints for this
+   *          application
+   */
+  void registerApplication(ApplicationId appId,
+      Map<Set<String>, PlacementConstraint> constraintMap);
+
+  /**
+   * Add a placement constraint for a given application and a given set of
+   * (source) allocation tags. The constraint will be used on Scheduling
+   * Requests that carry this set of allocation tags.
+   * TODO: Support merge and not only replace when adding a constraint.
+   *
+   * @param appId the application ID
+   * @param sourceTags the set of allocation tags that will enable this
+   *          constraint
+   * @param placementConstraint the constraint
+   * @param replace if true, an existing constraint for these tags will be
+   *          replaced by the given one
+   */
+  void addConstraint(ApplicationId appId, Set<String> sourceTags,
+      PlacementConstraint placementConstraint, boolean replace);
+
+  /**
+   * Add a placement constraint that will be used globally. These constraints
+   * are added by the cluster administrator.
+   * TODO: Support merge and not only replace when adding a constraint.
+   *
+   * @param sourceTags the allocation tags that will enable this constraint
+   * @param placementConstraint the constraint
+   * @param replace if true, an existing constraint for these tags will be
+   *          replaced by the given one
+   */
+  void addGlobalConstraint(Set<String> sourceTags,
+      PlacementConstraint placementConstraint, boolean replace);
+
+  /**
+   * Retrieve all constraints for a given application, along with the allocation
+   * tags that enable each constraint.
+   *
+   * @param appId the application ID
+   * @return the constraints for this application with the associated tags
+   */
+  Map<Set<String>, PlacementConstraint> getConstraints(ApplicationId appId);
+
+  /**
+   * Retrieve the placement constraint that is associated with a set of
+   * allocation tags for a given application.
+   *
+   * @param appId the application ID
+   * @param sourceTags the allocation tags that enable this constraint
+   * @return the constraint
+   */
+  PlacementConstraint getConstraint(ApplicationId appId,
+      Set<String> sourceTags);
+
+  /**
+   * Retrieve a global constraint that is associated with a given set of
+   * allocation tags.
+   *
+   * @param sourceTags the allocation tags that enable this constraint
+   * @return the constraint
+   */
+  PlacementConstraint getGlobalConstraint(Set<String> sourceTags);
+
+  /**
+   * Remove the constraints that correspond to a given application.
+   *
+   * @param appId the application whose constraints will be removed.
+   */
+  void unregisterApplication(ApplicationId appId);
+
+  /**
+   * Remove a global constraint that is associated with the given allocation
+   * tags.
+   *
+   * @param sourceTags the allocation tags
+   */
+  void removeGlobalConstraint(Set<String> sourceTags);
+
+  /**
+   * Returns the number of currently registered applications in the Placement
+   * Constraint Manager.
+   *
+   * @return number of registered applications.
+   */
+  int getNumRegisteredApplications();
+
+  /**
+   * Returns the number of global constraints registered in the Placement
+   * Constraint Manager.
+   *
+   * @return number of global constraints.
+   */
+  int getNumGlobalConstraints();
+
+  /**
+   * Validate a placement constraint and the set of allocation tags that will
+   * enable it.
+   *
+   * @param sourceTags the associated allocation tags
+   * @param placementConstraint the constraint
+   * @return true if constraint and tags are valid
+   */
+  default boolean validateConstraint(Set<String> sourceTags,
+      PlacementConstraint placementConstraint) {
+    return true;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1efb2b6f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintManagerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintManagerService.java
new file mode 100644
index 0000000..967f251
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintManagerService.java
@@ -0,0 +1,93 @@
+/*
+ * *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+
+/**
+ * The service that implements the {@link PlacementConstraintManager} interface.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public abstract class PlacementConstraintManagerService extends AbstractService
+    implements PlacementConstraintManager {
+
+  protected static final Log LOG =
+      LogFactory.getLog(PlacementConstraintManagerService.class);
+
+  private PlacementConstraintManager placementConstraintManager = null;
+
+  public PlacementConstraintManagerService() {
+    super(PlacementConstraintManagerService.class.getName());
+  }
+
+  @Override
+  public boolean validateConstraint(Set<String> sourceTags,
+      PlacementConstraint placementConstraint) {
+    if (!validateSourceTags(sourceTags)) {
+      return false;
+    }
+    // TODO: Perform actual validation of the constraint (in YARN-6621).
+    // TODO: Perform satisfiability check for constraint.
+    return true;
+  }
+
+  /**
+   * Validates whether the allocation tags that will enable a constraint have
+   * the expected format. At the moment we support a single allocation tag per
+   * constraint.
+   *
+   * @param sourceTags the source allocation tags
+   * @return true if the tags have the expected format
+   */
+  protected boolean validateSourceTags(Set<String> sourceTags) {
+    if (sourceTags.isEmpty()) {
+      LOG.warn("A placement constraint cannot be associated with an empty "
+          + "set of tags.");
+      return false;
+    }
+    if (sourceTags.size() > 1) {
+      LOG.warn("Only a single tag can be associated with a placement "
+          + "constraint currently.");
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * This method will return a single allocation tag. It should be called after
+   * validating the tags by calling {@link #validateSourceTags}.
+   *
+   * @param sourceTags the source allocation tags
+   * @return the single source tag
+   */
+  protected String getValidSourceTag(Set<String> sourceTags) {
+    return sourceTags.iterator().next();
+  }
+
+}
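Reusing pcm and antiAffinity from the earlier sketch: validateConstraint() delegates to validateSourceTags(), which for now accepts exactly one source tag per constraint, so a multi-tag set is rejected and the constraint is not stored:

    // true: a single source tag is accepted.
    boolean singleTagValid = pcm.validateConstraint(
        java.util.Collections.singleton("spark"), antiAffinity);

    // false: more than one source tag per constraint is not supported yet;
    // the service logs a warning and the constraint is dropped.
    boolean multiTagValid = pcm.validateConstraint(
        new java.util.HashSet<>(java.util.Arrays.asList("hbase-m", "hbase-sec")),
        antiAffinity);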

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1efb2b6f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/package-info.java
new file mode 100644
index 0000000..cbb7a55
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/package-info.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint
+ * contains classes related to scheduling containers using placement
+ * constraints.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1efb2b6f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintManagerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintManagerService.java
new file mode 100644
index 0000000..abcab1a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintManagerService.java
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.nodeAttribute;
+
+import java.util.AbstractMap.SimpleEntry;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Unit tests for {@link PlacementConstraintManagerService}.
+ */
+public class TestPlacementConstraintManagerService {
+
+  private PlacementConstraintManagerService pcm;
+
+  protected PlacementConstraintManagerService createPCM() {
+    return new MemoryPlacementConstraintManager();
+  }
+
+  private ApplicationId appId1, appId2;
+  private PlacementConstraint c1, c2, c3, c4;
+  private Set<String> sourceTag1, sourceTag2, sourceTag3, sourceTag4;
+  private Map<Set<String>, PlacementConstraint> constraintMap1, constraintMap2;
+
+  @Before
+  public void before() {
+    this.pcm = createPCM();
+
+    // Build appIDs, constraints, source tags, and constraint map.
+    long ts = System.currentTimeMillis();
+    appId1 = BuilderUtils.newApplicationId(ts, 123);
+    appId2 = BuilderUtils.newApplicationId(ts, 234);
+
+    c1 = PlacementConstraints.build(targetIn(NODE, allocationTag("hbase-m")));
+    c2 = PlacementConstraints.build(targetIn(RACK, allocationTag("hbase-rs")));
+    c3 = PlacementConstraints
+        .build(targetNotIn(NODE, nodeAttribute("java", "1.8")));
+    c4 = PlacementConstraints
+        .build(targetCardinality(RACK, 2, 10, allocationTag("zk")));
+
+    sourceTag1 = new HashSet<>(Arrays.asList("spark"));
+    sourceTag2 = new HashSet<>(Arrays.asList("zk"));
+    sourceTag3 = new HashSet<>(Arrays.asList("storm"));
+    sourceTag4 = new HashSet<>(Arrays.asList("hbase-m", "hbase-sec"));
+
+    constraintMap1 = Stream
+        .of(new SimpleEntry<>(sourceTag1, c1),
+            new SimpleEntry<>(sourceTag2, c2))
+        .collect(Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue));
+
+    constraintMap2 = Stream.of(new SimpleEntry<>(sourceTag3, c4))
+        .collect(Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue));
+  }
+
+  @Test
+  public void testRegisterUnregisterApps() {
+    Assert.assertEquals(0, pcm.getNumRegisteredApplications());
+
+    // Register two applications.
+    pcm.registerApplication(appId1, constraintMap1);
+    Assert.assertEquals(1, pcm.getNumRegisteredApplications());
+    Map<Set<String>, PlacementConstraint> constrMap =
+        pcm.getConstraints(appId1);
+    Assert.assertNotNull(constrMap);
+    Assert.assertEquals(2, constrMap.size());
+    Assert.assertNotNull(constrMap.get(sourceTag1));
+    Assert.assertNotNull(constrMap.get(sourceTag2));
+
+    pcm.registerApplication(appId2, constraintMap2);
+    Assert.assertEquals(2, pcm.getNumRegisteredApplications());
+    constrMap = pcm.getConstraints(appId2);
+    Assert.assertNotNull(constrMap);
+    Assert.assertEquals(1, constrMap.size());
+    Assert.assertNotNull(constrMap.get(sourceTag3));
+    Assert.assertNull(constrMap.get(sourceTag2));
+
+    // Try to register the same app again.
+    pcm.registerApplication(appId2, constraintMap1);
+    Assert.assertEquals(2, pcm.getNumRegisteredApplications());
+
+    // Unregister appId1.
+    pcm.unregisterApplication(appId1);
+    Assert.assertEquals(1, pcm.getNumRegisteredApplications());
+    Assert.assertNull(pcm.getConstraints(appId1));
+    Assert.assertNotNull(pcm.getConstraints(appId2));
+  }
+
+  @Test
+  public void testAddConstraint() {
+    // Cannot add constraint to unregistered app.
+    Assert.assertEquals(0, pcm.getNumRegisteredApplications());
+    pcm.addConstraint(appId1, sourceTag1, c1, false);
+    Assert.assertEquals(0, pcm.getNumRegisteredApplications());
+
+    // Register application.
+    pcm.registerApplication(appId1, new HashMap<>());
+    Assert.assertEquals(1, pcm.getNumRegisteredApplications());
+    Assert.assertEquals(0, pcm.getConstraints(appId1).size());
+
+    // Add two constraints.
+    pcm.addConstraint(appId1, sourceTag1, c1, false);
+    pcm.addConstraint(appId1, sourceTag2, c3, false);
+    Assert.assertEquals(2, pcm.getConstraints(appId1).size());
+
+    // Constraint for sourceTag1 should not be replaced.
+    pcm.addConstraint(appId1, sourceTag1, c2, false);
+    Assert.assertEquals(2, pcm.getConstraints(appId1).size());
+    Assert.assertEquals(c1, pcm.getConstraint(appId1, sourceTag1));
+    Assert.assertNotEquals(c2, pcm.getConstraint(appId1, sourceTag1));
+
+    // Now c2 should replace c1 for sourceTag1.
+    pcm.addConstraint(appId1, sourceTag1, c2, true);
+    Assert.assertEquals(2, pcm.getConstraints(appId1).size());
+    Assert.assertEquals(c2, pcm.getConstraint(appId1, sourceTag1));
+  }
+
+  @Test
+  public void testGlobalConstraints() {
+    Assert.assertEquals(0, pcm.getNumGlobalConstraints());
+    pcm.addGlobalConstraint(sourceTag1, c1, false);
+    Assert.assertEquals(1, pcm.getNumGlobalConstraints());
+    Assert.assertNotNull(pcm.getGlobalConstraint(sourceTag1));
+
+    // Constraint for sourceTag1 should not be replaced.
+    pcm.addGlobalConstraint(sourceTag1, c2, false);
+    Assert.assertEquals(1, pcm.getNumGlobalConstraints());
+    Assert.assertEquals(c1, pcm.getGlobalConstraint(sourceTag1));
+    Assert.assertNotEquals(c2, pcm.getGlobalConstraint(sourceTag1));
+
+    // Now c2 should replace c1 for sourceTag1.
+    pcm.addGlobalConstraint(sourceTag1, c2, true);
+    Assert.assertEquals(1, pcm.getNumGlobalConstraints());
+    Assert.assertEquals(c2, pcm.getGlobalConstraint(sourceTag1));
+
+    pcm.removeGlobalConstraint(sourceTag1);
+    Assert.assertEquals(0, pcm.getNumGlobalConstraints());
+  }
+
+  @Test
+  public void testValidateConstraint() {
+    // At the moment we only disallow associating multiple source tags with a
+    // constraint. TODO: More tests to be added for YARN-6621.
+    Assert.assertTrue(pcm.validateConstraint(sourceTag1, c1));
+    Assert.assertFalse(pcm.validateConstraint(sourceTag4, c1));
+  }
+}




[30/32] hadoop git commit: YARN-7779. Display allocation tags in RM web UI and expose same through REST API. Contributed by Weiwei Yang.

Posted by as...@apache.org.
YARN-7779. Display allocation tags in RM web UI and expose same through REST API. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b81cb05
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b81cb05
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b81cb05

Branch: refs/heads/trunk
Commit: 9b81cb0537e5b731581e6a375bf0a59abf61c359
Parents: adbe87a
Author: Sunil G <su...@apache.org>
Authored: Tue Jan 23 17:09:58 2018 +0530
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |  6 ++
 .../yarn/sls/scheduler/RMNodeWrapper.java       |  6 ++
 .../server/resourcemanager/rmnode/RMNode.java   |  7 ++
 .../resourcemanager/rmnode/RMNodeImpl.java      |  6 ++
 .../constraint/AllocationTagsManager.java       | 11 +++
 .../resourcemanager/webapp/NodesPage.java       |  3 +
 .../webapp/dao/AllocationTagInfo.java           | 56 ++++++++++++++
 .../webapp/dao/AllocationTagsInfo.java          | 59 +++++++++++++++
 .../resourcemanager/webapp/dao/NodeInfo.java    | 15 ++++
 .../yarn/server/resourcemanager/MockNodes.java  |  6 ++
 .../resourcemanager/webapp/TestNodesPage.java   |  4 +-
 .../webapp/TestRMWebServicesNodes.java          | 77 +++++++++++++++++++-
 12 files changed, 253 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b81cb05/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index 1016ce1..0c99139 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.sls.nodemanager;
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -213,6 +214,11 @@ public class NodeInfo {
     }
 
     @Override
+    public Map<String, Long> getAllocationTagsWithCount() {
+      return null;
+    }
+
+    @Override
     public Resource getPhysicalResource() {
       return null;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b81cb05/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index fdad826..92f9b0f 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode
 
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 @Private
@@ -203,6 +204,11 @@ public class RMNodeWrapper implements RMNode {
   }
 
   @Override
+  public Map<String, Long> getAllocationTagsWithCount() {
+    return node.getAllocationTagsWithCount();
+  }
+
+  @Override
   public Resource getPhysicalResource() {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b81cb05/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
index a5615ef..872f2a6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
 
 
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.net.Node;
@@ -182,4 +183,10 @@ public interface RMNode {
    * @return the decommissioning timeout in second.
    */
   Integer getDecommissioningTimeout();
+
+  /**
+   * Get the allocation tags and their counts associated with this node.
+   * @return a map of each allocation tag and its count.
+   */
+  Map<String, Long> getAllocationTagsWithCount();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b81cb05/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index da54eb9..4fc2d8a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -1529,4 +1529,10 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
   public Integer getDecommissioningTimeout() {
     return decommissioningTimeout;
   }
+
+  @Override
+  public Map<String, Long> getAllocationTagsWithCount() {
+    return context.getAllocationTagsManager()
+        .getAllocationTagsWithCount(getNodeID());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b81cb05/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
index 7ad5e8c..42a78c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
@@ -548,4 +548,15 @@ public class AllocationTagsManager {
       readLock.unlock();
     }
   }
+
+  /**
+   * Returns a map whose key is the allocation tag and value is the
+   * count of allocations with this tag.
+   *
+   * @param nodeId the node whose allocation tag counts are returned
+   * @return allocation tag to count mapping
+   */
+  public Map<String, Long> getAllocationTagsWithCount(NodeId nodeId) {
+    return globalNodeMapping.getTypeToTagsWithCount().get(nodeId);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b81cb05/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
index d0e384d..3e78cf4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
@@ -81,12 +81,14 @@ class NodesPage extends RmView {
 
       if (!this.opportunisticContainersEnabled) {
         trbody.th(".containers", "Containers")
+            .th(".allocationTags", "Allocation Tags")
             .th(".mem", "Mem Used")
             .th(".mem", "Mem Avail")
             .th(".vcores", "VCores Used")
             .th(".vcores", "VCores Avail");
       } else {
         trbody.th(".containers", "Running Containers (G)")
+            .th(".allocationTags", "Allocation Tags")
             .th(".mem", "Mem Used (G)")
             .th(".mem", "Mem Avail (G)")
             .th(".vcores", "VCores Used (G)")
@@ -167,6 +169,7 @@ class NodesPage extends RmView {
             .append(Times.format(info.getLastHealthUpdate())).append("\",\"")
             .append(info.getHealthReport()).append("\",\"")
             .append(String.valueOf(info.getNumContainers())).append("\",\"")
+            .append(info.getAllocationTagsSummary()).append("\",\"")
             .append("<br title='").append(String.valueOf(usedMemory))
             .append("'>").append(StringUtils.byteDesc(usedMemory * BYTES_IN_MB))
             .append("\",\"").append("<br title='")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b81cb05/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AllocationTagInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AllocationTagInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AllocationTagInfo.java
new file mode 100644
index 0000000..97f9e90
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AllocationTagInfo.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+/**
+ * DAO object to display node allocation tag.
+ */
+@XmlRootElement(name = "allocationTagInfo")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class AllocationTagInfo {
+
+  private String allocationTag;
+  private long allocationsCount;
+
+  public AllocationTagInfo() {
+    // JAXB needs this
+  }
+
+  public AllocationTagInfo(String tag, long count) {
+    this.allocationTag = tag;
+    this.allocationsCount = count;
+  }
+
+  public String getAllocationTag() {
+    return this.allocationTag;
+  }
+
+  public long getAllocationsCount() {
+    return this.allocationsCount;
+  }
+
+  @Override
+  public String toString() {
+    return allocationTag + "(" + allocationsCount + ")";
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b81cb05/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AllocationTagsInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AllocationTagsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AllocationTagsInfo.java
new file mode 100644
index 0000000..ee09aa2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AllocationTagsInfo.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import org.apache.hadoop.util.StringUtils;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.ArrayList;
+import java.util.Iterator;
+
+/**
+ * DAO object to display node allocation tags.
+ */
+@XmlRootElement(name = "allocationTagsInfo")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class AllocationTagsInfo {
+
+  private ArrayList<AllocationTagInfo> allocationTagInfo;
+
+  public AllocationTagsInfo() {
+    allocationTagInfo = new ArrayList<>();
+  }
+
+  public void addAllocationTag(AllocationTagInfo info) {
+    allocationTagInfo.add(info);
+  }
+
+  @Override
+  public String toString() {
+    StringBuffer sb = new StringBuffer();
+    Iterator<AllocationTagInfo> it = allocationTagInfo.iterator();
+    while (it.hasNext()) {
+      AllocationTagInfo current = it.next();
+      sb.append(current.toString());
+      if (it.hasNext()) {
+        sb.append(StringUtils.COMMA);
+      }
+    }
+    return sb.toString();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b81cb05/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java
index 3cec215..46a6e60 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
 
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.Map;
 import java.util.Set;
 
 import javax.xml.bind.annotation.XmlAccessType;
@@ -57,6 +58,7 @@ public class NodeInfo {
   private long usedVirtualCoresOpport;
   private int numQueuedContainers;
   protected ArrayList<String> nodeLabels = new ArrayList<String>();
+  private AllocationTagsInfo allocationTags;
   protected ResourceUtilizationInfo resourceUtilization;
   protected ResourceInfo usedResource;
   protected ResourceInfo availableResource;
@@ -111,6 +113,14 @@ public class NodeInfo {
       Collections.sort(nodeLabels);
     }
 
+    // add allocation tags
+    allocationTags = new AllocationTagsInfo();
+    Map<String, Long> allocationTagsInfo = ni.getAllocationTagsWithCount();
+    if (allocationTagsInfo != null) {
+      allocationTagsInfo.forEach((tag, count) ->
+          allocationTags.addAllocationTag(new AllocationTagInfo(tag, count)));
+    }
+
     // update node and containers resource utilization
     this.resourceUtilization = new ResourceUtilizationInfo(ni);
   }
@@ -207,6 +217,11 @@ public class NodeInfo {
     return this.resourceUtilization;
   }
 
+  public String getAllocationTagsSummary() {
+    return this.allocationTags == null ? "" :
+        this.allocationTags.toString();
+  }
+
   @VisibleForTesting
   public void setId(String id) {
     this.id = id;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b81cb05/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
index d6549b9..84105d9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
@@ -22,6 +22,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Set;
+import java.util.Map;
 
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -280,6 +281,11 @@ public class MockNodes {
     }
 
     @Override
+    public Map<String, Long> getAllocationTagsWithCount() {
+      return null;
+    }
+
+    @Override
     public Resource getPhysicalResource() {
       return this.physicalResource;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b81cb05/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java
index cc97674..26e8c2a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java
@@ -48,8 +48,8 @@ public class TestNodesPage {
 
   // Number of Actual Table Headers for NodesPage.NodesBlock might change in
   // future. In that case this value should be adjusted to the new value.
-  final int numberOfThInMetricsTable = 23;
-  final int numberOfActualTableHeaders = 13;
+  private final int numberOfThInMetricsTable = 23;
+  private final int numberOfActualTableHeaders = 14;
   private final int numberOfThForOpportunisticContainers = 4;
 
   private Injector injector;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b81cb05/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
index fb597fc..7ea7e81 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
@@ -22,10 +22,15 @@ import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.assertResponseS
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.EnumSet;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.Iterator;
 
 import javax.ws.rs.core.MediaType;
 import javax.xml.parsers.DocumentBuilder;
@@ -51,6 +56,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeStartedEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeStatusEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.util.RackResolver;
 import org.apache.hadoop.yarn.util.YarnVersionInfo;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
@@ -734,7 +740,7 @@ public class TestRMWebServicesNodes extends JerseyTestBase {
 
   public void verifyNodeInfo(JSONObject nodeInfo, RMNode nm)
       throws JSONException, Exception {
-    assertEquals("incorrect number of elements", 18, nodeInfo.length());
+    assertEquals("incorrect number of elements", 19, nodeInfo.length());
 
     JSONObject resourceInfo = nodeInfo.getJSONObject("resourceUtilization");
     verifyNodeInfoGeneric(nm, nodeInfo.getString("state"),
@@ -837,4 +843,73 @@ public class TestRMWebServicesNodes extends JerseyTestBase {
     }
   }
 
+  @Test
+  public void testNodesAllocationTags() throws Exception {
+    NodeId nm1 = NodeId.newInstance("host1", 1234);
+    NodeId nm2 = NodeId.newInstance("host2", 2345);
+    AllocationTagsManager atm = mock(AllocationTagsManager.class);
+
+    Map<String, Map<String, Long>> expectedAllocationTags = new TreeMap<>();
+    Map<String, Long> nm1Tags = new TreeMap<>();
+    nm1Tags.put("A", 1L);
+    nm1Tags.put("B", 2L);
+    Map<String, Long> nm2Tags = new TreeMap<>();
+    nm2Tags.put("C", 1L);
+    nm2Tags.put("D", 2L);
+    expectedAllocationTags.put(nm1.toString(), nm1Tags);
+    expectedAllocationTags.put(nm2.toString(), nm2Tags);
+
+    when(atm.getAllocationTagsWithCount(nm1)).thenReturn(nm1Tags);
+    when(atm.getAllocationTagsWithCount(nm2)).thenReturn(nm2Tags);
+    rm.getRMContext().setAllocationTagsManager(atm);
+
+    rm.start();
+
+    rm.registerNode(nm1.toString(), 1024);
+    rm.registerNode(nm2.toString(), 1024);
+
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("cluster")
+        .path("nodes").accept("application/json").get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
+        response.getType().toString());
+    JSONObject nodesInfoJson = response.getEntity(JSONObject.class);
+    verifyNodeAllocationTag(nodesInfoJson, expectedAllocationTags);
+
+    rm.stop();
+  }
+
+  private void verifyNodeAllocationTag(JSONObject json,
+      Map<String, Map<String, Long>> expectedAllocationTags)
+      throws JSONException {
+    JSONArray nodes = json.getJSONObject("nodes").getJSONArray("node");
+    assertEquals(expectedAllocationTags.size(), nodes.length());
+    for (int i=0; i<nodes.length(); i++) {
+      JSONObject nodeJson = nodes.getJSONObject(i);
+      String nodeId = nodeJson.getString("id");
+
+      // Ensure the response contains all nodes info
+      assertTrue("Nodes info should have expected node IDs",
+          expectedAllocationTags.containsKey(nodeId));
+
+      Map<String, Long> expectedTags = expectedAllocationTags.get(nodeId);
+      JSONArray tagsInfo = nodeJson.getJSONObject("allocationTags")
+          .getJSONArray("allocationTagInfo");
+
+      // Ensure number of tags are expected.
+      assertEquals(expectedTags.size(), tagsInfo.length());
+
+      // Iterate expected tags and make sure the actual
+      // tags/counts are matched.
+      Iterator<String> it = expectedTags.keySet().iterator();
+      for (int j=0; j<tagsInfo.length(); j++) {
+        JSONObject tagInfo = tagsInfo.getJSONObject(j);
+        String expectedTag = it.next();
+        assertEquals(tagInfo.getString("allocationTag"), expectedTag);
+        assertEquals(tagInfo.getLong("allocationsCount"),
+            expectedTags.get(expectedTag).longValue());
+      }
+    }
+  }
+
 }


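For reference, a minimal sketch (not part of the patch) of how the new DAOs summarize a node's allocation tags; the tag names and counts below are illustrative:

    import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AllocationTagInfo;
    import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AllocationTagsInfo;

    public class AllocationTagSummaryExample {
      public static void main(String[] args) {
        AllocationTagsInfo tags = new AllocationTagsInfo();
        tags.addAllocationTag(new AllocationTagInfo("hbase-rs", 2));
        tags.addAllocationTag(new AllocationTagInfo("zk", 1));

        // NodeInfo#getAllocationTagsSummary() delegates to this toString(),
        // producing the comma-separated "tag(count)" form that backs the
        // extra table column counted in TestNodesPage above.
        System.out.println(tags);   // prints: hbase-rs(2),zk(1)
      }
    }

In the REST response exercised by testNodesAllocationTags above, the same data appears per node under allocationTags.allocationTagInfo[*] as allocationTag / allocationsCount pairs.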


[29/32] hadoop git commit: YARN-7784. Fix Cluster metrics when placement processor is enabled. (asuresh)

Posted by as...@apache.org.
YARN-7784. Fix Cluster metrics when placement processor is enabled. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8c5f5b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8c5f5b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8c5f5b2

Branch: refs/heads/trunk
Commit: f8c5f5b23732a1e35f012c1a6850bed09c8a5180
Parents: c23980c
Author: Arun Suresh <as...@apache.org>
Authored: Thu Jan 25 19:09:21 2018 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../scheduler/AppSchedulingInfo.java            | 10 ++++-
 .../scheduler/common/fica/FiCaSchedulerApp.java |  7 +++
 .../constraint/TestPlacementProcessor.java      | 45 ++++++++++++++++++++
 3 files changed, 60 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8c5f5b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
index 0389895..1efdd8b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
@@ -694,6 +694,12 @@ public class AppSchedulingInfo {
       metrics.runAppAttempt(applicationId, user);
     }
 
+    updateMetrics(applicationId, type, node, containerAllocated, user, queue);
+  }
+
+  public static void updateMetrics(ApplicationId applicationId, NodeType type,
+      SchedulerNode node, Container containerAllocated, String user,
+      Queue queue) {
     if (LOG.isDebugEnabled()) {
       LOG.debug("allocate: applicationId=" + applicationId + " container="
           + containerAllocated.getId() + " host=" + containerAllocated
@@ -702,10 +708,10 @@ public class AppSchedulingInfo {
           + type);
     }
     if(node != null) {
-      metrics.allocateResources(node.getPartition(), user, 1,
+      queue.getMetrics().allocateResources(node.getPartition(), user, 1,
           containerAllocated.getResource(), true);
     }
-    metrics.incrNodeTypeAggregations(user, type);
+    queue.getMetrics().incrNodeTypeAggregations(user, type);
   }
 
   // Get AppPlacementAllocator by specified schedulerKey

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8c5f5b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 7eb1e31..f3da0a3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerRese
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractUsersManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueResourceQuotas;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
@@ -548,6 +549,12 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
               ((RMContainerImpl) rmContainer).setAllocationTags(
                   containerRequest.getSchedulingRequest().getAllocationTags());
             }
+          } else {
+            AppSchedulingInfo.updateMetrics(getApplicationId(),
+                allocation.getAllocationLocalityType(),
+                schedulerContainer.getSchedulerNode(),
+                schedulerContainer.getRmContainer().getContainer(), getUser(),
+                getQueue());
           }
 
           attemptResourceUsage.incUsed(schedulerContainer.getNodePartition(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8c5f5b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
index 8426b20..698c17b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
@@ -148,6 +149,10 @@ public class TestPlacementProcessor {
         .collect(Collectors.toSet());
     // Ensure unique nodes (antiaffinity)
     Assert.assertEquals(4, nodeIds.size());
+
+    QueueMetrics metrics = rm.getResourceScheduler().getRootQueueMetrics();
+    // Verify Metrics
+    verifyMetrics(metrics, 11264, 11, 5120, 5, 5);
   }
 
   @Test(timeout = 300000)
@@ -197,6 +202,10 @@ public class TestPlacementProcessor {
         .collect(Collectors.toSet());
     // Ensure unique nodes (antiaffinity)
     Assert.assertEquals(5, nodeIds.size());
+
+    QueueMetrics metrics = rm.getResourceScheduler().getRootQueueMetrics();
+    // Verify Metrics
+    verifyMetrics(metrics, 14336, 14, 6144, 6, 6);
   }
 
   @Test(timeout = 300000)
@@ -245,6 +254,10 @@ public class TestPlacementProcessor {
     for (NodeId n : nodeIdContainerIdMap.keySet()) {
       Assert.assertTrue(nodeIdContainerIdMap.get(n) < 5);
     }
+
+    QueueMetrics metrics = rm.getResourceScheduler().getRootQueueMetrics();
+    // Verify Metrics
+    verifyMetrics(metrics, 23552, 23, 9216, 9, 9);
   }
 
   @Test(timeout = 300000)
@@ -288,6 +301,10 @@ public class TestPlacementProcessor {
         .collect(Collectors.toSet());
     // Ensure all containers end up on the same node (affinity)
     Assert.assertEquals(1, nodeIds.size());
+
+    QueueMetrics metrics = rm.getResourceScheduler().getRootQueueMetrics();
+    // Verify Metrics
+    verifyMetrics(metrics, 26624, 26, 6144, 6, 6);
   }
 
   @Test(timeout = 300000)
@@ -340,6 +357,10 @@ public class TestPlacementProcessor {
     for (NodeId n : nodeIdContainerIdMap.keySet()) {
       Assert.assertTrue(nodeIdContainerIdMap.get(n) < 4);
     }
+
+    QueueMetrics metrics = rm.getResourceScheduler().getRootQueueMetrics();
+    // Verify Metrics
+    verifyMetrics(metrics, 9216, 9, 7168, 7, 7);
   }
 
   @Test(timeout = 300000)
@@ -407,6 +428,10 @@ public class TestPlacementProcessor {
     Assert.assertEquals(4, rej.getRequest().getAllocationRequestId());
     Assert.assertEquals(RejectionReason.COULD_NOT_SCHEDULE_ON_NODE,
         rej.getReason());
+
+    QueueMetrics metrics = rm.getResourceScheduler().getRootQueueMetrics();
+    // Verify Metrics
+    verifyMetrics(metrics, 12288, 12, 4096, 4, 4);
   }
 
   @Test(timeout = 300000)
@@ -490,6 +515,10 @@ public class TestPlacementProcessor {
         .map(x -> x.getNodeId()).collect(Collectors.toSet());
     // Ensure unique nodes
     Assert.assertEquals(4, nodeIds.size());
+
+    QueueMetrics metrics = rm.getResourceScheduler().getRootQueueMetrics();
+    // Verify Metrics
+    verifyMetrics(metrics, 15360, 19, 9216, 5, 5);
   }
 
   @Test(timeout = 300000)
@@ -557,6 +586,10 @@ public class TestPlacementProcessor {
     RejectedSchedulingRequest rej = rejectedReqs.get(0);
     Assert.assertEquals(RejectionReason.COULD_NOT_PLACE_ON_NODE,
         rej.getReason());
+
+    QueueMetrics metrics = rm.getResourceScheduler().getRootQueueMetrics();
+    // Verify Metrics
+    verifyMetrics(metrics, 11264, 11, 5120, 5, 5);
   }
 
   private static void waitForContainerAllocation(Collection<MockNM> nodes,
@@ -594,4 +627,16 @@ public class TestPlacementProcessor {
             ResourceSizing.newInstance(1, Resource.newInstance(mem, cores)))
         .build();
   }
+
+  private static void verifyMetrics(QueueMetrics metrics, long availableMB,
+      int availableVirtualCores, long allocatedMB,
+      int allocatedVirtualCores, int allocatedContainers) {
+    Assert.assertEquals(availableMB, metrics.getAvailableMB());
+    Assert.assertEquals(availableVirtualCores,
+        metrics.getAvailableVirtualCores());
+    Assert.assertEquals(allocatedMB, metrics.getAllocatedMB());
+    Assert.assertEquals(allocatedVirtualCores,
+        metrics.getAllocatedVirtualCores());
+    Assert.assertEquals(allocatedContainers, metrics.getAllocatedContainers());
+  }
 }




[04/32] hadoop git commit: YARN-6619. AMRMClient Changes to use the PlacementConstraint and SchedulingRequest objects. (Arun Suresh via wangda)

Posted by as...@apache.org.
YARN-6619. AMRMClient Changes to use the PlacementConstraint and SchedulingRequest objects. (Arun Suresh via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29d9e4d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29d9e4d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29d9e4d5

Branch: refs/heads/trunk
Commit: 29d9e4d5814900d5c59d77fe05d32186d4ad9385
Parents: a5c1fc8
Author: Wangda Tan <wa...@apache.org>
Authored: Wed Jan 17 11:36:26 2018 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../hadoop/yarn/client/api/AMRMClient.java      |  38 +++-
 .../yarn/client/api/async/AMRMClientAsync.java  |  48 +++++
 .../api/async/impl/AMRMClientAsyncImpl.java     |  49 ++++-
 .../yarn/client/api/impl/AMRMClientImpl.java    | 142 ++++++++++++-
 .../client/api/impl/BaseAMRMClientTest.java     | 212 +++++++++++++++++++
 .../yarn/client/api/impl/TestAMRMClient.java    | 156 +-------------
 .../TestAMRMClientPlacementConstraints.java     | 204 ++++++++++++++++++
 .../rmcontainer/RMContainerImpl.java            |   3 +
 .../scheduler/AbstractYarnScheduler.java        |   1 +
 .../scheduler/SchedulerApplicationAttempt.java  |   1 +
 .../constraint/PlacementConstraintsUtil.java    |   4 +-
 11 files changed, 700 insertions(+), 158 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29d9e4d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
index d3d1974..914a146 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.yarn.client.api;
 
 import java.io.IOException;
 import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
 import java.util.function.Supplier;
 import java.util.List;
 
@@ -39,7 +41,9 @@ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.ProfileCapability;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.client.api.impl.AMRMClientImpl;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -554,6 +558,18 @@ public abstract class AMRMClient<T extends AMRMClient.ContainerRequest> extends
   }
 
   /**
+   * Add a Collection of SchedulingRequests. The AMRMClient will ensure that
+   * all requests in the same batch are sent in the same allocate call.
+   * @param schedulingRequests Collection of Scheduling Requests.
+   */
+  @Public
+  @InterfaceStability.Unstable
+  public void addSchedulingRequests(
+      Collection<SchedulingRequest> schedulingRequests) {
+
+  }
+
+  /**
    * Register the application master. This must be called before any 
    * other interaction
    * @param appHostName Name of the host on which master is running
@@ -568,7 +584,27 @@ public abstract class AMRMClient<T extends AMRMClient.ContainerRequest> extends
                                          int appHostPort,
                                          String appTrackingUrl) 
                throws YarnException, IOException;
-  
+
+  /**
+   * Register the application master. This must be called before any
+   * other interaction
+   * @param appHostName Name of the host on which master is running
+   * @param appHostPort Port master is listening on
+   * @param appTrackingUrl URL at which the master info can be seen
+   * @param placementConstraints Placement Constraints mappings.
+   * @return <code>RegisterApplicationMasterResponse</code>
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @InterfaceStability.Unstable
+  public RegisterApplicationMasterResponse registerApplicationMaster(
+      String appHostName, int appHostPort, String appTrackingUrl,
+      Map<Set<String>, PlacementConstraint> placementConstraints)
+      throws YarnException, IOException {
+    throw new YarnException("Not supported");
+  }
+
   /**
    * Request additional containers and receive new container allocations.
    * Requests made via <code>addContainerRequest</code> are sent to the

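A hedged usage sketch (not part of this patch) of the client surface added above: constraints are attached at registration time and SchedulingRequests are submitted in batches. The PlacementConstraints and SchedulingRequest builder calls are assumed from the YARN API and may differ in detail; the tag name, sizing and anti-affinity constraint are illustrative.

    import java.io.IOException;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;

    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.api.records.ResourceSizing;
    import org.apache.hadoop.yarn.api.records.SchedulingRequest;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
    import org.apache.hadoop.yarn.client.api.AMRMClient;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.exceptions.YarnException;

    public class SchedulingRequestClientSketch {
      public static void main(String[] args) throws IOException, YarnException {
        // Illustrative constraint: anti-affinity among containers tagged "hbase-rs".
        Map<Set<String>, PlacementConstraint> constraints = new HashMap<>();
        constraints.put(Collections.singleton("hbase-rs"),
            PlacementConstraints.targetNotIn(PlacementConstraints.NODE,
                PlacementConstraints.PlacementTargets.allocationTag("hbase-rs"))
                .build());

        AMRMClient<AMRMClient.ContainerRequest> client = AMRMClient.createAMRMClient();
        client.init(new YarnConfiguration());
        client.start();

        // New overload: the constraint map travels with AM registration.
        client.registerApplicationMaster("am-host", -1, "", constraints);

        // New call: requests passed in one collection are sent together in
        // the same allocate() heartbeat.
        client.addSchedulingRequests(Collections.singletonList(
            SchedulingRequest.newBuilder()
                .allocationRequestId(1)
                .priority(Priority.newInstance(1))
                .allocationTags(Collections.singleton("hbase-rs"))
                .resourceSizing(
                    ResourceSizing.newInstance(2, Resource.newInstance(1024, 1)))
                .build()));
      }
    }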
http://git-wip-us.apache.org/repos/asf/hadoop/blob/29d9e4d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
index 2b82ad6..0af687b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.yarn.client.api.async;
 import java.io.IOException;
 import java.util.Collection;
 import java.util.List;
+import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Supplier;
 
@@ -38,9 +40,12 @@ import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.api.records.UpdatedContainer;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
 import org.apache.hadoop.yarn.client.api.TimelineV2Client;
@@ -206,6 +211,19 @@ extends AbstractService {
                                                    Resource capability);
 
   /**
+   * Add a Collection of SchedulingRequests. The AMRMClient will ensure that
+   * all requests in the same batch are sent in the same allocate call.
+   * @param schedulingRequests Collection of Scheduling Requests.
+   */
+  @Public
+  @Unstable
+  public void addSchedulingRequests(
+      Collection<SchedulingRequest> schedulingRequests) {
+
+  }
+
+
+  /**
    * Returns all matching ContainerRequests that match the given Priority,
    * ResourceName, ExecutionType and Capability.
    *
@@ -250,6 +268,26 @@ extends AbstractService {
       throws YarnException, IOException;
 
   /**
+   * Register the application master. This must be called before any
+   * other interaction
+   * @param appHostName Name of the host on which master is running
+   * @param appHostPort Port master is listening on
+   * @param appTrackingUrl URL at which the master info can be seen
+   * @param placementConstraints Placement Constraints mappings.
+   * @return <code>RegisterApplicationMasterResponse</code>
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Unstable
+  public RegisterApplicationMasterResponse registerApplicationMaster(
+      String appHostName, int appHostPort, String appTrackingUrl,
+      Map<Set<String>, PlacementConstraint> placementConstraints)
+      throws YarnException, IOException {
+    throw new YarnException("Not supported");
+  }
+
+  /**
    * Unregister the application master. This must be called in the end.
    * @param appStatus Success/Failure status of the master
    * @param appMessage Diagnostics message on failure
@@ -494,6 +532,16 @@ extends AbstractService {
     public void onContainersReceivedFromPreviousAttempts(
         List<Container> containers) {
     }
+
+    /**
+     * Called when the RM has rejected Scheduling Requests.
+     * @param rejectedSchedulingRequests Rejected Scheduling Requests.
+     */
+    @Public
+    @Unstable
+    public void onRequestsRejected(
+        List<RejectedSchedulingRequest> rejectedSchedulingRequests) {
+    }
   }
 
   /**

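A hedged sketch (not from the patch) of a callback handler using the new onRequestsRejected() hook; the remaining overrides are the usual AbstractCallbackHandler methods, left as no-ops here, and the handling policy is illustrative.

    import java.util.List;

    import org.apache.hadoop.yarn.api.records.Container;
    import org.apache.hadoop.yarn.api.records.ContainerStatus;
    import org.apache.hadoop.yarn.api.records.NodeReport;
    import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
    import org.apache.hadoop.yarn.api.records.UpdatedContainer;
    import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;

    public class RejectionAwareCallbackHandler
        extends AMRMClientAsync.AbstractCallbackHandler {

      @Override
      public void onRequestsRejected(
          List<RejectedSchedulingRequest> rejectedSchedulingRequests) {
        // Called from the heartbeat thread when the RM rejects requests
        // (e.g. COULD_NOT_PLACE_ON_NODE); an AM would typically relax the
        // constraint or re-submit via addSchedulingRequests().
        for (RejectedSchedulingRequest r : rejectedSchedulingRequests) {
          System.err.println("Rejected allocationRequestId="
              + r.getRequest().getAllocationRequestId()
              + ", reason=" + r.getReason());
        }
      }

      // Remaining callbacks are no-ops for this sketch.
      @Override public void onContainersCompleted(List<ContainerStatus> statuses) { }
      @Override public void onContainersAllocated(List<Container> containers) { }
      @Override public void onContainersUpdated(List<UpdatedContainer> containers) { }
      @Override public void onShutdownRequest() { }
      @Override public void onNodesUpdated(List<NodeReport> updatedNodes) { }
      @Override public void onError(Throwable e) { }
      @Override public float getProgress() { return 0.0f; }
    }

The dispatch to this callback is in the AMRMClientAsyncImpl change that follows, and it only fires when the registered handler extends AbstractCallbackHandler.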
http://git-wip-us.apache.org/repos/asf/hadoop/blob/29d9e4d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
index 33b0aba..4f04b66 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
@@ -22,6 +22,8 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
+import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 
@@ -36,9 +38,12 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.api.records.UpdatedContainer;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
 import org.apache.hadoop.yarn.client.api.TimelineV2Client;
@@ -150,18 +155,50 @@ extends AMRMClientAsync<T> {
                                                    Resource capability) {
     return client.getMatchingRequests(priority, resourceName, capability);
   }
-  
+
+  @Override
+  public void addSchedulingRequests(
+      Collection<SchedulingRequest> schedulingRequests) {
+    client.addSchedulingRequests(schedulingRequests);
+  }
+
   /**
    * Registers this application master with the resource manager. On successful
    * registration, starts the heartbeating thread.
+   *
+   * @param appHostName Name of the host on which master is running
+   * @param appHostPort Port master is listening on
+   * @param appTrackingUrl URL at which the master info can be seen
+   * @return Register AM Response.
    * @throws YarnException
    * @throws IOException
    */
   public RegisterApplicationMasterResponse registerApplicationMaster(
       String appHostName, int appHostPort, String appTrackingUrl)
       throws YarnException, IOException {
+    return registerApplicationMaster(
+        appHostName, appHostPort, appTrackingUrl, null);
+  }
+
+  /**
+   * Registers this application master with the resource manager. On successful
+   * registration, starts the heartbeating thread.
+   *
+   * @param appHostName Name of the host on which master is running
+   * @param appHostPort Port master is listening on
+   * @param appTrackingUrl URL at which the master info can be seen
+   * @param placementConstraintsMap Placement Constraints Mapping.
+   * @return Register AM Response.
+   * @throws YarnException
+   * @throws IOException
+   */
+  public RegisterApplicationMasterResponse registerApplicationMaster(
+      String appHostName, int appHostPort, String appTrackingUrl,
+      Map<Set<String>, PlacementConstraint> placementConstraintsMap)
+      throws YarnException, IOException {
     RegisterApplicationMasterResponse response = client
-        .registerApplicationMaster(appHostName, appHostPort, appTrackingUrl);
+        .registerApplicationMaster(appHostName, appHostPort,
+            appTrackingUrl, placementConstraintsMap);
     heartbeatThread.start();
     return response;
   }
@@ -366,6 +403,14 @@ extends AMRMClientAsync<T> {
                       response.getContainersFromPreviousAttempts());
             }
           }
+          List<RejectedSchedulingRequest> rejectedSchedulingRequests =
+              response.getRejectedSchedulingRequests();
+          if (!rejectedSchedulingRequests.isEmpty()) {
+            if (handler instanceof AMRMClientAsync.AbstractCallbackHandler) {
+              ((AMRMClientAsync.AbstractCallbackHandler) handler)
+                  .onRequestsRejected(rejectedSchedulingRequests);
+            }
+          }
           progress = handler.getProgress();
         } catch (Throwable ex) {
           handler.onError(ex);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29d9e4d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
index 5507c07..8e2336f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
@@ -30,9 +30,11 @@ import java.util.LinkedHashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Queue;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.AbstractMap.SimpleEntry;
+import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -60,9 +62,11 @@ import org.apache.hadoop.yarn.api.records.ProfileCapability;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.api.records.UpdatedContainer;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.client.ClientRMProxy;
 import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
@@ -106,6 +110,12 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
   protected final Set<String> blacklistedNodes = new HashSet<String>();
   protected final Set<String> blacklistAdditions = new HashSet<String>();
   protected final Set<String> blacklistRemovals = new HashSet<String>();
+  private Map<Set<String>, PlacementConstraint> placementConstraints =
+      new HashMap<>();
+  private Queue<Collection<SchedulingRequest>> batchedSchedulingRequests =
+      new LinkedList<>();
+  private Map<Set<String>, List<SchedulingRequest>> outstandingSchedRequests =
+      new ConcurrentHashMap<>();
 
   protected Map<String, Resource> resourceProfilesMap;
   
@@ -218,14 +228,26 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
     }
     super.serviceStop();
   }
-  
+
   @Override
   public RegisterApplicationMasterResponse registerApplicationMaster(
       String appHostName, int appHostPort, String appTrackingUrl)
       throws YarnException, IOException {
+    return registerApplicationMaster(appHostName, appHostPort, appTrackingUrl,
+        null);
+  }
+
+  @Override
+  public RegisterApplicationMasterResponse registerApplicationMaster(
+      String appHostName, int appHostPort, String appTrackingUrl,
+      Map<Set<String>, PlacementConstraint> placementConstraintsMap)
+      throws YarnException, IOException {
     this.appHostName = appHostName;
     this.appHostPort = appHostPort;
     this.appTrackingUrl = appTrackingUrl;
+    if (placementConstraintsMap != null && !placementConstraintsMap.isEmpty()) {
+      this.placementConstraints.putAll(placementConstraintsMap);
+    }
     Preconditions.checkArgument(appHostName != null,
         "The host name should not be null");
     Preconditions.checkArgument(appHostPort >= -1, "Port number of the host"
@@ -240,6 +262,9 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
     RegisterApplicationMasterRequest request =
         RegisterApplicationMasterRequest.newInstance(this.appHostName,
             this.appHostPort, this.appTrackingUrl);
+    if (!this.placementConstraints.isEmpty()) {
+      request.setPlacementConstraints(this.placementConstraints);
+    }
     RegisterApplicationMasterResponse response =
         rmClient.registerApplicationMaster(request);
     synchronized (this) {
@@ -248,11 +273,23 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
         populateNMTokens(response.getNMTokensFromPreviousAttempts());
       }
       this.resourceProfilesMap = response.getResourceProfiles();
+      List<Container> prevContainers =
+          response.getContainersFromPreviousAttempts();
+      removeFromOutstandingSchedulingRequests(prevContainers);
+      recreateSchedulingRequestBatch();
     }
     return response;
   }
 
   @Override
+  public void addSchedulingRequests(
+      Collection<SchedulingRequest> schedulingRequests) {
+    synchronized (this.batchedSchedulingRequests) {
+      this.batchedSchedulingRequests.add(schedulingRequests);
+    }
+  }
+
+  @Override
   public AllocateResponse allocate(float progressIndicator) 
       throws YarnException, IOException {
     Preconditions.checkArgument(progressIndicator >= 0,
@@ -288,6 +325,7 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
             .responseId(lastResponseId).progress(progressIndicator)
             .askList(askList).resourceBlacklistRequest(blacklistRequest)
             .releaseList(releaseList).updateRequests(updateList).build();
+        populateSchedulingRequests(allocateRequest);
         // clear blacklistAdditions and blacklistRemovals before
         // unsynchronized part
         blacklistAdditions.clear();
@@ -296,6 +334,10 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
 
       try {
         allocateResponse = rmClient.allocate(allocateRequest);
+        removeFromOutstandingSchedulingRequests(
+            allocateResponse.getAllocatedContainers());
+        removeFromOutstandingSchedulingRequests(
+            allocateResponse.getContainersFromPreviousAttempts());
       } catch (ApplicationMasterNotRegisteredException e) {
         LOG.warn("ApplicationMaster is out of sync with ResourceManager,"
             + " hence resyncing.");
@@ -397,6 +439,104 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
     return allocateResponse;
   }
 
+  private void populateSchedulingRequests(AllocateRequest allocateRequest) {
+    synchronized (this.batchedSchedulingRequests) {
+      if (!this.batchedSchedulingRequests.isEmpty()) {
+        List<SchedulingRequest> newReqs = new LinkedList<>();
+        Iterator<Collection<SchedulingRequest>> iter =
+            this.batchedSchedulingRequests.iterator();
+        while (iter.hasNext()) {
+          Collection<SchedulingRequest> requests = iter.next();
+          newReqs.addAll(requests);
+          addToOutstandingSchedulingRequests(requests);
+          iter.remove();
+        }
+        allocateRequest.setSchedulingRequests(newReqs);
+      }
+    }
+  }
+
+  private void recreateSchedulingRequestBatch() {
+    List<SchedulingRequest> batched = new ArrayList<>();
+    synchronized (this.outstandingSchedRequests) {
+      for (List<SchedulingRequest> schedReqs :
+          this.outstandingSchedRequests.values()) {
+        batched.addAll(schedReqs);
+      }
+    }
+    synchronized (this.batchedSchedulingRequests) {
+      this.batchedSchedulingRequests.add(batched);
+    }
+  }
+
+  private void addToOutstandingSchedulingRequests(
+      Collection<SchedulingRequest> requests) {
+    for (SchedulingRequest req : requests) {
+      List<SchedulingRequest> schedulingRequests =
+          this.outstandingSchedRequests.computeIfAbsent(
+              req.getAllocationTags(), x -> new LinkedList<>());
+      SchedulingRequest matchingReq = null;
+      synchronized (schedulingRequests) {
+        for (SchedulingRequest schedReq : schedulingRequests) {
+          if (isMatching(req, schedReq)) {
+            matchingReq = schedReq;
+            break;
+          }
+        }
+        if (matchingReq != null) {
+          matchingReq.getResourceSizing().setNumAllocations(
+              req.getResourceSizing().getNumAllocations());
+        } else {
+          schedulingRequests.add(req);
+        }
+      }
+    }
+  }
+
+  private boolean isMatching(SchedulingRequest schedReq1,
+      SchedulingRequest schedReq2) {
+    return schedReq1.getPriority().equals(schedReq2.getPriority()) &&
+        schedReq1.getExecutionType().getExecutionType().equals(
+            schedReq2.getExecutionType().getExecutionType()) &&
+        schedReq1.getAllocationRequestId() ==
+            schedReq2.getAllocationRequestId();
+  }
+
+  private void removeFromOutstandingSchedulingRequests(
+      Collection<Container> containers) {
+    if (containers == null || containers.isEmpty()) {
+      return;
+    }
+    for (Container container : containers) {
+      if (container.getAllocationTags() != null &&
+          !container.getAllocationTags().isEmpty()) {
+        List<SchedulingRequest> schedReqs =
+            this.outstandingSchedRequests.get(container.getAllocationTags());
+        if (schedReqs != null && !schedReqs.isEmpty()) {
+          synchronized (schedReqs) {
+            Iterator<SchedulingRequest> iter = schedReqs.iterator();
+            while (iter.hasNext()) {
+              SchedulingRequest schedReq = iter.next();
+              if (schedReq.getPriority().equals(container.getPriority()) &&
+                  schedReq.getAllocationRequestId() ==
+                      container.getAllocationRequestId()) {
+                int numAllocations =
+                    schedReq.getResourceSizing().getNumAllocations();
+                numAllocations--;
+                if (numAllocations == 0) {
+                  iter.remove();
+                } else {
+                  schedReq.getResourceSizing()
+                      .setNumAllocations(numAllocations);
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
   private List<UpdateContainerRequest> createUpdateList() {
     List<UpdateContainerRequest> updateList = new ArrayList<>();
     for (Map.Entry<ContainerId, SimpleEntry<Container,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29d9e4d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/BaseAMRMClientTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/BaseAMRMClientTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/BaseAMRMClientTest.java
new file mode 100644
index 0000000..d18652f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/BaseAMRMClientTest.java
@@ -0,0 +1,212 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.client.api.impl;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.service.Service;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.client.ClientRMProxy;
+import org.apache.hadoop.yarn.client.api.YarnClient;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.MiniYARNCluster;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.Records;
+import org.junit.After;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Base class for testing AMRMClient.
+ */
+public class BaseAMRMClientTest {
+
+  protected Configuration conf = null;
+  protected MiniYARNCluster yarnCluster = null;
+  protected YarnClient yarnClient = null;
+  protected List<NodeReport> nodeReports = null;
+  protected ApplicationAttemptId attemptId = null;
+
+  protected String schedulerName = CapacityScheduler.class.getName();
+  protected boolean autoUpdate = false;
+
+  protected int nodeCount = 3;
+  protected long amExpireMs = 4000;
+  protected int rollingIntervalSec = 13;
+
+
+  protected Resource capability;
+  protected Priority priority;
+  protected Priority priority2;
+  protected String node;
+  protected String rack;
+  protected String[] nodes;
+  protected String[] racks;
+
+  @Before
+  public void setup() throws Exception {
+    conf = new YarnConfiguration();
+    createClusterAndStartApplication(conf);
+  }
+
+  protected void createClusterAndStartApplication(Configuration conf)
+      throws Exception {
+    // start minicluster
+    this.conf = conf;
+    if (autoUpdate) {
+      conf.setBoolean(YarnConfiguration.RM_AUTO_UPDATE_CONTAINERS, true);
+    }
+    conf.set(YarnConfiguration.RM_SCHEDULER, schedulerName);
+    conf.setLong(
+        YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
+        rollingIntervalSec);
+    conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, amExpireMs);
+    conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 100);
+    // set the minimum allocation so that resource decrease can go under 1024
+    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
+    conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
+    conf.setBoolean(
+        YarnConfiguration.OPPORTUNISTIC_CONTAINER_ALLOCATION_ENABLED, true);
+    conf.setInt(
+        YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH, 10);
+    yarnCluster = new MiniYARNCluster(
+        TestAMRMClient.class.getName(), nodeCount, 1, 1);
+    yarnCluster.init(conf);
+    yarnCluster.start();
+
+    // start rm client
+    yarnClient = YarnClient.createYarnClient();
+    yarnClient.init(conf);
+    yarnClient.start();
+
+    // get node info
+    assertTrue("All node managers did not connect to the RM within the "
+            + "allotted 5-second timeout",
+        yarnCluster.waitForNodeManagersToConnect(5000L));
+    nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
+    assertEquals("Not all node managers were reported running",
+        nodeCount, nodeReports.size());
+
+    priority = Priority.newInstance(1);
+    priority2 = Priority.newInstance(2);
+    capability = Resource.newInstance(1024, 1);
+
+    node = nodeReports.get(0).getNodeId().getHost();
+    rack = nodeReports.get(0).getRackName();
+    nodes = new String[]{ node };
+    racks = new String[]{ rack };
+
+    // submit new app
+    ApplicationSubmissionContext appContext =
+        yarnClient.createApplication().getApplicationSubmissionContext();
+    ApplicationId appId = appContext.getApplicationId();
+    // set the application name
+    appContext.setApplicationName("Test");
+    // Set the priority for the application master
+    Priority pri = Records.newRecord(Priority.class);
+    pri.setPriority(0);
+    appContext.setPriority(pri);
+    // Set the queue to which this application is to be submitted in the RM
+    appContext.setQueue("default");
+    // Set up the container launch context for the application master
+    ContainerLaunchContext amContainer =
+        BuilderUtils.newContainerLaunchContext(
+            Collections.<String, LocalResource> emptyMap(),
+            new HashMap<String, String>(), Arrays.asList("sleep", "100"),
+            new HashMap<String, ByteBuffer>(), null,
+            new HashMap<ApplicationAccessType, String>());
+    appContext.setAMContainerSpec(amContainer);
+    appContext.setResource(Resource.newInstance(1024, 1));
+    // Create the request to send to the applications manager
+    SubmitApplicationRequest appRequest = Records
+        .newRecord(SubmitApplicationRequest.class);
+    appRequest.setApplicationSubmissionContext(appContext);
+    // Submit the application to the applications manager
+    yarnClient.submitApplication(appContext);
+
+    // wait for app to start
+    RMAppAttempt appAttempt = null;
+    while (true) {
+      ApplicationReport appReport = yarnClient.getApplicationReport(appId);
+      if (appReport.getYarnApplicationState() ==
+          YarnApplicationState.ACCEPTED) {
+        attemptId = appReport.getCurrentApplicationAttemptId();
+        appAttempt =
+            yarnCluster.getResourceManager().getRMContext().getRMApps()
+                .get(attemptId.getApplicationId()).getCurrentAppAttempt();
+        while (true) {
+          if (appAttempt.getAppAttemptState() == RMAppAttemptState.LAUNCHED) {
+            break;
+          }
+        }
+        break;
+      }
+    }
+    // Dig into the ResourceManager and get the AMRMToken, just for the
+    // sake of testing.
+    UserGroupInformation.setLoginUser(UserGroupInformation
+        .createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
+
+    // emulate RM setup of AMRM token in credentials by adding the token
+    // *before* setting the token service
+    UserGroupInformation.getCurrentUser().addToken(appAttempt.getAMRMToken());
+    appAttempt.getAMRMToken().setService(
+        ClientRMProxy.getAMRMTokenService(conf));
+  }
+
+  @After
+  public void teardown() throws YarnException, IOException {
+    yarnClient.killApplication(attemptId.getApplicationId());
+    attemptId = null;
+
+    if (yarnClient != null &&
+        yarnClient.getServiceState() == Service.STATE.STARTED) {
+      yarnClient.stop();
+    }
+    if (yarnCluster != null &&
+        yarnCluster.getServiceState() == Service.STATE.STARTED) {
+      yarnCluster.stop();
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29d9e4d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
index 3ecc5cd..b059118 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
@@ -43,7 +43,6 @@ import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
@@ -56,24 +55,18 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
 import org.apache.hadoop.yarn.api.records.*;
-import org.apache.hadoop.yarn.client.ClientRMProxy;
 import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
 import org.apache.hadoop.yarn.client.api.InvalidContainerRequestException;
 import org.apache.hadoop.yarn.client.api.NMClient;
 import org.apache.hadoop.yarn.client.api.NMTokenCache;
-import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
-import org.apache.hadoop.yarn.server.MiniYARNCluster;
 import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
@@ -81,10 +74,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedule
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.Records;
-import org.junit.After;
 import org.junit.Assert;
 import org.junit.Assume;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -97,26 +88,8 @@ import org.eclipse.jetty.util.log.Log;
  * Test application master client class to resource manager.
  */
 @RunWith(value = Parameterized.class)
-public class TestAMRMClient {
-  private String schedulerName = null;
-  private boolean autoUpdate = false;
-  private Configuration conf = null;
-  private MiniYARNCluster yarnCluster = null;
-  private YarnClient yarnClient = null;
-  private List<NodeReport> nodeReports = null;
-  private ApplicationAttemptId attemptId = null;
-  private int nodeCount = 3;
-  
-  static final int rolling_interval_sec = 13;
-  static final long am_expire_ms = 4000;
-
-  private Resource capability;
-  private Priority priority;
-  private Priority priority2;
-  private String node;
-  private String rack;
-  private String[] nodes;
-  private String[] racks;
+public class TestAMRMClient extends BaseAMRMClientTest{
+
   private final static int DEFAULT_ITERATION = 3;
 
   public TestAMRMClient(String schedulerName, boolean autoUpdate) {
@@ -134,127 +107,6 @@ public class TestAMRMClient {
     });
   }
 
-  @Before
-  public void setup() throws Exception {
-    conf = new YarnConfiguration();
-    createClusterAndStartApplication(conf);
-  }
-
-  private void createClusterAndStartApplication(Configuration conf)
-      throws Exception {
-    // start minicluster
-    this.conf = conf;
-    if (autoUpdate) {
-      conf.setBoolean(YarnConfiguration.RM_AUTO_UPDATE_CONTAINERS, true);
-    }
-    conf.set(YarnConfiguration.RM_SCHEDULER, schedulerName);
-    conf.setLong(
-      YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
-      rolling_interval_sec);
-    conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, am_expire_ms);
-    conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 100);
-    // set the minimum allocation so that resource decrease can go under 1024
-    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
-    conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
-    conf.setBoolean(
-        YarnConfiguration.OPPORTUNISTIC_CONTAINER_ALLOCATION_ENABLED, true);
-    conf.setInt(
-        YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH, 10);
-    yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1);
-    yarnCluster.init(conf);
-    yarnCluster.start();
-
-    // start rm client
-    yarnClient = YarnClient.createYarnClient();
-    yarnClient.init(conf);
-    yarnClient.start();
-
-    // get node info
-    assertTrue("All node managers did not connect to the RM within the "
-        + "allotted 5-second timeout",
-        yarnCluster.waitForNodeManagersToConnect(5000L));
-    nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
-    assertEquals("Not all node managers were reported running",
-        nodeCount, nodeReports.size());
-
-    priority = Priority.newInstance(1);
-    priority2 = Priority.newInstance(2);
-    capability = Resource.newInstance(1024, 1);
-
-    node = nodeReports.get(0).getNodeId().getHost();
-    rack = nodeReports.get(0).getRackName();
-    nodes = new String[]{ node };
-    racks = new String[]{ rack };
-
-    // submit new app
-    ApplicationSubmissionContext appContext = 
-        yarnClient.createApplication().getApplicationSubmissionContext();
-    ApplicationId appId = appContext.getApplicationId();
-    // set the application name
-    appContext.setApplicationName("Test");
-    // Set the priority for the application master
-    Priority pri = Records.newRecord(Priority.class);
-    pri.setPriority(0);
-    appContext.setPriority(pri);
-    // Set the queue to which this application is to be submitted in the RM
-    appContext.setQueue("default");
-    // Set up the container launch context for the application master
-    ContainerLaunchContext amContainer =
-        BuilderUtils.newContainerLaunchContext(
-          Collections.<String, LocalResource> emptyMap(),
-          new HashMap<String, String>(), Arrays.asList("sleep", "100"),
-          new HashMap<String, ByteBuffer>(), null,
-          new HashMap<ApplicationAccessType, String>());
-    appContext.setAMContainerSpec(amContainer);
-    appContext.setResource(Resource.newInstance(1024, 1));
-    // Create the request to send to the applications manager
-    SubmitApplicationRequest appRequest = Records
-        .newRecord(SubmitApplicationRequest.class);
-    appRequest.setApplicationSubmissionContext(appContext);
-    // Submit the application to the applications manager
-    yarnClient.submitApplication(appContext);
-
-    // wait for app to start
-    RMAppAttempt appAttempt = null;
-    while (true) {
-      ApplicationReport appReport = yarnClient.getApplicationReport(appId);
-      if (appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED) {
-        attemptId = appReport.getCurrentApplicationAttemptId();
-        appAttempt =
-            yarnCluster.getResourceManager().getRMContext().getRMApps()
-              .get(attemptId.getApplicationId()).getCurrentAppAttempt();
-        while (true) {
-          if (appAttempt.getAppAttemptState() == RMAppAttemptState.LAUNCHED) {
-            break;
-          }
-        }
-        break;
-      }
-    }
-    // Just dig into the ResourceManager and get the AMRMToken just for the sake
-    // of testing.
-    UserGroupInformation.setLoginUser(UserGroupInformation
-      .createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
-
-    // emulate RM setup of AMRM token in credentials by adding the token
-    // *before* setting the token service
-    UserGroupInformation.getCurrentUser().addToken(appAttempt.getAMRMToken());
-    appAttempt.getAMRMToken().setService(ClientRMProxy.getAMRMTokenService(conf));
-  }
-  
-  @After
-  public void teardown() throws YarnException, IOException {
-    yarnClient.killApplication(attemptId.getApplicationId());
-    attemptId = null;
-
-    if (yarnClient != null && yarnClient.getServiceState() == STATE.STARTED) {
-      yarnClient.stop();
-    }
-    if (yarnCluster != null && yarnCluster.getServiceState() == STATE.STARTED) {
-      yarnCluster.stop();
-    }
-  }
-
   @Test (timeout = 60000)
   public void testAMRMClientNoMatchingRequests()
       throws IOException, YarnException {
@@ -905,7 +757,7 @@ public class TestAMRMClient {
     initAMRMClientAndTest(false);
   }
 
-  private void initAMRMClientAndTest(boolean useAllocReqId)
+  protected void initAMRMClientAndTest(boolean useAllocReqId)
       throws YarnException, IOException {
     AMRMClient<ContainerRequest> amClient = null;
     try {
@@ -1946,7 +1798,7 @@ public class TestAMRMClient {
       // Wait for enough time and make sure the roll_over happens
       // At mean time, the old AMRMToken should continue to work
       while (System.currentTimeMillis() - startTime <
-          rolling_interval_sec * 1000) {
+          rollingIntervalSec * 1000) {
         amClient.allocate(0.1f);
         sleep(1000);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29d9e4d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientPlacementConstraints.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientPlacementConstraints.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientPlacementConstraints.java
new file mode 100644
index 0000000..fdc8d58
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientPlacementConstraints.java
@@ -0,0 +1,204 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.client.api.impl;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.api.records.UpdatedContainer;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.hadoop.yarn.client.api.NMTokenCache;
+import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
+import org.apache.hadoop.yarn.client.api.async.impl.AMRMClientAsyncImpl;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static java.lang.Thread.sleep;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
+
+/**
+ * Test Placement Constraints and Scheduling Requests.
+ */
+public class TestAMRMClientPlacementConstraints extends BaseAMRMClientTest {
+
+  @Test(timeout=60000)
+  public void testAMRMClientWithPlacementConstraints()
+      throws Exception {
+    // we have to create a new instance of MiniYARNCluster to avoid SASL qop
+    // mismatches between client and server
+    teardown();
+    conf = new YarnConfiguration();
+    conf.setBoolean(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ENABLED, true);
+    createClusterAndStartApplication(conf);
+
+    AMRMClient<AMRMClient.ContainerRequest> amClient =
+        AMRMClient.<AMRMClient.ContainerRequest>createAMRMClient();
+    amClient.setNMTokenCache(new NMTokenCache());
+    //asserting we are not using the singleton instance cache
+    Assert.assertNotSame(NMTokenCache.getSingleton(),
+        amClient.getNMTokenCache());
+
+    final List<Container> allocatedContainers = new ArrayList<>();
+    final List<RejectedSchedulingRequest> rejectedSchedulingRequests =
+        new ArrayList<>();
+    AMRMClientAsync asyncClient = new AMRMClientAsyncImpl<>(amClient, 1000,
+        new AMRMClientAsync.AbstractCallbackHandler() {
+          @Override
+          public void onContainersAllocated(List<Container> containers) {
+            allocatedContainers.addAll(containers);
+          }
+
+          @Override
+          public void onRequestsRejected(
+              List<RejectedSchedulingRequest> rejReqs) {
+            rejectedSchedulingRequests.addAll(rejReqs);
+          }
+
+          @Override
+          public void onContainersCompleted(List<ContainerStatus> statuses) {}
+          @Override
+          public void onContainersUpdated(List<UpdatedContainer> containers) {}
+          @Override
+          public void onShutdownRequest() {}
+          @Override
+          public void onNodesUpdated(List<NodeReport> updatedNodes) {}
+          @Override
+          public void onError(Throwable e) {}
+
+          @Override
+          public float getProgress() {
+            return 0.1f;
+          }
+        });
+
+    asyncClient.init(conf);
+    asyncClient.start();
+    Map<Set<String>, PlacementConstraint> pcMapping = new HashMap<>();
+    pcMapping.put(Collections.singleton("foo"),
+        PlacementConstraints.build(
+            PlacementConstraints.targetNotIn(NODE, allocationTag("foo"))));
+    pcMapping.put(Collections.singleton("bar"),
+        PlacementConstraints.build(
+            PlacementConstraints.targetNotIn(NODE, allocationTag("bar"))));
+    asyncClient.registerApplicationMaster("Host", 10000, "", pcMapping);
+
+    // Send two types of requests - 4 with source tag "foo" and numAlloc = 1,
+    // and 1 with source tag "bar" and numAlloc = 4. Both should be
+    // handled similarly, i.e., since there are only 3 nodes,
+    // 2 schedulingRequests - 1 with source tag "foo" and 1 with source
+    // tag "bar" - should get rejected.
+    asyncClient.addSchedulingRequests(
+        Arrays.asList(
+            // 4 reqs with numAlloc = 1
+            schedulingRequest(1, 1, 1, 1, 512, "foo"),
+            schedulingRequest(1, 1, 2, 1, 512, "foo"),
+            schedulingRequest(1, 1, 3, 1, 512, "foo"),
+            schedulingRequest(1, 1, 4, 1, 512, "foo"),
+            // 1 req with numAlloc = 4
+            schedulingRequest(4, 1, 5, 1, 512, "bar")));
+
+    // kick the scheduler
+    waitForContainerAllocation(allocatedContainers,
+        rejectedSchedulingRequests, 6, 2);
+
+    Assert.assertEquals(6, allocatedContainers.size());
+    Map<NodeId, List<Container>> containersPerNode =
+        allocatedContainers.stream().collect(
+            Collectors.groupingBy(Container::getNodeId));
+
+    // Ensure 2 containers allocated per node.
+    // Each node should have a "foo" and a "bar" container.
+    Assert.assertEquals(3, containersPerNode.entrySet().size());
+    HashSet<String> srcTags = new HashSet<>(Arrays.asList("foo", "bar"));
+    containersPerNode.entrySet().forEach(
+        x ->
+          Assert.assertEquals(
+              srcTags,
+              x.getValue()
+                  .stream()
+                  .map(y -> y.getAllocationTags().iterator().next())
+                  .collect(Collectors.toSet()))
+    );
+
+    // Ensure 2 rejected requests - 1 of "foo" and 1 of "bar"
+    Assert.assertEquals(2, rejectedSchedulingRequests.size());
+    Assert.assertEquals(srcTags,
+        rejectedSchedulingRequests
+            .stream()
+            .map(x -> x.getRequest().getAllocationTags().iterator().next())
+            .collect(Collectors.toSet()));
+
+    asyncClient.stop();
+  }
+
+  private static void waitForContainerAllocation(
+      List<Container> allocatedContainers,
+      List<RejectedSchedulingRequest> rejectedRequests,
+      int containerNum, int rejNum) throws Exception {
+
+    int maxCount = 10;
+    while (maxCount >= 0 &&
+        (allocatedContainers.size() < containerNum ||
+            rejectedRequests.size() < rejNum)) {
+      maxCount--;
+      sleep(1000);
+    }
+  }
+
+  private static SchedulingRequest schedulingRequest(int numAllocations,
+      int priority, long allocReqId, int cores, int mem, String... tags) {
+    return schedulingRequest(numAllocations, priority, allocReqId, cores, mem,
+        ExecutionType.GUARANTEED, tags);
+  }
+
+  private static SchedulingRequest schedulingRequest(int numAllocations,
+      int priority, long allocReqId, int cores, int mem,
+      ExecutionType execType, String... tags) {
+    return SchedulingRequest.newBuilder()
+        .priority(Priority.newInstance(priority))
+        .allocationRequestId(allocReqId)
+        .allocationTags(new HashSet<>(Arrays.asList(tags)))
+        .executionType(ExecutionTypeRequest.newInstance(execType, true))
+        .resourceSizing(
+            ResourceSizing.newInstance(numAllocations,
+                Resource.newInstance(mem, cores)))
+        .build();
+  }
+}
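
For readers following the API rather than the test harness, the test above exercises the AM-side flow for placement constraints: register a mapping from source allocation tags to constraints, then submit SchedulingRequests carrying those tags (the RM-side switch, RM_PLACEMENT_CONSTRAINTS_ENABLED, is flipped in the test setup). Below is a minimal sketch of that flow; it is not part of this patch, uses only calls that appear in the test above, and names such as PlacementConstraintFlowSketch and "appmaster-host" are illustrative placeholders:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    import org.apache.hadoop.yarn.api.records.ExecutionType;
    import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.api.records.ResourceSizing;
    import org.apache.hadoop.yarn.api.records.SchedulingRequest;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
    import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;

    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

    /** Sketch of the AM-side placement-constraint flow exercised by the test above. */
    public final class PlacementConstraintFlowSketch {

      /** Registers an anti-affinity constraint for tag "foo" and asks for 4 such containers. */
      static void requestAntiAffineContainers(AMRMClientAsync<?> asyncClient) throws Exception {
        // 1. Map the source tag "foo" to "no two 'foo' allocations on the same node".
        Map<Set<String>, PlacementConstraint> pcMapping = new HashMap<>();
        pcMapping.put(Collections.singleton("foo"),
            PlacementConstraints.build(
                PlacementConstraints.targetNotIn(NODE, allocationTag("foo"))));

        // 2. Hand the mapping to the RM at registration time.
        asyncClient.registerApplicationMaster("appmaster-host", 10000, "", pcMapping);

        // 3. Submit a SchedulingRequest tagged "foo"; the scheduler applies the mapped constraint.
        SchedulingRequest req = SchedulingRequest.newBuilder()
            .priority(Priority.newInstance(1))
            .allocationRequestId(1L)
            .allocationTags(new HashSet<>(Collections.singleton("foo")))
            .executionType(ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED, true))
            .resourceSizing(ResourceSizing.newInstance(4, Resource.newInstance(512, 1)))
            .build();
        asyncClient.addSchedulingRequests(Collections.singletonList(req));
      }
    }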

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29d9e4d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index 563df0d..a504221 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -262,6 +262,9 @@ public class RMContainerImpl implements RMContainer {
       rmContext.getSystemMetricsPublisher().containerCreated(
           this, this.creationTime);
     }
+    if (this.container != null) {
+      this.allocationTags = this.container.getAllocationTags();
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29d9e4d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index 213d784..72376df 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -589,6 +589,7 @@ public abstract class AbstractYarnScheduler
     container.setVersion(status.getVersion());
     container.setExecutionType(status.getExecutionType());
     container.setAllocationRequestId(status.getAllocationRequestId());
+    container.setAllocationTags(status.getAllocationTags());
     ApplicationAttemptId attemptId =
         container.getId().getApplicationAttemptId();
     RMContainer rmContainer = new RMContainerImpl(container,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29d9e4d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index 88a9049..3930a35 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -672,6 +672,7 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
               containerType, container.getExecutionType(),
               container.getAllocationRequestId(),
               rmContainer.getAllocationTags()));
+      container.setAllocationTags(rmContainer.getAllocationTags());
       updateNMToken(container);
     } catch (IllegalArgumentException e) {
       // DNS might be down, skip returning this container.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29d9e4d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
index 956a3c9..c4b82e8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
@@ -64,12 +64,12 @@ public final class PlacementConstraintsUtil {
       throws InvalidAllocationTagsQueryException {
     long minScopeCardinality = 0;
     long maxScopeCardinality = 0;
-    if (sc.getScope() == PlacementConstraints.NODE) {
+    if (sc.getScope().equals(PlacementConstraints.NODE)) {
       minScopeCardinality = tm.getNodeCardinalityByOp(node.getNodeID(), appId,
           te.getTargetValues(), Long::max);
       maxScopeCardinality = tm.getNodeCardinalityByOp(node.getNodeID(), appId,
           te.getTargetValues(), Long::min);
-    } else if (sc.getScope() == PlacementConstraints.RACK) {
+    } else if (sc.getScope().equals(PlacementConstraints.RACK)) {
       minScopeCardinality = tm.getRackCardinalityByOp(node.getRackName(), appId,
           te.getTargetValues(), Long::max);
       maxScopeCardinality = tm.getRackCardinalityByOp(node.getRackName(), appId,
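
A note on the hunk above: it replaces reference equality with value equality when matching the constraint scope against PlacementConstraints.NODE / PlacementConstraints.RACK. Since the scope is a plain String that may arrive as a distinct instance (for example, presumably after being rebuilt from a protobuf message), "==" can be false even when the values match. A tiny stand-alone illustration, not part of the patch:

    import org.apache.hadoop.yarn.api.resource.PlacementConstraints;

    public class ScopeEqualityDemo {
      public static void main(String[] args) {
        // A scope value equal to, but not the same object as, the constant.
        String scopeFromRequest = new String(PlacementConstraints.NODE);
        System.out.println(scopeFromRequest == PlacementConstraints.NODE);      // false: different objects
        System.out.println(scopeFromRequest.equals(PlacementConstraints.NODE)); // true: same value
      }
    }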




[22/32] hadoop git commit: YARN-6599. Support anti-affinity constraint via AppPlacementAllocator. (Wangda Tan via asuresh)

Posted by as...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java
index a3b88c0..01d5e6c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoQueueCreation.java
@@ -170,7 +170,7 @@ public class TestCapacitySchedulerAutoQueueCreation
           1 * GB, 1, true, priority, recordFactory);
 
       cs.allocate(appAttemptId, Collections.<ResourceRequest>singletonList(r1),
-          Collections.<ContainerId>emptyList(), Collections.singletonList(host),
+          null, Collections.<ContainerId>emptyList(), Collections.singletonList(host),
           null, NULL_UPDATE_REQUESTS);
 
       //And this will result in container assignment for app1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSchedulingRequestUpdate.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSchedulingRequestUpdate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSchedulingRequestUpdate.java
new file mode 100644
index 0000000..b6ac4b6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSchedulingRequestUpdate.java
@@ -0,0 +1,260 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
+import org.apache.hadoop.yarn.util.resource.Resources;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+public class TestCapacitySchedulerSchedulingRequestUpdate
+    extends CapacitySchedulerTestBase {
+  @Test
+  public void testBasicPendingResourceUpdate() throws Exception {
+    Configuration conf = TestUtils.getConfigurationWithQueueLabels(
+        new Configuration(false));
+    conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
+
+    final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
+    mgr.init(conf);
+    mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y"));
+    mgr.addLabelsToNode(
+        ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
+
+    MockRM rm = new MockRM(conf) {
+      protected RMNodeLabelsManager createNodeLabelManager() {
+        return mgr;
+      }
+    };
+
+    rm.start();
+    MockNM nm1 = // label = x
+        new MockNM("h1:1234", 200 * GB, rm.getResourceTrackerService());
+    nm1.registerNode();
+
+    MockNM nm2 = // label = ""
+        new MockNM("h2:1234", 200 * GB, rm.getResourceTrackerService());
+    nm2.registerNode();
+
+    // Launch app1 in queue=a1
+    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "a1");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2);
+
+    // Launch app2 in queue=b1
+    RMApp app2 = rm.submitApp(8 * GB, "app", "user", null, "b1");
+    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm2);
+    // am1 asks for 8 * 1GB container for no label
+    am1.allocateIntraAppAntiAffinity(
+        ResourceSizing.newInstance(8, Resource.newInstance(1 * GB, 1)),
+        Priority.newInstance(1), 0, ImmutableSet.of("mapper", "reducer"),
+        "mapper", "reducer");
+
+    checkPendingResource(rm, "a1", 8 * GB, null);
+    checkPendingResource(rm, "a", 8 * GB, null);
+    checkPendingResource(rm, "root", 8 * GB, null);
+
+    // am2 asks for 8 * 1GB container for no label
+    am2.allocate(Arrays.asList(ResourceRequest
+        .newInstance(Priority.newInstance(1), "*",
+            Resources.createResource(1 * GB), 8)), null);
+
+    checkPendingResource(rm, "a1", 8 * GB, null);
+    checkPendingResource(rm, "a", 8 * GB, null);
+    checkPendingResource(rm, "b1", 8 * GB, null);
+    checkPendingResource(rm, "b", 8 * GB, null);
+    // root = a + b
+    checkPendingResource(rm, "root", 16 * GB, null);
+
+    // am2 asks for 8 * 1GB container in another priority for no label
+    am2.allocate(Arrays.asList(ResourceRequest
+        .newInstance(Priority.newInstance(2), "*",
+            Resources.createResource(1 * GB), 8)), null);
+
+    checkPendingResource(rm, "a1", 8 * GB, null);
+    checkPendingResource(rm, "a", 8 * GB, null);
+    checkPendingResource(rm, "b1", 16 * GB, null);
+    checkPendingResource(rm, "b", 16 * GB, null);
+    // root = a + b
+    checkPendingResource(rm, "root", 24 * GB, null);
+
+    // am1 now asks for 4 * 1GB containers at priority=1 instead of the
+    // earlier 8 * 1GB, which overrides the previous pending ask
+    am1.allocateIntraAppAntiAffinity(
+        ResourceSizing.newInstance(4, Resource.newInstance(1 * GB, 1)),
+        Priority.newInstance(1), 0, ImmutableSet.of("mapper", "reducer"),
+        "mapper", "reducer");
+
+    checkPendingResource(rm, "a1", 4 * GB, null);
+    checkPendingResource(rm, "a", 4 * GB, null);
+    checkPendingResource(rm, "b1", 16 * GB, null);
+    checkPendingResource(rm, "b", 16 * GB, null);
+    // root = a + b
+    checkPendingResource(rm, "root", 20 * GB, null);
+
+    // am1 asks for one 8GB container with label=x
+    am1.allocate(Arrays.asList(ResourceRequest
+        .newInstance(Priority.newInstance(2), "*",
+            Resources.createResource(8 * GB), 1, true, "x")), null);
+
+    checkPendingResource(rm, "a1", 4 * GB, null);
+    checkPendingResource(rm, "a", 4 * GB, null);
+    checkPendingResource(rm, "a1", 8 * GB, "x");
+    checkPendingResource(rm, "a", 8 * GB, "x");
+    checkPendingResource(rm, "b1", 16 * GB, null);
+    checkPendingResource(rm, "b", 16 * GB, null);
+    // root = a + b
+    checkPendingResource(rm, "root", 20 * GB, null);
+    checkPendingResource(rm, "root", 8 * GB, "x");
+
+    // complete am1/am2, pending resource should be 0 now
+    AppAttemptRemovedSchedulerEvent appRemovedEvent =
+        new AppAttemptRemovedSchedulerEvent(am2.getApplicationAttemptId(),
+            RMAppAttemptState.FINISHED, false);
+    rm.getResourceScheduler().handle(appRemovedEvent);
+    appRemovedEvent = new AppAttemptRemovedSchedulerEvent(
+        am1.getApplicationAttemptId(), RMAppAttemptState.FINISHED, false);
+    rm.getResourceScheduler().handle(appRemovedEvent);
+
+    checkPendingResource(rm, "a1", 0 * GB, null);
+    checkPendingResource(rm, "a", 0 * GB, null);
+    checkPendingResource(rm, "a1", 0 * GB, "x");
+    checkPendingResource(rm, "a", 0 * GB, "x");
+    checkPendingResource(rm, "b1", 0 * GB, null);
+    checkPendingResource(rm, "b", 0 * GB, null);
+    checkPendingResource(rm, "root", 0 * GB, null);
+    checkPendingResource(rm, "root", 0 * GB, "x");
+  }
+
+  @Test
+  public void testNodePartitionPendingResourceUpdate() throws Exception {
+    Configuration conf = TestUtils.getConfigurationWithQueueLabels(
+        new Configuration(false));
+    conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
+
+    final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
+    mgr.init(conf);
+    mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y"));
+    mgr.addLabelsToNode(
+        ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
+
+    MockRM rm = new MockRM(conf) {
+      protected RMNodeLabelsManager createNodeLabelManager() {
+        return mgr;
+      }
+    };
+
+    rm.start();
+    MockNM nm1 = // label = x
+        new MockNM("h1:1234", 200 * GB, rm.getResourceTrackerService());
+    nm1.registerNode();
+
+    MockNM nm2 = // label = ""
+        new MockNM("h2:1234", 200 * GB, rm.getResourceTrackerService());
+    nm2.registerNode();
+
+    // Launch app1 in queue=a1
+    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "a1");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2);
+
+    // Launch app2 in queue=b1
+    RMApp app2 = rm.submitApp(8 * GB, "app", "user", null, "b1");
+    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm2);
+    // am1 asks for 8 * 1GB container for "x"
+    am1.allocateIntraAppAntiAffinity("x",
+        ResourceSizing.newInstance(8, Resource.newInstance(1 * GB, 1)),
+        Priority.newInstance(1), 0, "mapper", "reducer");
+
+    checkPendingResource(rm, "a1", 8 * GB, "x");
+    checkPendingResource(rm, "a", 8 * GB, "x");
+    checkPendingResource(rm, "root", 8 * GB, "x");
+
+    // am2 asks for 8 * 1GB container for "x"
+    am2.allocateIntraAppAntiAffinity("x",
+        ResourceSizing.newInstance(8, Resource.newInstance(1 * GB, 1)),
+        Priority.newInstance(1), 0, "mapper", "reducer");
+
+    checkPendingResource(rm, "a1", 8 * GB, "x");
+    checkPendingResource(rm, "a", 8 * GB, "x");
+    checkPendingResource(rm, "b1", 8 * GB, "x");
+    checkPendingResource(rm, "b", 8 * GB, "x");
+    // root = a + b
+    checkPendingResource(rm, "root", 16 * GB, "x");
+
+    // am1 asks for 6 * 1GB container for "x" in another priority
+    am1.allocateIntraAppAntiAffinity("x",
+        ResourceSizing.newInstance(6, Resource.newInstance(1 * GB, 1)),
+        Priority.newInstance(2), 0, "mapper", "reducer");
+
+    checkPendingResource(rm, "a1", 14 * GB, "x");
+    checkPendingResource(rm, "a", 14 * GB, "x");
+    checkPendingResource(rm, "b1", 8 * GB, "x");
+    checkPendingResource(rm, "b", 8 * GB, "x");
+    // root = a + b
+    checkPendingResource(rm, "root", 22 * GB, "x");
+
+    // am1 asks for 4 * 1GB container for "x" in priority=1, which should
+    // override 8 * 1GB
+    am1.allocateIntraAppAntiAffinity("x",
+        ResourceSizing.newInstance(4, Resource.newInstance(1 * GB, 1)),
+        Priority.newInstance(1), 0, "mapper", "reducer");
+
+    checkPendingResource(rm, "a1", 10 * GB, "x");
+    checkPendingResource(rm, "a", 10 * GB, "x");
+    checkPendingResource(rm, "b1", 8 * GB, "x");
+    checkPendingResource(rm, "b", 8 * GB, "x");
+    // root = a + b
+    checkPendingResource(rm, "root", 18 * GB, "x");
+
+    // complete am1/am2, pending resource should be 0 now
+    AppAttemptRemovedSchedulerEvent appRemovedEvent =
+        new AppAttemptRemovedSchedulerEvent(am2.getApplicationAttemptId(),
+            RMAppAttemptState.FINISHED, false);
+    rm.getResourceScheduler().handle(appRemovedEvent);
+    appRemovedEvent = new AppAttemptRemovedSchedulerEvent(
+        am1.getApplicationAttemptId(), RMAppAttemptState.FINISHED, false);
+    rm.getResourceScheduler().handle(appRemovedEvent);
+
+    checkPendingResource(rm, "a1", 0 * GB, null);
+    checkPendingResource(rm, "a", 0 * GB, null);
+    checkPendingResource(rm, "a1", 0 * GB, "x");
+    checkPendingResource(rm, "a", 0 * GB, "x");
+    checkPendingResource(rm, "b1", 0 * GB, null);
+    checkPendingResource(rm, "b", 0 * GB, null);
+    checkPendingResource(rm, "root", 0 * GB, null);
+    checkPendingResource(rm, "root", 0 * GB, "x");
+  }
+}
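
As a reading aid (not part of the patch), the override semantics that testBasicPendingResourceUpdate checks reduce to simple arithmetic: am1's first anti-affinity request pends 8 x 1GB = 8GB under a1; re-sending a request at the same priority with sizing 4 x 1GB replaces that figure, so a1 drops to 4GB rather than growing to 12GB, while b1 still pends 16GB and root remains the sum of its children, 4GB + 16GB = 20GB in the unlabeled partition. testNodePartitionPendingResourceUpdate applies the same bookkeeping per node partition (label "x").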

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java
index d2e28be..a800bef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java
@@ -132,7 +132,7 @@ public class TestIncreaseAllocationExpirer {
     Assert.assertEquals(RMContainerState.RUNNING,
         rm1.getResourceScheduler().getRMContainer(containerId2).getState());
     // Verify container size is 3G
-    Assert.assertEquals(
+      Assert.assertEquals(
         3 * GB, rm1.getResourceScheduler().getRMContainer(containerId2)
             .getAllocatedResource().getMemorySize());
     // Verify total resource usage

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java
new file mode 100644
index 0000000..0a44a1e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java
@@ -0,0 +1,277 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestSchedulingRequestContainerAllocation {
+  private final int GB = 1024;
+
+  private YarnConfiguration conf;
+
+  RMNodeLabelsManager mgr;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = new YarnConfiguration();
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+    mgr = new NullRMNodeLabelsManager();
+    mgr.init(conf);
+  }
+
+  @Test
+  public void testIntraAppAntiAffinity() throws Exception {
+    Configuration csConf = TestUtils.getConfigurationWithMultipleQueues(
+        new Configuration());
+    csConf.setBoolean(CapacitySchedulerConfiguration.SCHEDULING_REQUEST_ALLOWED,
+        true);
+
+    // inject node label manager
+    MockRM rm1 = new MockRM(csConf) {
+      @Override
+      public RMNodeLabelsManager createNodeLabelManager() {
+        return mgr;
+      }
+    };
+
+    rm1.getRMContext().setNodeLabelManager(mgr);
+    rm1.start();
+
+    // 4 NMs.
+    MockNM[] nms = new MockNM[4];
+    RMNode[] rmNodes = new RMNode[4];
+    for (int i = 0; i < 4; i++) {
+      nms[i] = rm1.registerNode("192.168.0." + i + ":1234", 10 * GB);
+      rmNodes[i] = rm1.getRMContext().getRMNodes().get(nms[i].getNodeId());
+    }
+
+    // app1 -> c
+    RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "c");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nms[0]);
+
+    // app1 asks for 10 anti-affinity containers for the same app. It should
+    // only get 4 containers allocated because we only have 4 nodes.
+    am1.allocateIntraAppAntiAffinity(
+        ResourceSizing.newInstance(10, Resource.newInstance(1024, 1)),
+        Priority.newInstance(1), 1L, ImmutableSet.of("mapper"), "mapper");
+
+    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
+
+    for (int i = 0; i < 3; i++) {
+      for (int j = 0; j < 4; j++) {
+        cs.handle(new NodeUpdateSchedulerEvent(rmNodes[j]));
+      }
+    }
+
+    // App1 should get 5 containers allocated (1 AM + 1 container per node).
+    FiCaSchedulerApp schedulerApp = cs.getApplicationAttempt(
+        am1.getApplicationAttemptId());
+    Assert.assertEquals(5, schedulerApp.getLiveContainers().size());
+
+    // Similarly, app1 asks for 10 anti-affinity containers at a different
+    // priority; again only 4 can be allocated because we only have 4 nodes.
+    am1.allocateIntraAppAntiAffinity(
+        ResourceSizing.newInstance(10, Resource.newInstance(2048, 1)),
+        Priority.newInstance(2), 1L, ImmutableSet.of("reducer"), "reducer");
+
+    for (int i = 0; i < 3; i++) {
+      for (int j = 0; j < 4; j++) {
+        cs.handle(new NodeUpdateSchedulerEvent(rmNodes[j]));
+      }
+    }
+
+    // App1 should get 9 containers allocated (1 AM + 8 containers).
+    Assert.assertEquals(9, schedulerApp.getLiveContainers().size());
+
+    // Ask for containers anti-affine to "mapper" (with tag "reducer2"); since
+    // every node already hosts a "mapper" container, nothing new is allocated.
+    am1.allocateIntraAppAntiAffinity(
+        ResourceSizing.newInstance(10, Resource.newInstance(2048, 1)),
+        Priority.newInstance(3), 1L, ImmutableSet.of("reducer2"), "mapper");
+    for (int i = 0; i < 3; i++) {
+      for (int j = 0; j < 4; j++) {
+        cs.handle(new NodeUpdateSchedulerEvent(rmNodes[j]));
+      }
+    }
+
+    // App1 should still have 9 containers allocated (1 AM + 8 tasks); the
+    // third request gets nothing.
+    Assert.assertEquals(9, schedulerApp.getLiveContainers().size());
+
+    rm1.close();
+  }
+
+  @Test
+  public void testIntraAppAntiAffinityWithMultipleTags() throws Exception {
+    Configuration csConf = TestUtils.getConfigurationWithMultipleQueues(
+        new Configuration());
+    csConf.setBoolean(CapacitySchedulerConfiguration.SCHEDULING_REQUEST_ALLOWED,
+        true);
+
+    // inject node label manager
+    MockRM rm1 = new MockRM(csConf) {
+      @Override
+      public RMNodeLabelsManager createNodeLabelManager() {
+        return mgr;
+      }
+    };
+
+    rm1.getRMContext().setNodeLabelManager(mgr);
+    rm1.start();
+
+    // 4 NMs.
+    MockNM[] nms = new MockNM[4];
+    RMNode[] rmNodes = new RMNode[4];
+    for (int i = 0; i < 4; i++) {
+      nms[i] = rm1.registerNode("192.168.0." + i + ":1234", 10 * GB);
+      rmNodes[i] = rm1.getRMContext().getRMNodes().get(nms[i].getNodeId());
+    }
+
+    // app1 -> c
+    RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "c");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nms[0]);
+
+    // app1 asks for 2 anti-affinity containers for the same app.
+    am1.allocateIntraAppAntiAffinity(
+        ResourceSizing.newInstance(2, Resource.newInstance(1024, 1)),
+        Priority.newInstance(1), 1L, ImmutableSet.of("tag_1_1", "tag_1_2"),
+        "tag_1_1", "tag_1_2");
+
+    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
+
+    for (int i = 0; i < 3; i++) {
+      for (int j = 0; j < 4; j++) {
+        cs.handle(new NodeUpdateSchedulerEvent(rmNodes[j]));
+      }
+    }
+
+    // App1 should get 3 containers allocated (1 AM + 2 task).
+    FiCaSchedulerApp schedulerApp = cs.getApplicationAttempt(
+        am1.getApplicationAttemptId());
+    Assert.assertEquals(3, schedulerApp.getLiveContainers().size());
+
+    // app1 asks for 1 anti-affinity container for the same app, anti-affine
+    // to tag_1_1/tag_1_2, with allocation tags tag_2_1/tag_2_2.
+    am1.allocateIntraAppAntiAffinity(
+        ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)),
+        Priority.newInstance(2), 1L, ImmutableSet.of("tag_2_1", "tag_2_2"),
+        "tag_1_1", "tag_1_2");
+
+    for (int i = 0; i < 3; i++) {
+      for (int j = 0; j < 4; j++) {
+        cs.handle(new NodeUpdateSchedulerEvent(rmNodes[j]));
+      }
+    }
+
+    // App1 should get 4 containers allocated (1 AM + 2 task (first request) +
+    // 1 task (2nd request).
+    Assert.assertEquals(4, schedulerApp.getLiveContainers().size());
+
+    // app1 asks for 1 anti-affinity container for the same app, anti-affine
+    // to tag_1_1/tag_1_2/tag_2_1/tag_2_2, with allocation tag tag_3.
+    am1.allocateIntraAppAntiAffinity(
+        ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)),
+        Priority.newInstance(3), 1L, ImmutableSet.of("tag_3"),
+        "tag_1_1", "tag_1_2", "tag_2_1", "tag_2_2");
+
+    for (int i = 0; i < 3; i++) {
+      for (int j = 0; j < 4; j++) {
+        cs.handle(new NodeUpdateSchedulerEvent(rmNodes[j]));
+      }
+    }
+
+    // App1 should get 1 more container allocated:
+    // 1 AM + 2 tasks (first request) + 1 task (2nd request) +
+    // 1 task (3rd request)
+    Assert.assertEquals(5, schedulerApp.getLiveContainers().size());
+
+    rm1.close();
+  }
+
+  @Test
+  public void testSchedulingRequestDisabledByDefault() throws Exception {
+    Configuration csConf = TestUtils.getConfigurationWithMultipleQueues(
+        new Configuration());
+
+    // inject node label manager
+    MockRM rm1 = new MockRM(csConf) {
+      @Override
+      public RMNodeLabelsManager createNodeLabelManager() {
+        return mgr;
+      }
+    };
+
+    rm1.getRMContext().setNodeLabelManager(mgr);
+    rm1.start();
+
+    // 4 NMs.
+    MockNM[] nms = new MockNM[4];
+    RMNode[] rmNodes = new RMNode[4];
+    for (int i = 0; i < 4; i++) {
+      nms[i] = rm1.registerNode("192.168.0." + i + ":1234", 10 * GB);
+      rmNodes[i] = rm1.getRMContext().getRMNodes().get(nms[i].getNodeId());
+    }
+
+    // app1 -> c
+    RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "c");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nms[0]);
+
+    // app1 asks for 2 anti-affinity containers for the same app.
+    boolean caughtException = false;
+    try {
+      // Since feature is disabled by default, we should expect exception.
+      am1.allocateIntraAppAntiAffinity(
+          ResourceSizing.newInstance(2, Resource.newInstance(1024, 1)),
+          Priority.newInstance(1), 1L, ImmutableSet.of("tag_1_1", "tag_1_2"),
+          "tag_1_1", "tag_1_2");
+    } catch (Exception e) {
+      caughtException = true;
+    }
+    Assert.assertTrue(caughtException);
+    rm1.close();
+  }
+}
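
As testSchedulingRequestDisabledByDefault shows, the Capacity Scheduler rejects SchedulingRequests unless they are explicitly allowed, and the client-side test earlier in this thread additionally turns on placement-constraint handling in the RM. A short sketch of the two programmatic toggles used across these tests; it is illustrative only, and the tests above enable them individually rather than together:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;

    public final class PlacementConstraintToggles {
      /** Returns a copy of the given configuration with both switches enabled. */
      public static Configuration enable(Configuration base) {
        Configuration conf = new Configuration(base);
        // Enable placement-constraint handling in the RM
        // (as in TestAMRMClientPlacementConstraints).
        conf.setBoolean(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ENABLED, true);
        // Let the Capacity Scheduler accept SchedulingRequests, which are
        // rejected by default (as in the tests above).
        conf.setBoolean(CapacitySchedulerConfiguration.SCHEDULING_REQUEST_ALLOWED, true);
        return conf;
      }
    }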

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocationAsync.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocationAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocationAsync.java
new file mode 100644
index 0000000..c7f13cd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocationAsync.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestSchedulingRequestContainerAllocationAsync {
+  private final int GB = 1024;
+
+  private YarnConfiguration conf;
+
+  RMNodeLabelsManager mgr;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = new YarnConfiguration();
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+    mgr = new NullRMNodeLabelsManager();
+    mgr.init(conf);
+  }
+
+  private void testIntraAppAntiAffinityAsync(int numThreads) throws Exception {
+    Configuration csConf = TestUtils.getConfigurationWithMultipleQueues(
+        new Configuration());
+    csConf.setBoolean(CapacitySchedulerConfiguration.SCHEDULING_REQUEST_ALLOWED,
+        true);
+    csConf.setInt(
+        CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_THREAD,
+        numThreads);
+    csConf.setInt(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_PREFIX
+        + ".scheduling-interval-ms", 0);
+
+    // inject node label manager
+    MockRM rm1 = new MockRM(csConf) {
+      @Override
+      public RMNodeLabelsManager createNodeLabelManager() {
+        return mgr;
+      }
+    };
+
+    rm1.getRMContext().setNodeLabelManager(mgr);
+    rm1.start();
+
+    // 200 NMs.
+    int nNMs = 200;
+    MockNM[] nms = new MockNM[nNMs];
+    RMNode[] rmNodes = new RMNode[nNMs];
+    for (int i = 0; i < nNMs; i++) {
+      nms[i] = rm1.registerNode("127.0.0." + i + ":1234", 10 * GB);
+      rmNodes[i] = rm1.getRMContext().getRMNodes().get(nms[i].getNodeId());
+    }
+
+    // app1 -> c
+    RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "c");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nms[0]);
+
+    // app1 asks for 1000 anti-affinity containers for the same app. At most
+    // one can be placed on each node, so only nNMs of them can be allocated.
+    am1.allocateIntraAppAntiAffinity(
+        ResourceSizing.newInstance(1000, Resource.newInstance(1024, 1)),
+        Priority.newInstance(1), 1L, ImmutableSet.of("mapper"), "mapper");
+
+    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
+
+    for (int i = 0; i < 3; i++) {
+      for (int j = 0; j < nNMs; j++) {
+        cs.handle(new NodeUpdateSchedulerEvent(rmNodes[j]));
+      }
+    }
+
+    // App1 should get #NM + 1 containers allocated (1 node each + 1 AM).
+    FiCaSchedulerApp schedulerApp = cs.getApplicationAttempt(
+        am1.getApplicationAttemptId());
+    Assert.assertEquals(nNMs + 1, schedulerApp.getLiveContainers().size());
+
+    rm1.close();
+  }
+
+  @Test(timeout = 300000)
+  public void testSingleThreadAsyncContainerAllocation() throws Exception {
+    testIntraAppAntiAffinityAsync(1);
+  }
+
+  @Test(timeout = 300000)
+  public void testTwoThreadsAsyncContainerAllocation() throws Exception {
+    testIntraAppAntiAffinityAsync(2);
+  }
+
+  @Test(timeout = 300000)
+  public void testThreeThreadsAsyncContainerAllocation() throws Exception {
+    testIntraAppAntiAffinityAsync(3);
+  }
+
+  @Test(timeout = 300000)
+  public void testFourThreadsAsyncContainerAllocation() throws Exception {
+    testIntraAppAntiAffinityAsync(4);
+  }
+
+  @Test(timeout = 300000)
+  public void testFiveThreadsAsyncContainerAllocation() throws Exception {
+    testIntraAppAntiAffinityAsync(5);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
index e8734cc..542ba3e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
@@ -275,6 +275,8 @@ public class TestUtils {
   public static Configuration getConfigurationWithQueueLabels(Configuration config) {
     CapacitySchedulerConfiguration conf =
         new CapacitySchedulerConfiguration(config);
+    conf.setBoolean(CapacitySchedulerConfiguration.SCHEDULING_REQUEST_ALLOWED,
+        true);
     
     // Define top-level queues
     conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b", "c"});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
index f1d5663..7afe4ef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
@@ -20,10 +20,10 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
 
-import java.util.List;
-
+import com.google.common.collect.ImmutableSet;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
@@ -33,7 +33,7 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
-import com.google.common.collect.ImmutableSet;
+import java.util.List;
 
 /**
  * Test functionality of AllocationTagsManager.
@@ -54,7 +54,6 @@ public class TestAllocationTagsManager {
     rmContext = rm.getRMContext();
   }
 
-
   @Test
   public void testAllocationTagsManagerSimpleCases()
       throws InvalidAllocationTagsQueryException {
@@ -141,30 +140,31 @@ public class TestAllocationTagsManager {
 
     // Get Node Cardinality of app1 on node2, with tag "<applicationId>", op=max
     // (Expect this returns #containers from app1 on node2)
-    Assert
-        .assertEquals(2,
-            atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
-                TestUtils.getMockApplicationId(1),
-                ImmutableSet.of(AllocationTagsNamespaces.APP_ID
-                    + TestUtils.getMockApplicationId(1).toString()),
-                Long::max));
+    Assert.assertEquals(2,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
+            TestUtils.getMockApplicationId(1), null, Long::max));
 
     // Get Node Cardinality of app1 on node2, with empty tag set, op=max
     Assert.assertEquals(2,
         atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
+            TestUtils.getMockApplicationId(1), null, Long::max));
+
+    // Get Node Cardinality of app1 on node2, with an explicitly empty tag set, op=max
+    Assert.assertEquals(2,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::max));
 
     // Get Node Cardinality of all apps on node2, with empty tag set, op=sum
-    Assert.assertEquals(7, atm.getNodeCardinalityByOp(
+    Assert.assertEquals(4, atm.getNodeCardinalityByOp(
         NodeId.fromString("host2:123"), null, ImmutableSet.of(), Long::sum));
 
     // Get Node Cardinality of app_1 on node2, with empty tag set, op=sum
-    Assert.assertEquals(5,
+    Assert.assertEquals(3,
         atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::sum));
 
     // Get Node Cardinality of app_1 on node2, with empty tag set, op=sum
-    Assert.assertEquals(2,
+    Assert.assertEquals(1,
         atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(2), ImmutableSet.of(), Long::sum));
 
@@ -296,7 +296,7 @@ public class TestAllocationTagsManager {
     Assert.assertEquals(3, atm.getRackCardinality("rack0", null, "reducer"));
 
     // Get Rack Cardinality of app_1 on rack0, with empty tag set, op=max
-    Assert.assertEquals(2, atm.getRackCardinalityByOp("rack0",
+    Assert.assertEquals(1, atm.getRackCardinalityByOp("rack0",
         TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::max));
 
     // Get Rack Cardinality of app_1 on rack0, with empty tag set, op=min

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
index 7492233..8ad726e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
@@ -117,9 +117,9 @@ public class TestPlacementConstraintsUtil {
       RMNode currentNode = nodeIterator.next();
       FiCaSchedulerNode schedulerNode = TestUtils.getMockNode(
           currentNode.getHostName(), currentNode.getRackName(), 123, 4 * GB);
-      Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+      Assert.assertFalse(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
           sourceTag1, schedulerNode, pcm, tm));
-      Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+      Assert.assertFalse(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
           sourceTag2, schedulerNode, pcm, tm));
     }
     /**
@@ -145,14 +145,14 @@ public class TestPlacementConstraintsUtil {
     tm.addContainer(n0_r1.getNodeID(), hbase_m, ImmutableSet.of("hbase-m"));
 
     // 'spark' placement on Node0 should now SUCCEED
-    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
         sourceTag1, schedulerNode0, pcm, tm));
     // FAIL on the rest of the nodes
-    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
         sourceTag1, schedulerNode1, pcm, tm));
-    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
         sourceTag1, schedulerNode2, pcm, tm));
-    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
         sourceTag1, schedulerNode3, pcm, tm));
   }
 
@@ -187,15 +187,15 @@ public class TestPlacementConstraintsUtil {
     FiCaSchedulerNode schedulerNode3 = TestUtils
         .getMockNode(n3_r2.getHostName(), n3_r2.getRackName(), 123, 4 * GB);
     // 'zk' placement on Rack1 should now SUCCEED
-    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
         sourceTag2, schedulerNode0, pcm, tm));
-    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
         sourceTag2, schedulerNode1, pcm, tm));
 
     // FAIL on the rest of the RACKs
-    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
         sourceTag2, schedulerNode2, pcm, tm));
-    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
         sourceTag2, schedulerNode3, pcm, tm));
   }
 
@@ -230,14 +230,14 @@ public class TestPlacementConstraintsUtil {
     tm.addContainer(n0_r1.getNodeID(), hbase_m, ImmutableSet.of("hbase-m"));
 
     // 'spark' placement on Node0 should now FAIL
-    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
         sourceTag1, schedulerNode0, pcm, tm));
     // SUCCEED on the rest of the nodes
-    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
         sourceTag1, schedulerNode1, pcm, tm));
-    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
         sourceTag1, schedulerNode2, pcm, tm));
-    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
         sourceTag1, schedulerNode3, pcm, tm));
   }
 
@@ -273,15 +273,15 @@ public class TestPlacementConstraintsUtil {
         .getMockNode(n3_r2.getHostName(), n3_r2.getRackName(), 123, 4 * GB);
 
     // 'zk' placement on Rack1 should FAIL
-    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
         sourceTag2, schedulerNode0, pcm, tm));
-    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
         sourceTag2, schedulerNode1, pcm, tm));
 
     // SUCCEED on the rest of the RACKs
-    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
         sourceTag2, schedulerNode2, pcm, tm));
-    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
         sourceTag2, schedulerNode3, pcm, tm));
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
index 5f29186..b998564 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
@@ -192,7 +192,7 @@ public class FairSchedulerTestBase {
     resourceManager.getRMContext().getRMApps()
         .put(id.getApplicationId(), rmApp);
 
-    scheduler.allocate(id, ask, new ArrayList<ContainerId>(),
+    scheduler.allocate(id, ask, null, new ArrayList<ContainerId>(),
         null, null, NULL_UPDATE_REQUESTS);
     scheduler.update();
     return id;
@@ -222,7 +222,7 @@ public class FairSchedulerTestBase {
     resourceManager.getRMContext().getRMApps()
         .put(id.getApplicationId(), rmApp);
 
-    scheduler.allocate(id, ask, new ArrayList<ContainerId>(),
+    scheduler.allocate(id, ask, null, new ArrayList<ContainerId>(),
         null, null, NULL_UPDATE_REQUESTS);
     return id;
   }
@@ -245,7 +245,7 @@ public class FairSchedulerTestBase {
       ResourceRequest request, ApplicationAttemptId attId) {
     List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
     ask.add(request);
-    scheduler.allocate(attId, ask,  new ArrayList<ContainerId>(),
+    scheduler.allocate(attId, ask, null, new ArrayList<ContainerId>(),
         null, null, NULL_UPDATE_REQUESTS);
     scheduler.update();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
index 95dbaea..2512787 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
@@ -125,7 +125,7 @@ public class TestContinuousScheduling extends FairSchedulerTestBase {
     List<ResourceRequest> ask = new ArrayList<>();
     ask.add(createResourceRequest(1024, 1, ResourceRequest.ANY, 1, 1, true));
     scheduler.allocate(
-        appAttemptId, ask, new ArrayList<ContainerId>(),
+        appAttemptId, ask, null, new ArrayList<ContainerId>(),
         null, null, NULL_UPDATE_REQUESTS);
     FSAppAttempt app = scheduler.getSchedulerApp(appAttemptId);
 
@@ -163,8 +163,7 @@ public class TestContinuousScheduling extends FairSchedulerTestBase {
     ResourceRequest request =
         createResourceRequest(1024, 1, ResourceRequest.ANY, 1, 1, true);
     ask.add(request);
-    scheduler.allocate(appAttemptId, ask,
-        new ArrayList<ContainerId>(), null, null, NULL_UPDATE_REQUESTS);
+    scheduler.allocate(appAttemptId, ask, null, new ArrayList<ContainerId>(), null, null, NULL_UPDATE_REQUESTS);
     triggerSchedulingAttempt();
 
     FSAppAttempt app = scheduler.getSchedulerApp(appAttemptId);
@@ -175,8 +174,7 @@ public class TestContinuousScheduling extends FairSchedulerTestBase {
         createResourceRequest(1024, 1, ResourceRequest.ANY, 2, 1, true);
     ask.clear();
     ask.add(request);
-    scheduler.allocate(appAttemptId, ask,
-        new ArrayList<ContainerId>(), null, null, NULL_UPDATE_REQUESTS);
+    scheduler.allocate(appAttemptId, ask, null, new ArrayList<ContainerId>(), null, null, NULL_UPDATE_REQUESTS);
     triggerSchedulingAttempt();
 
     checkAppConsumption(app, Resources.createResource(2048,2));
@@ -373,7 +371,7 @@ public class TestContinuousScheduling extends FairSchedulerTestBase {
         true);
     ask1.add(request1);
     ask1.add(request2);
-    scheduler.allocate(id11, ask1, new ArrayList<ContainerId>(), null, null,
+    scheduler.allocate(id11, ask1, null, new ArrayList<ContainerId>(), null, null,
         NULL_UPDATE_REQUESTS);
 
     NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 77b6d04..d9c06a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -1280,7 +1280,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     List<ResourceRequest> asks = new ArrayList<ResourceRequest>();
     asks.add(createResourceRequest(2048, node2.getRackName(), 1, 1, false));
 
-    scheduler.allocate(attemptId, asks, new ArrayList<ContainerId>(), null,
+    scheduler.allocate(attemptId, asks, null, new ArrayList<ContainerId>(), null,
             null, NULL_UPDATE_REQUESTS);
 
     ApplicationAttemptId attId = createSchedulingRequest(2048, "queue1", "user1", 1);
@@ -2125,7 +2125,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     ResourceRequest request1 = createResourceRequest(minReqSize * 2,
         ResourceRequest.ANY, 1, 1, true);
     ask1.add(request1);
-    scheduler.allocate(id11, ask1, new ArrayList<ContainerId>(),
+    scheduler.allocate(id11, ask1, null, new ArrayList<ContainerId>(),
         null, null, NULL_UPDATE_REQUESTS);
 
     // Second ask, queue2 requests 1 large.
@@ -2141,7 +2141,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
         ResourceRequest.ANY, 1, 1, false);
     ask2.add(request2);
     ask2.add(request3);
-    scheduler.allocate(id21, ask2, new ArrayList<ContainerId>(),
+    scheduler.allocate(id21, ask2, null, new ArrayList<ContainerId>(),
         null, null, NULL_UPDATE_REQUESTS);
 
     // Third ask, queue2 requests 2 small (minReqSize).
@@ -2157,7 +2157,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
         ResourceRequest.ANY, 2, 2, true);
     ask3.add(request4);
     ask3.add(request5);
-    scheduler.allocate(id22, ask3, new ArrayList<ContainerId>(),
+    scheduler.allocate(id22, ask3, null, new ArrayList<ContainerId>(),
         null, null, NULL_UPDATE_REQUESTS);
 
     scheduler.update();
@@ -2683,7 +2683,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     // Complete the first container so we can trigger allocation for app2
     ContainerId containerId =
         app1.getLiveContainers().iterator().next().getContainerId();
-    scheduler.allocate(app1.getApplicationAttemptId(), new ArrayList<>(),
+    scheduler.allocate(app1.getApplicationAttemptId(), new ArrayList<>(), null,
         Arrays.asList(containerId), null, null, NULL_UPDATE_REQUESTS);
 
     // Trigger allocation for app2
@@ -2769,7 +2769,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     asks.add(createResourceRequest(1024, node3.getRackName(), 1, 1, true));
     asks.add(createResourceRequest(1024, ResourceRequest.ANY, 1, 2, true));
 
-    scheduler.allocate(attemptId, asks, new ArrayList<ContainerId>(), null,
+    scheduler.allocate(attemptId, asks, null, new ArrayList<ContainerId>(), null,
         null, NULL_UPDATE_REQUESTS);
     
     // node 1 checks in
@@ -3216,7 +3216,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
         createResourceRequest(1024, node1.getHostName(), 1, 0, true),
         createResourceRequest(1024, "rack1", 1, 0, true),
         createResourceRequest(1024, ResourceRequest.ANY, 1, 1, true));
-    scheduler.allocate(attId1, update, new ArrayList<ContainerId>(),
+    scheduler.allocate(attId1, update, null, new ArrayList<ContainerId>(),
         null, null, NULL_UPDATE_REQUESTS);
     
     // then node2 should get the container
@@ -4432,7 +4432,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
         createResourceRequest(1024, 8, ResourceRequest.ANY, 1, 1, true);
 
     ask1.add(request1);
-    scheduler.allocate(id11, ask1, new ArrayList<ContainerId>(), null,
+    scheduler.allocate(id11, ask1, null, new ArrayList<ContainerId>(), null,
         null, NULL_UPDATE_REQUESTS);
 
     String hostName = "127.0.0.1";
@@ -4508,11 +4508,11 @@ public class TestFairScheduler extends FairSchedulerTestBase {
 
     // Verify the blacklist can be updated independent of requesting containers
     scheduler.allocate(appAttemptId, Collections.<ResourceRequest>emptyList(),
-        Collections.<ContainerId>emptyList(),
+        null, Collections.<ContainerId>emptyList(),
         Collections.singletonList(host), null, NULL_UPDATE_REQUESTS);
     assertTrue(app.isPlaceBlacklisted(host));
     scheduler.allocate(appAttemptId, Collections.<ResourceRequest>emptyList(),
-        Collections.<ContainerId>emptyList(), null,
+        null, Collections.<ContainerId>emptyList(), null,
         Collections.singletonList(host), NULL_UPDATE_REQUESTS);
     assertFalse(scheduler.getSchedulerApp(appAttemptId)
         .isPlaceBlacklisted(host));
@@ -4521,8 +4521,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
         createResourceRequest(GB, node.getHostName(), 1, 0, true));
 
     // Verify a container does not actually get placed on the blacklisted host
-    scheduler.allocate(appAttemptId, update,
-        Collections.<ContainerId>emptyList(),
+    scheduler.allocate(appAttemptId, update, null, Collections.<ContainerId>emptyList(),
         Collections.singletonList(host), null, NULL_UPDATE_REQUESTS);
     assertTrue(app.isPlaceBlacklisted(host));
     scheduler.update();
@@ -4531,8 +4530,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
         .getLiveContainers().size());
 
     // Verify a container gets placed on the empty blacklist
-    scheduler.allocate(appAttemptId, update,
-        Collections.<ContainerId>emptyList(), null,
+    scheduler.allocate(appAttemptId, update, null, Collections.<ContainerId>emptyList(), null,
         Collections.singletonList(host), NULL_UPDATE_REQUESTS);
     assertFalse(app.isPlaceBlacklisted(host));
     createSchedulingRequest(GB, "root.default", "user", 1);
@@ -5391,8 +5389,8 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     ask1.add(request3);
 
     // Perform allocation
-    scheduler.allocate(appAttemptId, ask1, new ArrayList<ContainerId>(), null,
-        null, NULL_UPDATE_REQUESTS);
+    scheduler.allocate(appAttemptId, ask1, null, new ArrayList<ContainerId>(),
+        null, null, NULL_UPDATE_REQUESTS);
     scheduler.update();
     scheduler.handle(new NodeUpdateSchedulerEvent(node));
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
index db749ac..8814c0e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
@@ -281,7 +281,7 @@ public class TestFifoScheduler {
     ask.add(nodeLocal);
     ask.add(rackLocal);
     ask.add(any);
-    scheduler.allocate(appAttemptId, ask, new ArrayList<ContainerId>(),
+    scheduler.allocate(appAttemptId, ask, null, new ArrayList<ContainerId>(),
         null, null, NULL_UPDATE_REQUESTS);
 
     NodeUpdateSchedulerEvent node0Update = new NodeUpdateSchedulerEvent(node0);
@@ -378,7 +378,7 @@ public class TestFifoScheduler {
     ask.add(nodeLocal);
     ask.add(rackLocal);
     ask.add(any);
-    scheduler.allocate(appAttemptId, ask, new ArrayList<ContainerId>(),
+    scheduler.allocate(appAttemptId, ask, null, new ArrayList<ContainerId>(),
         null, null, NULL_UPDATE_REQUESTS);
 
     // Before the node update event, there are one local request
@@ -954,7 +954,7 @@ public class TestFifoScheduler {
     ask1.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),
         ResourceRequest.ANY, BuilderUtils.newResource(GB, 1), 1,
         RMNodeLabelsManager.NO_LABEL));
-    fs.allocate(appAttemptId1, ask1, emptyId,
+    fs.allocate(appAttemptId1, ask1, null, emptyId,
         Collections.singletonList(host_1_0), null, NULL_UPDATE_REQUESTS);
 
     // Trigger container assignment
@@ -963,7 +963,7 @@ public class TestFifoScheduler {
     // Get the allocation for the application and verify no allocation on
     // blacklist node
     Allocation allocation1 =
-        fs.allocate(appAttemptId1, emptyAsk, emptyId,
+        fs.allocate(appAttemptId1, emptyAsk, null, emptyId,
             null, null, NULL_UPDATE_REQUESTS);
 
     Assert.assertEquals("allocation1", 0, allocation1.getContainers().size());
@@ -971,7 +971,7 @@ public class TestFifoScheduler {
     // verify host_1_1 can get allocated as not in blacklist
     fs.handle(new NodeUpdateSchedulerEvent(n4));
     Allocation allocation2 =
-        fs.allocate(appAttemptId1, emptyAsk, emptyId,
+        fs.allocate(appAttemptId1, emptyAsk, null, emptyId,
             null, null, NULL_UPDATE_REQUESTS);
     Assert.assertEquals("allocation2", 1, allocation2.getContainers().size());
     List<Container> containerList = allocation2.getContainers();
@@ -986,33 +986,33 @@ public class TestFifoScheduler {
     // be assigned
     ask2.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),
         ResourceRequest.ANY, BuilderUtils.newResource(GB, 1), 1));
-    fs.allocate(appAttemptId1, ask2, emptyId,
+    fs.allocate(appAttemptId1, ask2, null, emptyId,
         Collections.singletonList("rack0"), null, NULL_UPDATE_REQUESTS);
 
     // verify n1 is not qualified to be allocated
     fs.handle(new NodeUpdateSchedulerEvent(n1));
     Allocation allocation3 =
-        fs.allocate(appAttemptId1, emptyAsk, emptyId,
+        fs.allocate(appAttemptId1, emptyAsk, null, emptyId,
             null, null, NULL_UPDATE_REQUESTS);
     Assert.assertEquals("allocation3", 0, allocation3.getContainers().size());
 
     // verify n2 is not qualified to be allocated
     fs.handle(new NodeUpdateSchedulerEvent(n2));
     Allocation allocation4 =
-        fs.allocate(appAttemptId1, emptyAsk, emptyId,
+        fs.allocate(appAttemptId1, emptyAsk, null, emptyId,
             null, null, NULL_UPDATE_REQUESTS);
     Assert.assertEquals("allocation4", 0, allocation4.getContainers().size());
 
     // verify n3 is not qualified to be allocated
     fs.handle(new NodeUpdateSchedulerEvent(n3));
     Allocation allocation5 =
-        fs.allocate(appAttemptId1, emptyAsk, emptyId,
+        fs.allocate(appAttemptId1, emptyAsk, null, emptyId,
             null, null, NULL_UPDATE_REQUESTS);
     Assert.assertEquals("allocation5", 0, allocation5.getContainers().size());
 
     fs.handle(new NodeUpdateSchedulerEvent(n4));
     Allocation allocation6 =
-        fs.allocate(appAttemptId1, emptyAsk, emptyId,
+        fs.allocate(appAttemptId1, emptyAsk, null, emptyId,
             null, null, NULL_UPDATE_REQUESTS);
     Assert.assertEquals("allocation6", 1, allocation6.getContainers().size());
 
@@ -1072,14 +1072,14 @@ public class TestFifoScheduler {
     List<ResourceRequest> ask1 = new ArrayList<ResourceRequest>();
     ask1.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),
         ResourceRequest.ANY, BuilderUtils.newResource(GB, 1), 1));
-    fs.allocate(appAttemptId1, ask1, emptyId,
+    fs.allocate(appAttemptId1, ask1, null, emptyId,
         null, null, NULL_UPDATE_REQUESTS);
 
     // Ask for a 2 GB container for app 2
     List<ResourceRequest> ask2 = new ArrayList<ResourceRequest>();
     ask2.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),
         ResourceRequest.ANY, BuilderUtils.newResource(2 * GB, 1), 1));
-    fs.allocate(appAttemptId2, ask2, emptyId,
+    fs.allocate(appAttemptId2, ask2, null, emptyId,
         null, null, NULL_UPDATE_REQUESTS);
 
     // Trigger container assignment
@@ -1087,13 +1087,13 @@ public class TestFifoScheduler {
 
     // Get the allocation for the applications and verify headroom
     Allocation allocation1 =
-        fs.allocate(appAttemptId1, emptyAsk, emptyId,
+        fs.allocate(appAttemptId1, emptyAsk, null, emptyId,
             null, null, NULL_UPDATE_REQUESTS);
     Assert.assertEquals("Allocation headroom", 1 * GB, allocation1
         .getResourceLimit().getMemorySize());
 
     Allocation allocation2 =
-        fs.allocate(appAttemptId2, emptyAsk, emptyId,
+        fs.allocate(appAttemptId2, emptyAsk, null, emptyId,
             null, null, NULL_UPDATE_REQUESTS);
     Assert.assertEquals("Allocation headroom", 1 * GB, allocation2
         .getResourceLimit().getMemorySize());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/TestSingleConstraintAppPlacementAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/TestSingleConstraintAppPlacementAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/TestSingleConstraintAppPlacementAllocator.java
new file mode 100644
index 0000000..479d2c1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/TestSingleConstraintAppPlacementAllocator.java
@@ -0,0 +1,403 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResoureRequestException;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.InvalidAllocationTagsQueryException;
+import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import java.util.function.LongBinaryOperator;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test behaviors of single constraint app placement allocator.
+ */
+public class TestSingleConstraintAppPlacementAllocator {
+  private AppSchedulingInfo appSchedulingInfo;
+  private AllocationTagsManager spyAllocationTagsManager;
+  private RMContext rmContext;
+  private SchedulerRequestKey schedulerRequestKey;
+  private SingleConstraintAppPlacementAllocator allocator;
+
+  @Before
+  public void setup() throws Exception {
+    // stub app scheduling info.
+    appSchedulingInfo = mock(AppSchedulingInfo.class);
+    when(appSchedulingInfo.getApplicationId()).thenReturn(
+        TestUtils.getMockApplicationId(1));
+    when(appSchedulingInfo.getApplicationAttemptId()).thenReturn(
+        TestUtils.getMockApplicationAttemptId(1, 1));
+
+    // stub RMContext
+    rmContext = TestUtils.getMockRMContext();
+
+    // Create allocation tags manager
+    AllocationTagsManager allocationTagsManager = new AllocationTagsManager(
+        rmContext);
+    spyAllocationTagsManager = spy(allocationTagsManager);
+    schedulerRequestKey = new SchedulerRequestKey(Priority.newInstance(1), 2L,
+        TestUtils.getMockContainerId(1, 1));
+    rmContext.setAllocationTagsManager(spyAllocationTagsManager);
+
+    // Create allocator
+    allocator = new SingleConstraintAppPlacementAllocator();
+    allocator.initialize(appSchedulingInfo, schedulerRequestKey, rmContext);
+  }
+
+  private void assertValidSchedulingRequest(
+      SchedulingRequest schedulingRequest) {
+    // Create a fresh allocator so its fields are not polluted by previous runs
+    allocator = new SingleConstraintAppPlacementAllocator();
+    allocator.initialize(appSchedulingInfo, schedulerRequestKey, rmContext);
+    allocator.updatePendingAsk(schedulerRequestKey, schedulingRequest, false);
+  }
+
+  private void assertInvalidSchedulingRequest(
+      SchedulingRequest schedulingRequest, boolean recreateAllocator) {
+    try {
+      // Create allocator
+      if (recreateAllocator) {
+        allocator = new SingleConstraintAppPlacementAllocator();
+        allocator.initialize(appSchedulingInfo, schedulerRequestKey, rmContext);
+      }
+      allocator.updatePendingAsk(schedulerRequestKey, schedulingRequest, false);
+    } catch (SchedulerInvalidResoureRequestException e) {
+      // Expected
+      return;
+    }
+    Assert.fail(
+        "Expect failure for schedulingRequest=" + schedulingRequest.toString());
+  }
+
+  @Test
+  public void testSchedulingRequestValidation() {
+    // Valid
+    assertValidSchedulingRequest(SchedulingRequest.newBuilder().executionType(
+        ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
+        .allocationRequestId(10L).priority(Priority.newInstance(1))
+        .placementConstraintExpression(PlacementConstraints
+            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+                PlacementConstraints.PlacementTargets
+                    .allocationTagToIntraApp("mapper", "reducer"),
+                PlacementConstraints.PlacementTargets.nodePartition(""))
+            .build()).resourceSizing(
+            ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
+        .build());
+    Assert.assertEquals(ImmutableSet.of("mapper", "reducer"),
+        allocator.getTargetAllocationTags());
+    Assert.assertEquals("", allocator.getTargetNodePartition());
+
+    // Valid (with partition)
+    assertValidSchedulingRequest(SchedulingRequest.newBuilder().executionType(
+        ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
+        .allocationRequestId(10L).priority(Priority.newInstance(1))
+        .placementConstraintExpression(PlacementConstraints
+            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+                PlacementConstraints.PlacementTargets
+                    .allocationTagToIntraApp("mapper", "reducer"),
+                PlacementConstraints.PlacementTargets.nodePartition("x"))
+            .build()).resourceSizing(
+            ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
+        .build());
+    Assert.assertEquals(ImmutableSet.of("mapper", "reducer"),
+        allocator.getTargetAllocationTags());
+    Assert.assertEquals("x", allocator.getTargetNodePartition());
+
+    // Valid (without specifying node partition)
+    assertValidSchedulingRequest(SchedulingRequest.newBuilder().executionType(
+        ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
+        .allocationRequestId(10L).priority(Priority.newInstance(1))
+        .placementConstraintExpression(PlacementConstraints
+            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+                PlacementConstraints.PlacementTargets
+                    .allocationTagToIntraApp("mapper", "reducer")).build())
+        .resourceSizing(
+            ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
+        .build());
+    Assert.assertEquals(ImmutableSet.of("mapper", "reducer"),
+        allocator.getTargetAllocationTags());
+    Assert.assertEquals("", allocator.getTargetNodePartition());
+
+    // Valid (with application Id target)
+    assertValidSchedulingRequest(SchedulingRequest.newBuilder().executionType(
+        ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
+        .allocationRequestId(10L).priority(Priority.newInstance(1))
+        .placementConstraintExpression(PlacementConstraints
+            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+                PlacementConstraints.PlacementTargets
+                    .allocationTagToIntraApp("mapper", "reducer")).build())
+        .resourceSizing(
+            ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
+        .build());
+    // Allocation tags should not include application Id
+    Assert.assertEquals(ImmutableSet.of("mapper", "reducer"),
+        allocator.getTargetAllocationTags());
+    Assert.assertEquals("", allocator.getTargetNodePartition());
+
+    // Invalid (without sizing)
+    assertInvalidSchedulingRequest(SchedulingRequest.newBuilder().executionType(
+        ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
+        .allocationRequestId(10L).priority(Priority.newInstance(1))
+        .placementConstraintExpression(PlacementConstraints
+            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+                PlacementConstraints.PlacementTargets
+                    .allocationTagToIntraApp("mapper", "reducer")).build())
+        .build(), true);
+
+    // Invalid (without target tags)
+    assertInvalidSchedulingRequest(SchedulingRequest.newBuilder().executionType(
+        ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
+        .allocationRequestId(10L).priority(Priority.newInstance(1))
+        .placementConstraintExpression(PlacementConstraints
+            .targetCardinality(PlacementConstraints.NODE, 0, 1).build())
+        .build(), true);
+
+    // Invalid (with multiple allocation tags expression specified)
+    assertInvalidSchedulingRequest(SchedulingRequest.newBuilder().executionType(
+        ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
+        .allocationRequestId(10L).priority(Priority.newInstance(1))
+        .placementConstraintExpression(PlacementConstraints
+            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+                PlacementConstraints.PlacementTargets
+                    .allocationTagToIntraApp("mapper"),
+                PlacementConstraints.PlacementTargets
+                    .allocationTagToIntraApp("reducer"),
+                PlacementConstraints.PlacementTargets.nodePartition(""))
+            .build()).resourceSizing(
+            ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
+        .build(), true);
+
+    // Invalid (with multiple node partition target expression specified)
+    assertInvalidSchedulingRequest(SchedulingRequest.newBuilder().executionType(
+        ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
+        .allocationRequestId(10L).priority(Priority.newInstance(1))
+        .placementConstraintExpression(PlacementConstraints
+            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+                PlacementConstraints.PlacementTargets
+                    .allocationTagToIntraApp("mapper"),
+                PlacementConstraints.PlacementTargets
+                    .allocationTagToIntraApp(""),
+                PlacementConstraints.PlacementTargets.nodePartition("x"))
+            .build()).resourceSizing(
+            ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
+        .build(), true);
+
+    // Invalid (not anti-affinity cardinality)
+    assertInvalidSchedulingRequest(SchedulingRequest.newBuilder().executionType(
+        ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
+        .allocationRequestId(10L).priority(Priority.newInstance(1))
+        .placementConstraintExpression(PlacementConstraints
+            .targetCardinality(PlacementConstraints.NODE, 1, 2,
+                PlacementConstraints.PlacementTargets
+                    .allocationTagToIntraApp("mapper"),
+                PlacementConstraints.PlacementTargets.nodePartition(""))
+            .build()).resourceSizing(
+            ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
+        .build(), true);
+
+    // Invalid (not anti-affinity cardinality)
+    assertInvalidSchedulingRequest(SchedulingRequest.newBuilder().executionType(
+        ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
+        .allocationRequestId(10L).priority(Priority.newInstance(1))
+        .placementConstraintExpression(PlacementConstraints
+            .targetCardinality(PlacementConstraints.NODE, 0, 2,
+                PlacementConstraints.PlacementTargets
+                    .allocationTagToIntraApp("mapper"),
+                PlacementConstraints.PlacementTargets.nodePartition(""))
+            .build()).resourceSizing(
+            ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
+        .build(), true);
+
+    // Invalid (not NODE scope)
+    assertInvalidSchedulingRequest(SchedulingRequest.newBuilder().executionType(
+        ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
+        .allocationRequestId(10L).priority(Priority.newInstance(1))
+        .placementConstraintExpression(PlacementConstraints
+            .targetCardinality(PlacementConstraints.RACK, 0, 1,
+                PlacementConstraints.PlacementTargets
+                    .allocationTagToIntraApp("mapper", "reducer"),
+                PlacementConstraints.PlacementTargets.nodePartition(""))
+            .build()).resourceSizing(
+            ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
+        .build(), true);
+
+    // Invalid (not GUARANTEED)
+    assertInvalidSchedulingRequest(SchedulingRequest.newBuilder().executionType(
+        ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC))
+        .allocationRequestId(10L).priority(Priority.newInstance(1))
+        .placementConstraintExpression(PlacementConstraints
+            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+                PlacementConstraints.PlacementTargets
+                    .allocationTagToIntraApp("mapper", "reducer"),
+                PlacementConstraints.PlacementTargets.nodePartition(""))
+            .build()).resourceSizing(
+            ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
+        .build(), true);
+  }
+
+  @Test
+  public void testSchedulingRequestUpdate() {
+    SchedulingRequest schedulingRequest =
+        SchedulingRequest.newBuilder().executionType(
+            ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
+            .allocationRequestId(10L).priority(Priority.newInstance(1))
+            .placementConstraintExpression(PlacementConstraints
+                .targetCardinality(PlacementConstraints.NODE, 0, 1,
+                    PlacementConstraints.PlacementTargets
+                        .allocationTagToIntraApp("mapper", "reducer"),
+                    PlacementConstraints.PlacementTargets.nodePartition(""))
+                .build()).resourceSizing(
+            ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
+            .build();
+    allocator.updatePendingAsk(schedulerRequestKey, schedulingRequest, false);
+
+    // Update allocator with exactly the same scheduling request; this should succeed.
+    allocator.updatePendingAsk(schedulerRequestKey, schedulingRequest, false);
+
+    // Update allocator with a scheduling request that differs only in
+    // #allocations; this should succeed.
+    schedulingRequest.getResourceSizing().setNumAllocations(10);
+    allocator.updatePendingAsk(schedulerRequestKey, schedulingRequest, false);
+
+    // Update allocator with a scheduling request that differs in the resource
+    // size; this should fail.
+    schedulingRequest.getResourceSizing().setResources(
+        Resource.newInstance(2048, 1));
+    assertInvalidSchedulingRequest(schedulingRequest, false);
+
+    // Update allocator with a different placement target (allocation tag);
+    // this should fail.
+    schedulingRequest = SchedulingRequest.newBuilder().executionType(
+        ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
+        .allocationRequestId(10L).priority(Priority.newInstance(1))
+        .placementConstraintExpression(PlacementConstraints
+            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+                PlacementConstraints.PlacementTargets
+                    .allocationTagToIntraApp("mapper"),
+                PlacementConstraints.PlacementTargets.nodePartition(""))
+            .build()).resourceSizing(
+            ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
+        .build();
+    assertInvalidSchedulingRequest(schedulingRequest, false);
+
+    // Update allocator with recover == true
+    int existingNumAllocations =
+        allocator.getSchedulingRequest().getResourceSizing()
+            .getNumAllocations();
+    schedulingRequest = SchedulingRequest.newBuilder().executionType(
+        ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
+        .allocationRequestId(10L).priority(Priority.newInstance(1))
+        .placementConstraintExpression(PlacementConstraints
+            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+                PlacementConstraints.PlacementTargets
+                    .allocationTagToIntraApp("mapper", "reducer"),
+                PlacementConstraints.PlacementTargets.nodePartition(""))
+            .build()).resourceSizing(
+            ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
+        .build();
+    allocator.updatePendingAsk(schedulerRequestKey, schedulingRequest, true);
+    Assert.assertEquals(existingNumAllocations + 1,
+        allocator.getSchedulingRequest().getResourceSizing()
+            .getNumAllocations());
+  }
+
+  @Test
+  public void testFunctionality() throws InvalidAllocationTagsQueryException {
+    SchedulingRequest schedulingRequest =
+        SchedulingRequest.newBuilder().executionType(
+            ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
+            .allocationRequestId(10L).priority(Priority.newInstance(1))
+            .placementConstraintExpression(PlacementConstraints
+                .targetCardinality(PlacementConstraints.NODE, 0, 1,
+                    PlacementConstraints.PlacementTargets
+                        .allocationTagToIntraApp("mapper", "reducer"),
+                    PlacementConstraints.PlacementTargets.nodePartition(""))
+                .build()).resourceSizing(
+            ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
+            .build();
+    allocator.updatePendingAsk(schedulerRequestKey, schedulingRequest, false);
+    allocator.canAllocate(NodeType.NODE_LOCAL,
+        TestUtils.getMockNode("host1", "/rack1", 123, 1024));
+    verify(spyAllocationTagsManager, Mockito.times(1)).getNodeCardinalityByOp(
+        eq(NodeId.fromString("host1:123")), eq(TestUtils.getMockApplicationId(1)),
+        eq(ImmutableSet.of("mapper", "reducer")),
+        any(LongBinaryOperator.class));
+
+    allocator = new SingleConstraintAppPlacementAllocator();
+    allocator.initialize(appSchedulingInfo, schedulerRequestKey, rmContext);
+    // Valid (with partition)
+    schedulingRequest = SchedulingRequest.newBuilder().executionType(
+        ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
+        .allocationRequestId(10L).priority(Priority.newInstance(1))
+        .placementConstraintExpression(PlacementConstraints
+            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+                PlacementConstraints.PlacementTargets
+                    .allocationTagToIntraApp("mapper", "reducer"),
+                PlacementConstraints.PlacementTargets.nodePartition("x"))
+            .build()).resourceSizing(
+            ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
+        .build();
+    allocator.updatePendingAsk(schedulerRequestKey, schedulingRequest, false);
+    allocator.canAllocate(NodeType.NODE_LOCAL,
+        TestUtils.getMockNode("host1", "/rack1", 123, 1024));
+    verify(spyAllocationTagsManager, Mockito.atLeast(1)).getNodeCardinalityByOp(
+        eq(NodeId.fromString("host1:123")),
+        eq(TestUtils.getMockApplicationId(1)), eq(ImmutableSet
+            .of("mapper", "reducer")), any(LongBinaryOperator.class));
+
+    SchedulerNode node1 = mock(SchedulerNode.class);
+    when(node1.getPartition()).thenReturn("x");
+    when(node1.getNodeID()).thenReturn(NodeId.fromString("host1:123"));
+
+    Assert.assertTrue(allocator
+        .precheckNode(node1, SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY));
+
+    SchedulerNode node2 = mock(SchedulerNode.class);
+    when(node2.getPartition()).thenReturn("");
+    when(node2.getNodeID()).thenReturn(NodeId.fromString("host2:123"));
+    Assert.assertFalse(allocator
+        .precheckNode(node2, SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY));
+  }
+}



[17/32] hadoop git commit: YARN-7682. Expose canSatisfyConstraints utility function to validate a placement against a constraint. (Panagiotis Garefalakis via asuresh)

Posted by as...@apache.org.
YARN-7682. Expose canSatisfyConstraints utility function to validate a placement against a constraint. (Panagiotis Garefalakis via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bdba01f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bdba01f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bdba01f7

Branch: refs/heads/trunk
Commit: bdba01f73b58d2228e808c6f61377f101b6bac1c
Parents: a52d11f
Author: Arun Suresh <as...@apache.org>
Authored: Wed Jan 3 08:00:50 2018 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../constraint/PlacementConstraintsUtil.java    | 132 +++++++++
 .../algorithm/DefaultPlacementAlgorithm.java    |  55 +---
 .../TestPlacementConstraintsUtil.java           | 287 +++++++++++++++++++
 .../constraint/TestPlacementProcessor.java      | 204 +++++++++++--
 4 files changed, 601 insertions(+), 77 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdba01f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
new file mode 100644
index 0000000..956a3c9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
@@ -0,0 +1,132 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import java.util.Iterator;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression.TargetType;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations.SingleConstraintTransformer;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.DefaultPlacementAlgorithm;
+
+/**
+ * This class contains various static methods used by the Placement Algorithms
+ * to simplify constrained placement.
+ * (see also {@link DefaultPlacementAlgorithm}).
+ */
+@Public
+@Unstable
+public final class PlacementConstraintsUtil {
+
+  // Suppresses default constructor, ensuring non-instantiability.
+  private PlacementConstraintsUtil() {
+  }
+
+  /**
+   * Returns true if **single** application constraint with associated
+   * allocationTags and scope is satisfied by a specific scheduler Node.
+   *
+   * @param appId the application id
+   * @param sc the placement constraint
+   * @param te the target expression
+   * @param node the scheduler node
+   * @param tm the allocation tags store
+   * @return true if single application constraint is satisfied by node
+   * @throws InvalidAllocationTagsQueryException
+   */
+  private static boolean canSatisfySingleConstraintExpression(
+      ApplicationId appId, SingleConstraint sc, TargetExpression te,
+      SchedulerNode node, AllocationTagsManager tm)
+      throws InvalidAllocationTagsQueryException {
+    long minScopeCardinality = 0;
+    long maxScopeCardinality = 0;
+    if (sc.getScope() == PlacementConstraints.NODE) {
+      minScopeCardinality = tm.getNodeCardinalityByOp(node.getNodeID(), appId,
+          te.getTargetValues(), Long::max);
+      maxScopeCardinality = tm.getNodeCardinalityByOp(node.getNodeID(), appId,
+          te.getTargetValues(), Long::min);
+    } else if (sc.getScope() == PlacementConstraints.RACK) {
+      minScopeCardinality = tm.getRackCardinalityByOp(node.getRackName(), appId,
+          te.getTargetValues(), Long::max);
+      maxScopeCardinality = tm.getRackCardinalityByOp(node.getRackName(), appId,
+          te.getTargetValues(), Long::min);
+    }
+    // Make sure Anti-affinity satisfies hard upper limit
+    maxScopeCardinality = sc.getMaxCardinality() == 0 ? maxScopeCardinality - 1
+        : maxScopeCardinality;
+
+    return (minScopeCardinality >= sc.getMinCardinality()
+        && maxScopeCardinality < sc.getMaxCardinality());
+  }
+
+  /**
+   * Returns true if all application constraints with associated allocationTags
+   * are **currently** satisfied by a specific scheduler Node.
+   * To do so the method retrieves and goes through all application constraint
+   * expressions and checks if the specific allocation is between the allowed
+   * min-max cardinality values under the constraint scope (Node/Rack/etc).
+   *
+   * @param appId the application id
+   * @param allocationTags the allocation tags set
+   * @param node the scheduler node
+   * @param pcm the placement constraints store
+   * @param tagsManager the allocation tags store
+   * @return true if all application constraints are satisfied by node
+   * @throws InvalidAllocationTagsQueryException
+   */
+  public static boolean canSatisfyConstraints(ApplicationId appId,
+      Set<String> allocationTags, SchedulerNode node,
+      PlacementConstraintManager pcm, AllocationTagsManager tagsManager)
+      throws InvalidAllocationTagsQueryException {
+    PlacementConstraint constraint = pcm.getConstraint(appId, allocationTags);
+    if (constraint == null) {
+      return true;
+    }
+    // Transform to SimpleConstraint
+    SingleConstraintTransformer singleTransformer =
+        new SingleConstraintTransformer(constraint);
+    constraint = singleTransformer.transform();
+    AbstractConstraint sConstraintExpr = constraint.getConstraintExpr();
+    SingleConstraint single = (SingleConstraint) sConstraintExpr;
+    // Iterate through TargetExpressions
+    Iterator<TargetExpression> expIt = single.getTargetExpressions().iterator();
+    while (expIt.hasNext()) {
+      TargetExpression currentExp = expIt.next();
+      // Supporting AllocationTag Expressions for now
+      if (currentExp.getTargetType().equals(TargetType.ALLOCATION_TAG)) {
+        // Check if conditions are met
+        if (!canSatisfySingleConstraintExpression(appId, single, currentExp,
+            node, tagsManager)) {
+          return false;
+        }
+      }
+    }
+    // return true if all targetExpressions are satisfied
+    return true;
+  }
+
+}
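
To illustrate how the new utility is meant to be driven, the fragment below sketches the caller side. It is illustrative only (not part of the commit) and assumes the applicationId, schedulingRequest, schedulerNode, constraintManager and tagsManager references that a placement algorithm such as DefaultPlacementAlgorithm already holds:

    // Illustrative caller-side sketch; mirrors the signature added above.
    try {
      boolean fits = PlacementConstraintsUtil.canSatisfyConstraints(
          applicationId,
          schedulingRequest.getAllocationTags(),
          schedulerNode,
          constraintManager,
          tagsManager);
      if (!fits) {
        // The registered constraint is violated on this node; the algorithm
        // moves on to the next candidate node.
        return false;
      }
    } catch (InvalidAllocationTagsQueryException e) {
      // A tag query that cannot be answered is treated as "does not fit".
      return false;
    }
    return true;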

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdba01f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
index 395c156..9ed9ab1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
@@ -19,19 +19,16 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algor
 
 import java.util.Iterator;
 import java.util.List;
-import java.util.Set;
 
 import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
-import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
-import org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.InvalidAllocationTagsQueryException;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintsUtil;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithm;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmInput;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmOutput;
@@ -65,58 +62,14 @@ public class DefaultPlacementAlgorithm implements ConstraintPlacementAlgorithm {
             .getNodes(filter);
   }
 
-  /**
-   * TODO: Method will be moved to PlacementConstraintsUtil class (YARN-7682)
-   * @param applicationId
-   * @param allocationTags
-   * @param nodeId
-   * @param tagsManager
-   * @return boolean
-   * @throws InvalidAllocationTagsQueryException
-   */
-  public boolean canAssign(ApplicationId applicationId,
-      Set<String> allocationTags, NodeId nodeId,
-      AllocationTagsManager tagsManager)
-      throws InvalidAllocationTagsQueryException {
-    PlacementConstraint constraint =
-        constraintManager.getConstraint(applicationId, allocationTags);
-    if (constraint == null) {
-      return true;
-    }
-    // TODO: proper transformations
-    // Currently works only for simple anti-affinity
-    // NODE scope target expressions
-    PlacementConstraintTransformations.SpecializedConstraintTransformer transformer =
-        new PlacementConstraintTransformations.SpecializedConstraintTransformer(
-            constraint);
-    PlacementConstraint transform = transformer.transform();
-    PlacementConstraint.TargetConstraint targetConstraint =
-        (PlacementConstraint.TargetConstraint) transform.getConstraintExpr();
-    // Assume a single target expression tag;
-    // The Sample Algorithm assumes a constraint will always be a simple
-    // Target Constraint with a single entry in the target set.
-    // As mentioned in the class javadoc - This algorithm should be
-    // used mostly for testing and validating end-2-end workflow.
-    String targetTag = targetConstraint.getTargetExpressions().iterator().next()
-        .getTargetValues().iterator().next();
-    // TODO: Assuming anti-affinity constraint
-    long nodeCardinality =
-        tagsManager.getNodeCardinality(nodeId, applicationId, targetTag);
-    if (nodeCardinality != 0) {
-      return false;
-    }
-    // return true if it is a valid placement
-    return true;
-  }
-
   public boolean attemptPlacementOnNode(ApplicationId appId,
       SchedulingRequest schedulingRequest, SchedulerNode schedulerNode)
       throws InvalidAllocationTagsQueryException {
     int numAllocs = schedulingRequest.getResourceSizing().getNumAllocations();
     if (numAllocs > 0) {
-      if (canAssign(appId,
-          schedulingRequest.getAllocationTags(), schedulerNode.getNodeID(),
-          tagsManager)) {
+      if (PlacementConstraintsUtil.canSatisfyConstraints(appId,
+          schedulingRequest.getAllocationTags(), schedulerNode,
+          constraintManager, tagsManager)) {
         return true;
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdba01f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
new file mode 100644
index 0000000..7492233
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
@@ -0,0 +1,287 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
+
+import java.util.AbstractMap;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.common.collect.ImmutableSet;
+
+/**
+ * Test the PlacementConstraint Utility class functionality.
+ */
+public class TestPlacementConstraintsUtil {
+
+  private List<RMNode> rmNodes;
+  private RMContext rmContext;
+  private static final int GB = 1024;
+  private ApplicationId appId1;
+  private PlacementConstraint c1, c2, c3, c4;
+  private Set<String> sourceTag1, sourceTag2;
+  private Map<Set<String>, PlacementConstraint> constraintMap1, constraintMap2;
+
+  @Before
+  public void setup() {
+    MockRM rm = new MockRM();
+    rm.start();
+    MockNodes.resetHostIds();
+    rmNodes = MockNodes.newNodes(2, 2, Resource.newInstance(4096, 4));
+    for (RMNode rmNode : rmNodes) {
+      rm.getRMContext().getRMNodes().putIfAbsent(rmNode.getNodeID(), rmNode);
+    }
+    rmContext = rm.getRMContext();
+
+    // Build appIDs, constraints, source tags, and constraint map.
+    long ts = System.currentTimeMillis();
+    appId1 = BuilderUtils.newApplicationId(ts, 123);
+
+    c1 = PlacementConstraints.build(targetIn(NODE, allocationTag("hbase-m")));
+    c2 = PlacementConstraints.build(targetIn(RACK, allocationTag("hbase-rs")));
+    c3 = PlacementConstraints
+        .build(targetNotIn(NODE, allocationTag("hbase-m")));
+    c4 = PlacementConstraints
+        .build(targetNotIn(RACK, allocationTag("hbase-rs")));
+
+    sourceTag1 = new HashSet<>(Arrays.asList("spark"));
+    sourceTag2 = new HashSet<>(Arrays.asList("zk"));
+
+    constraintMap1 = Stream
+        .of(new AbstractMap.SimpleEntry<>(sourceTag1, c1),
+            new AbstractMap.SimpleEntry<>(sourceTag2, c2))
+        .collect(Collectors.toMap(AbstractMap.SimpleEntry::getKey,
+            AbstractMap.SimpleEntry::getValue));
+    constraintMap2 = Stream
+        .of(new AbstractMap.SimpleEntry<>(sourceTag1, c3),
+            new AbstractMap.SimpleEntry<>(sourceTag2, c4))
+        .collect(Collectors.toMap(AbstractMap.SimpleEntry::getKey,
+            AbstractMap.SimpleEntry::getValue));
+  }
+
+  @Test
+  public void testNodeAffinityAssignment()
+      throws InvalidAllocationTagsQueryException {
+    PlacementConstraintManagerService pcm =
+        new MemoryPlacementConstraintManager();
+    AllocationTagsManager tm = new AllocationTagsManager(rmContext);
+    // Register App1 with affinity constraint map
+    pcm.registerApplication(appId1, constraintMap1);
+    // No containers are running so all 'zk' and 'spark' allocations should fail
+    // on every cluster NODE
+    Iterator<RMNode> nodeIterator = rmNodes.iterator();
+    while (nodeIterator.hasNext()) {
+      RMNode currentNode = nodeIterator.next();
+      FiCaSchedulerNode schedulerNode = TestUtils.getMockNode(
+          currentNode.getHostName(), currentNode.getRackName(), 123, 4 * GB);
+      Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+          sourceTag1, schedulerNode, pcm, tm));
+      Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+          sourceTag2, schedulerNode, pcm, tm));
+    }
+    /**
+     * Now place container:
+     * Node0:123 (Rack1):
+     *    container_app1_1 (hbase-m)
+     */
+    RMNode n0_r1 = rmNodes.get(0);
+    RMNode n1_r1 = rmNodes.get(1);
+    RMNode n2_r2 = rmNodes.get(2);
+    RMNode n3_r2 = rmNodes.get(3);
+    FiCaSchedulerNode schedulerNode0 = TestUtils
+        .getMockNode(n0_r1.getHostName(), n0_r1.getRackName(), 123, 4 * GB);
+    FiCaSchedulerNode schedulerNode1 = TestUtils
+        .getMockNode(n1_r1.getHostName(), n1_r1.getRackName(), 123, 4 * GB);
+    FiCaSchedulerNode schedulerNode2 = TestUtils
+        .getMockNode(n2_r2.getHostName(), n2_r2.getRackName(), 123, 4 * GB);
+    FiCaSchedulerNode schedulerNode3 = TestUtils
+        .getMockNode(n3_r2.getHostName(), n3_r2.getRackName(), 123, 4 * GB);
+    // 1 Container on node 0 with allocationTag 'hbase-m'
+    ContainerId hbase_m = ContainerId
+        .newContainerId(ApplicationAttemptId.newInstance(appId1, 0), 0);
+    tm.addContainer(n0_r1.getNodeID(), hbase_m, ImmutableSet.of("hbase-m"));
+
+    // 'spark' placement on Node0 should now SUCCEED
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        sourceTag1, schedulerNode0, pcm, tm));
+    // FAIL on the rest of the nodes
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        sourceTag1, schedulerNode1, pcm, tm));
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        sourceTag1, schedulerNode2, pcm, tm));
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        sourceTag1, schedulerNode3, pcm, tm));
+  }
+
+  @Test
+  public void testRackAffinityAssignment()
+      throws InvalidAllocationTagsQueryException {
+    PlacementConstraintManagerService pcm =
+        new MemoryPlacementConstraintManager();
+    AllocationTagsManager tm = new AllocationTagsManager(rmContext);
+    // Register App1 with affinity constraint map
+    pcm.registerApplication(appId1, constraintMap1);
+    /**
+     * Now place container:
+     * Node0:123 (Rack1):
+     *    container_app1_1 (hbase-rs)
+     */
+    RMNode n0_r1 = rmNodes.get(0);
+    RMNode n1_r1 = rmNodes.get(1);
+    RMNode n2_r2 = rmNodes.get(2);
+    RMNode n3_r2 = rmNodes.get(3);
+    // 1 Container on Node0-Rack1 with allocationTag 'hbase-rs'
+    ContainerId hbase_m = ContainerId
+        .newContainerId(ApplicationAttemptId.newInstance(appId1, 0), 0);
+    tm.addContainer(n0_r1.getNodeID(), hbase_m, ImmutableSet.of("hbase-rs"));
+
+    FiCaSchedulerNode schedulerNode0 = TestUtils
+        .getMockNode(n0_r1.getHostName(), n0_r1.getRackName(), 123, 4 * GB);
+    FiCaSchedulerNode schedulerNode1 = TestUtils
+        .getMockNode(n1_r1.getHostName(), n1_r1.getRackName(), 123, 4 * GB);
+    FiCaSchedulerNode schedulerNode2 = TestUtils
+        .getMockNode(n2_r2.getHostName(), n2_r2.getRackName(), 123, 4 * GB);
+    FiCaSchedulerNode schedulerNode3 = TestUtils
+        .getMockNode(n3_r2.getHostName(), n3_r2.getRackName(), 123, 4 * GB);
+    // 'zk' placement on Rack1 should now SUCCEED
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        sourceTag2, schedulerNode0, pcm, tm));
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        sourceTag2, schedulerNode1, pcm, tm));
+
+    // FAIL on the rest of the RACKs
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        sourceTag2, schedulerNode2, pcm, tm));
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        sourceTag2, schedulerNode3, pcm, tm));
+  }
+
+  @Test
+  public void testNodeAntiAffinityAssignment()
+      throws InvalidAllocationTagsQueryException {
+    PlacementConstraintManagerService pcm =
+        new MemoryPlacementConstraintManager();
+    AllocationTagsManager tm = new AllocationTagsManager(rmContext);
+    // Register App1 with anti-affinity constraint map
+    pcm.registerApplication(appId1, constraintMap2);
+    /**
+     * place container:
+     * Node0:123 (Rack1):
+     *    container_app1_1 (hbase-m)
+     */
+    RMNode n0_r1 = rmNodes.get(0);
+    RMNode n1_r1 = rmNodes.get(1);
+    RMNode n2_r2 = rmNodes.get(2);
+    RMNode n3_r2 = rmNodes.get(3);
+    FiCaSchedulerNode schedulerNode0 = TestUtils
+        .getMockNode(n0_r1.getHostName(), n0_r1.getRackName(), 123, 4 * GB);
+    FiCaSchedulerNode schedulerNode1 = TestUtils
+        .getMockNode(n1_r1.getHostName(), n1_r1.getRackName(), 123, 4 * GB);
+    FiCaSchedulerNode schedulerNode2 = TestUtils
+        .getMockNode(n2_r2.getHostName(), n2_r2.getRackName(), 123, 4 * GB);
+    FiCaSchedulerNode schedulerNode3 = TestUtils
+        .getMockNode(n3_r2.getHostName(), n3_r2.getRackName(), 123, 4 * GB);
+    // 1 Container on node 0 with allocationTag 'hbase-m'
+    ContainerId hbase_m = ContainerId
+        .newContainerId(ApplicationAttemptId.newInstance(appId1, 0), 0);
+    tm.addContainer(n0_r1.getNodeID(), hbase_m, ImmutableSet.of("hbase-m"));
+
+    // 'spark' placement on Node0 should now FAIL
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        sourceTag1, schedulerNode0, pcm, tm));
+    // SUCCEED on the rest of the nodes
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        sourceTag1, schedulerNode1, pcm, tm));
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        sourceTag1, schedulerNode2, pcm, tm));
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        sourceTag1, schedulerNode3, pcm, tm));
+  }
+
+  @Test
+  public void testRackAntiAffinityAssignment()
+      throws InvalidAllocationTagsQueryException {
+    AllocationTagsManager tm = new AllocationTagsManager(rmContext);
+    PlacementConstraintManagerService pcm =
+        new MemoryPlacementConstraintManager();
+    // Register App1 with anti-affinity constraint map
+    pcm.registerApplication(appId1, constraintMap2);
+    /**
+     * Place container:
+     * Node0:123 (Rack1):
+     *    container_app1_1 (hbase-rs)
+     */
+    RMNode n0_r1 = rmNodes.get(0);
+    RMNode n1_r1 = rmNodes.get(1);
+    RMNode n2_r2 = rmNodes.get(2);
+    RMNode n3_r2 = rmNodes.get(3);
+    // 1 Container on Node0-Rack1 with allocationTag 'hbase-rs'
+    ContainerId hbase_m = ContainerId
+        .newContainerId(ApplicationAttemptId.newInstance(appId1, 0), 0);
+    tm.addContainer(n0_r1.getNodeID(), hbase_m, ImmutableSet.of("hbase-rs"));
+
+    FiCaSchedulerNode schedulerNode0 = TestUtils
+        .getMockNode(n0_r1.getHostName(), n0_r1.getRackName(), 123, 4 * GB);
+    FiCaSchedulerNode schedulerNode1 = TestUtils
+        .getMockNode(n1_r1.getHostName(), n1_r1.getRackName(), 123, 4 * GB);
+    FiCaSchedulerNode schedulerNode2 = TestUtils
+        .getMockNode(n2_r2.getHostName(), n2_r2.getRackName(), 123, 4 * GB);
+    FiCaSchedulerNode schedulerNode3 = TestUtils
+        .getMockNode(n3_r2.getHostName(), n3_r2.getRackName(), 123, 4 * GB);
+
+    // 'zk' placement on Rack1 should FAIL
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        sourceTag2, schedulerNode0, pcm, tm));
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        sourceTag2, schedulerNode1, pcm, tm));
+
+    // SUCCEED on the rest of the RACKs
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        sourceTag2, schedulerNode2, pcm, tm));
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        sourceTag2, schedulerNode3, pcm, tm));
+  }
+}
\ No newline at end of file
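
The four tests above share one fixture pattern: build a constraint per source tag, register the map for the application, record a tagged container in the AllocationTagsManager, and then ask the utility whether each node still satisfies the constraint. A condensed sketch of that pattern, reusing the classes used in this test file (plus java.util.HashMap and Collections); appId1 and schedulerNode stand in for the fixtures built in setup():

    // Condensed fixture sketch, not an additional test.
    PlacementConstraintManagerService pcm =
        new MemoryPlacementConstraintManager();
    AllocationTagsManager tm = new AllocationTagsManager(rmContext);

    // "spark" wants nodes that already host "hbase-m";
    // "zk" wants racks that already host "hbase-rs".
    Map<Set<String>, PlacementConstraint> constraints = new HashMap<>();
    constraints.put(Collections.singleton("spark"),
        PlacementConstraints.build(targetIn(NODE, allocationTag("hbase-m"))));
    constraints.put(Collections.singleton("zk"),
        PlacementConstraints.build(targetIn(RACK, allocationTag("hbase-rs"))));
    pcm.registerApplication(appId1, constraints);

    // Before any "hbase-m" container is recorded the check fails everywhere;
    // once tm.addContainer(...) registers one, its node starts passing it.
    boolean fits = PlacementConstraintsUtil.canSatisfyConstraints(
        appId1, Collections.singleton("spark"), schedulerNode, pcm, tm);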

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdba01f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
index 87dd5b7..c260fe0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.yarn.api.records.RejectionReason;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceSizing;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
@@ -48,16 +49,21 @@ import org.junit.Test;
 
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;
 
 import static java.lang.Thread.sleep;
 import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
 import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;
 
 /**
  * This tests end2end workflow of the constraint placement framework.
@@ -104,7 +110,7 @@ public class TestPlacementProcessor {
   }
 
   @Test(timeout = 300000)
-  public void testPlacement() throws Exception {
+  public void testAntiAffinityPlacement() throws Exception {
     HashMap<NodeId, MockNM> nodes = new HashMap<>();
     MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
     nodes.put(nm1.getNodeId(), nm1);
@@ -120,44 +126,174 @@ public class TestPlacementProcessor {
     nm4.registerNode();
 
     RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    // Containers with allocationTag 'foo' are restricted to 1 per NODE
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2,
-        Collections.singletonMap(
-            Collections.singleton("foo"),
+        Collections.singletonMap(Collections.singleton("foo"),
             PlacementConstraints.build(
-                PlacementConstraints.targetNotIn(NODE, allocationTag("foo")))
-        ));
+                PlacementConstraints.targetNotIn(NODE, allocationTag("foo")))));
     am1.addSchedulingRequest(
-        Arrays.asList(
-            schedulingRequest(1, 1, 1, 512, "foo"),
+        Arrays.asList(schedulingRequest(1, 1, 1, 512, "foo"),
             schedulingRequest(1, 2, 1, 512, "foo"),
             schedulingRequest(1, 3, 1, 512, "foo"),
-            schedulingRequest(1, 5, 1, 512, "foo"))
-    );
+            schedulingRequest(1, 5, 1, 512, "foo")));
     AllocateResponse allocResponse = am1.schedule(); // send the request
     List<Container> allocatedContainers = new ArrayList<>();
     allocatedContainers.addAll(allocResponse.getAllocatedContainers());
 
     // kick the scheduler
-
-    while (allocatedContainers.size() < 4) {
-      nm1.nodeHeartbeat(true);
-      nm2.nodeHeartbeat(true);
-      nm3.nodeHeartbeat(true);
-      nm4.nodeHeartbeat(true);
-      LOG.info("Waiting for containers to be created for app 1...");
-      sleep(1000);
-      allocResponse = am1.schedule();
-      allocatedContainers.addAll(allocResponse.getAllocatedContainers());
-    }
+    waitForContainerAllocation(nodes.values(), am1, allocatedContainers, 4);
 
     Assert.assertEquals(4, allocatedContainers.size());
-    Set<NodeId> nodeIds = allocatedContainers.stream()
-        .map(x -> x.getNodeId()).collect(Collectors.toSet());
-    // Ensure unique nodes
+    Set<NodeId> nodeIds = allocatedContainers.stream().map(x -> x.getNodeId())
+        .collect(Collectors.toSet());
+    // Ensure unique nodes (anti-affinity)
     Assert.assertEquals(4, nodeIds.size());
   }
 
   @Test(timeout = 300000)
+  public void testCardinalityPlacement() throws Exception {
+    HashMap<NodeId, MockNM> nodes = new HashMap<>();
+    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm1.getNodeId(), nm1);
+    MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm2.getNodeId(), nm2);
+    MockNM nm3 = new MockNM("h3:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm3.getNodeId(), nm3);
+    MockNM nm4 = new MockNM("h4:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm4.getNodeId(), nm4);
+    nm1.registerNode();
+    nm2.registerNode();
+    nm3.registerNode();
+    nm4.registerNode();
+
+    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    // Containers with allocationTag 'foo' should not exceed 4 per NODE
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2,
+        Collections.singletonMap(Collections.singleton("foo"),
+            PlacementConstraints.build(PlacementConstraints
+                .targetCardinality(NODE, 0, 4, allocationTag("foo")))));
+    am1.addSchedulingRequest(
+        Arrays.asList(schedulingRequest(1, 1, 1, 512, "foo"),
+            schedulingRequest(1, 2, 1, 512, "foo"),
+            schedulingRequest(1, 3, 1, 512, "foo"),
+            schedulingRequest(1, 4, 1, 512, "foo"),
+            schedulingRequest(1, 5, 1, 512, "foo"),
+            schedulingRequest(1, 6, 1, 512, "foo"),
+            schedulingRequest(1, 7, 1, 512, "foo"),
+            schedulingRequest(1, 8, 1, 512, "foo")));
+    AllocateResponse allocResponse = am1.schedule(); // send the request
+    List<Container> allocatedContainers = new ArrayList<>();
+    allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+
+    // kick the scheduler
+    waitForContainerAllocation(nodes.values(), am1, allocatedContainers, 8);
+
+    Assert.assertEquals(8, allocatedContainers.size());
+    Map<NodeId, Long> nodeIdContainerIdMap =
+        allocatedContainers.stream().collect(
+            Collectors.groupingBy(c -> c.getNodeId(), Collectors.counting()));
+    // Ensure no more than 4 containers per node
+    for (NodeId n : nodeIdContainerIdMap.keySet()) {
+      Assert.assertTrue(nodeIdContainerIdMap.get(n) < 5);
+    }
+  }
+
+  @Test(timeout = 300000)
+  public void testAffinityPlacement() throws Exception {
+    HashMap<NodeId, MockNM> nodes = new HashMap<>();
+    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm1.getNodeId(), nm1);
+    MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm2.getNodeId(), nm2);
+    MockNM nm3 = new MockNM("h3:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm3.getNodeId(), nm3);
+    MockNM nm4 = new MockNM("h4:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm4.getNodeId(), nm4);
+    nm1.registerNode();
+    nm2.registerNode();
+    nm3.registerNode();
+    nm4.registerNode();
+
+    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    // Containers with allocationTag 'foo' should be placed where
+    // containers with allocationTag 'bar' are already running
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2,
+        Collections.singletonMap(Collections.singleton("foo"),
+            PlacementConstraints.build(
+                PlacementConstraints.targetIn(NODE, allocationTag("bar")))));
+    am1.addSchedulingRequest(
+        Arrays.asList(schedulingRequest(1, 1, 1, 512, "bar"),
+            schedulingRequest(1, 2, 1, 512, "foo"),
+            schedulingRequest(1, 3, 1, 512, "foo"),
+            schedulingRequest(1, 4, 1, 512, "foo"),
+            schedulingRequest(1, 5, 1, 512, "foo")));
+    AllocateResponse allocResponse = am1.schedule(); // send the request
+    List<Container> allocatedContainers = new ArrayList<>();
+    allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+
+    // kick the scheduler
+    waitForContainerAllocation(nodes.values(), am1, allocatedContainers, 5);
+
+    Assert.assertEquals(5, allocatedContainers.size());
+    Set<NodeId> nodeIds = allocatedContainers.stream().map(x -> x.getNodeId())
+        .collect(Collectors.toSet());
+    // Ensure all containers end up on the same node (affinity)
+    Assert.assertEquals(1, nodeIds.size());
+  }
+
+  @Test(timeout = 300000)
+  public void testComplexPlacement() throws Exception {
+    HashMap<NodeId, MockNM> nodes = new HashMap<>();
+    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm1.getNodeId(), nm1);
+    MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm2.getNodeId(), nm2);
+    MockNM nm3 = new MockNM("h3:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm3.getNodeId(), nm3);
+    MockNM nm4 = new MockNM("h4:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm4.getNodeId(), nm4);
+    nm1.registerNode();
+    nm2.registerNode();
+    nm3.registerNode();
+    nm4.registerNode();
+
+    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    Map<Set<String>, PlacementConstraint> constraintMap = new HashMap<>();
+    // Containers with allocationTag 'bar' should not exceed 1 per NODE
+    constraintMap.put(Collections.singleton("bar"),
+        PlacementConstraints.build(targetNotIn(NODE, allocationTag("bar"))));
+    // Containers with allocationTag 'foo' should be placed where 'bar' exists
+    constraintMap.put(Collections.singleton("foo"),
+        PlacementConstraints.build(targetIn(NODE, allocationTag("bar"))));
+    // Containers with allocationTag 'foo' should not exceed 2 per NODE
+    constraintMap.put(Collections.singleton("foo"), PlacementConstraints
+        .build(targetCardinality(NODE, 0, 2, allocationTag("foo"))));
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2, constraintMap);
+    am1.addSchedulingRequest(
+        Arrays.asList(schedulingRequest(1, 1, 1, 512, "bar"),
+            schedulingRequest(1, 2, 1, 512, "bar"),
+            schedulingRequest(1, 3, 1, 512, "foo"),
+            schedulingRequest(1, 4, 1, 512, "foo"),
+            schedulingRequest(1, 5, 1, 512, "foo"),
+            schedulingRequest(1, 6, 1, 512, "foo")));
+    AllocateResponse allocResponse = am1.schedule(); // send the request
+    List<Container> allocatedContainers = new ArrayList<>();
+    allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+
+    // kick the scheduler
+    waitForContainerAllocation(nodes.values(), am1, allocatedContainers, 6);
+
+    Assert.assertEquals(6, allocatedContainers.size());
+    Map<NodeId, Long> nodeIdContainerIdMap =
+        allocatedContainers.stream().collect(
+            Collectors.groupingBy(c -> c.getNodeId(), Collectors.counting()));
+    // Ensure no more than 3 containers per node (1 'bar', 2 'foo')
+    for (NodeId n : nodeIdContainerIdMap.keySet()) {
+      Assert.assertTrue(nodeIdContainerIdMap.get(n) < 4);
+    }
+  }
+
+  @Test(timeout = 300000)
   public void testSchedulerRejection() throws Exception {
     HashMap<NodeId, MockNM> nodes = new HashMap<>();
     MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
@@ -174,6 +310,7 @@ public class TestPlacementProcessor {
     nm4.registerNode();
 
     RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    // Containers with allocationTag 'foo' are restricted to 1 per NODE
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2,
         Collections.singletonMap(
             Collections.singleton("foo"),
@@ -196,7 +333,6 @@ public class TestPlacementProcessor {
     rejectedReqs.addAll(allocResponse.getRejectedSchedulingRequests());
 
     // kick the scheduler
-
     while (allocCount < 11) {
       nm1.nodeHeartbeat(true);
       nm2.nodeHeartbeat(true);
@@ -253,9 +389,10 @@ public class TestPlacementProcessor {
     nm2.registerNode();
     nm3.registerNode();
     nm4.registerNode();
-    // No not register nm5 yet..
+    // Do not register nm5 yet..
 
     RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    // Containers with allocationTag 'foo' are restricted to 1 per NODE
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2,
         Collections.singletonMap(
             Collections.singleton("foo"),
@@ -323,6 +460,7 @@ public class TestPlacementProcessor {
     nm4.registerNode();
 
     RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    // Containers with allocationTag 'foo' are restricted to 1 per NODE
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2,
         Collections.singletonMap(
             Collections.singleton("foo"),
@@ -346,7 +484,6 @@ public class TestPlacementProcessor {
     rejectedReqs.addAll(allocResponse.getRejectedSchedulingRequests());
 
     // kick the scheduler
-
     while (allocCount < 11) {
       nm1.nodeHeartbeat(true);
       nm2.nodeHeartbeat(true);
@@ -373,6 +510,21 @@ public class TestPlacementProcessor {
         rej.getReason());
   }
 
+  private static void waitForContainerAllocation(Collection<MockNM> nodes,
+      MockAM am, List<Container> allocatedContainers, int containerNum)
+      throws Exception {
+    while (allocatedContainers.size() < containerNum) {
+      for (MockNM node : nodes) {
+        node.nodeHeartbeat(true);
+      }
+      LOG.info("Waiting for containers to be created for "
+          + am.getApplicationAttemptId().getApplicationId() + "...");
+      sleep(1000);
+      AllocateResponse allocResponse = am.schedule();
+      allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+    }
+  }
+
   protected static SchedulingRequest schedulingRequest(
       int priority, long allocReqId, int cores, int mem, String... tags) {
     return schedulingRequest(priority, allocReqId, cores, mem,
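
The new processor tests above exercise three constraint shapes against the same four-node cluster. For quick reference, this is how each shape is built, shown as an illustrative fragment using the static helpers the test class already imports:

    // Anti-affinity: at most one "foo" container per node.
    PlacementConstraint antiAffinity =
        PlacementConstraints.build(targetNotIn(NODE, allocationTag("foo")));
    // Cardinality: no more than 4 "foo" containers per node.
    PlacementConstraint maxFourPerNode =
        PlacementConstraints.build(
            targetCardinality(NODE, 0, 4, allocationTag("foo")));
    // Affinity: "foo" containers only on nodes that already host "bar".
    PlacementConstraint affinity =
        PlacementConstraints.build(targetIn(NODE, allocationTag("bar")));
    // A map from source tags to constraints is handed to the RM when the AM
    // registers, e.g. MockRM.launchAndRegisterAM(app1, rm, nm2, constraintMap).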




[23/32] hadoop git commit: YARN-6599. Support anti-affinity constraint via AppPlacementAllocator. (Wangda Tan via asuresh)

Posted by as...@apache.org.
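
The allocator receives the constraint through the SchedulingRequest itself. A minimal sketch of such a request, mirroring the builder calls in the SingleConstraintAppPlacementAllocator test earlier in this digest (the request id, priority and sizing values are arbitrary):

    // Intra-app anti-affinity: at most one allocation tagged "mapper"/"reducer"
    // per node, on the default partition.
    SchedulingRequest request = SchedulingRequest.newBuilder()
        .executionType(ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
        .allocationRequestId(10L)
        .priority(Priority.newInstance(1))
        .placementConstraintExpression(PlacementConstraints
            .targetCardinality(PlacementConstraints.NODE, 0, 1,
                PlacementConstraints.PlacementTargets
                    .allocationTagToIntraApp("mapper", "reducer"),
                PlacementConstraints.PlacementTargets.nodePartition(""))
            .build())
        .resourceSizing(
            ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
        .build();
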
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
index 73b4f9e..24c5a5e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
 import java.util.Iterator;
 import java.util.Set;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -30,9 +32,12 @@ import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression.TargetType;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations.SingleConstraintTransformer;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.DefaultPlacementAlgorithm;
 
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE_PARTITION;
+
 /**
  * This class contains various static methods used by the Placement Algorithms
  * to simplify constrained placement.
@@ -41,16 +46,20 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algori
 @Public
 @Unstable
 public final class PlacementConstraintsUtil {
+  private static final Log LOG =
+      LogFactory.getLog(PlacementConstraintsUtil.class);
 
   // Suppresses default constructor, ensuring non-instantiability.
   private PlacementConstraintsUtil() {
   }
 
   /**
-   * Returns true if **single** application constraint with associated
+   * Returns true if **single** placement constraint with associated
    * allocationTags and scope is satisfied by a specific scheduler Node.
    *
-   * @param appId the application id
+   * @param targetApplicationId the application id, which could be overridden
+   *                           by the target application id specified inside
+   *                           allocation tags.
    * @param sc the placement constraint
    * @param te the target expression
    * @param node the scheduler node
@@ -59,32 +68,123 @@ public final class PlacementConstraintsUtil {
    * @throws InvalidAllocationTagsQueryException
    */
   private static boolean canSatisfySingleConstraintExpression(
-      ApplicationId appId, SingleConstraint sc, TargetExpression te,
-      SchedulerNode node, AllocationTagsManager tm)
+      ApplicationId targetApplicationId, SingleConstraint sc,
+      TargetExpression te, SchedulerNode node, AllocationTagsManager tm)
       throws InvalidAllocationTagsQueryException {
     long minScopeCardinality = 0;
     long maxScopeCardinality = 0;
+    
+    // Optimizations to only check cardinality if necessary.
+    int desiredMinCardinality = sc.getMinCardinality();
+    int desiredMaxCardinality = sc.getMaxCardinality();
+    boolean checkMinCardinality = desiredMinCardinality > 0;
+    boolean checkMaxCardinality = desiredMaxCardinality < Integer.MAX_VALUE;
+
     if (sc.getScope().equals(PlacementConstraints.NODE)) {
-      minScopeCardinality = tm.getNodeCardinalityByOp(node.getNodeID(), appId,
-          te.getTargetValues(), Long::max);
-      maxScopeCardinality = tm.getNodeCardinalityByOp(node.getNodeID(), appId,
-          te.getTargetValues(), Long::min);
+      if (checkMinCardinality) {
+        minScopeCardinality = tm.getNodeCardinalityByOp(node.getNodeID(),
+            targetApplicationId, te.getTargetValues(), Long::max);
+      }
+      if (checkMaxCardinality) {
+        maxScopeCardinality = tm.getNodeCardinalityByOp(node.getNodeID(),
+            targetApplicationId, te.getTargetValues(), Long::min);
+      }
     } else if (sc.getScope().equals(PlacementConstraints.RACK)) {
-      minScopeCardinality = tm.getRackCardinalityByOp(node.getRackName(), appId,
-          te.getTargetValues(), Long::max);
-      maxScopeCardinality = tm.getRackCardinalityByOp(node.getRackName(), appId,
-          te.getTargetValues(), Long::min);
+      if (checkMinCardinality) {
+        minScopeCardinality = tm.getRackCardinalityByOp(node.getRackName(),
+            targetApplicationId, te.getTargetValues(), Long::max);
+      }
+      if (checkMaxCardinality) {
+        maxScopeCardinality = tm.getRackCardinalityByOp(node.getRackName(),
+            targetApplicationId, te.getTargetValues(), Long::min);
+      }
     }
     // Make sure Anti-affinity satisfies hard upper limit
-    maxScopeCardinality = sc.getMaxCardinality() == 0 ? maxScopeCardinality - 1
+    maxScopeCardinality = desiredMaxCardinality == 0 ? maxScopeCardinality - 1
         : maxScopeCardinality;
 
-    return (minScopeCardinality >= sc.getMinCardinality()
-        && maxScopeCardinality < sc.getMaxCardinality());
+    return (desiredMinCardinality <= 0
+        || minScopeCardinality >= desiredMinCardinality) && (
+        desiredMaxCardinality == Integer.MAX_VALUE
+            || maxScopeCardinality < desiredMaxCardinality);
+  }
+
+  private static boolean canSatisfyNodePartitionConstraintExpresssion(
+      TargetExpression targetExpression, SchedulerNode schedulerNode) {
+    Set<String> values = targetExpression.getTargetValues();
+    if (values == null || values.isEmpty()) {
+      return schedulerNode.getPartition().equals(
+          RMNodeLabelsManager.NO_LABEL);
+    } else{
+      String nodePartition = values.iterator().next();
+      if (!nodePartition.equals(schedulerNode.getPartition())) {
+        return false;
+      }
+    }
+
+    return true;
+  }
+
+  private static boolean canSatisfySingleConstraint(ApplicationId applicationId,
+      SingleConstraint singleConstraint, SchedulerNode schedulerNode,
+      AllocationTagsManager tagsManager)
+      throws InvalidAllocationTagsQueryException {
+    // Iterate through TargetExpressions
+    Iterator<TargetExpression> expIt =
+        singleConstraint.getTargetExpressions().iterator();
+    while (expIt.hasNext()) {
+      TargetExpression currentExp = expIt.next();
+      // Supporting AllocationTag Expressions for now
+      if (currentExp.getTargetType().equals(TargetType.ALLOCATION_TAG)) {
+        // Check if conditions are met
+        if (!canSatisfySingleConstraintExpression(applicationId,
+            singleConstraint, currentExp, schedulerNode, tagsManager)) {
+          return false;
+        }
+      } else if (currentExp.getTargetType().equals(TargetType.NODE_ATTRIBUTE)
+          && currentExp.getTargetKey().equals(NODE_PARTITION)) {
+        // This is a node partition expression, check it.
+        canSatisfyNodePartitionConstraintExpresssion(currentExp, schedulerNode);
+      }
+    }
+    // return true if all targetExpressions are satisfied
+    return true;
+  }
+
+  /**
+   * Returns true if all placement constraints are **currently** satisfied by a
+   * specific scheduler Node.
+   *
+   * To do so the method retrieves and goes through all application constraint
+   * expressions and checks if the specific allocation is between the allowed
+   * min-max cardinality values under the constraint scope (Node/Rack/etc).
+   *
+   * @param applicationId the application id.
+   * @param placementConstraint placement constraint.
+   * @param node the scheduler node
+   * @param tagsManager the allocation tags store
+   * @return true if all application constraints are satisfied by node
+   * @throws InvalidAllocationTagsQueryException
+   */
+  public static boolean canSatisfySingleConstraint(ApplicationId applicationId,
+      PlacementConstraint placementConstraint, SchedulerNode node,
+      AllocationTagsManager tagsManager)
+      throws InvalidAllocationTagsQueryException {
+    if (placementConstraint == null) {
+      return true;
+    }
+    // Transform to SimpleConstraint
+    SingleConstraintTransformer singleTransformer =
+        new SingleConstraintTransformer(placementConstraint);
+    placementConstraint = singleTransformer.transform();
+    AbstractConstraint sConstraintExpr = placementConstraint.getConstraintExpr();
+    SingleConstraint single = (SingleConstraint) sConstraintExpr;
+
+    return canSatisfySingleConstraint(applicationId, single, node, tagsManager);
   }
 
   /**
-   * Returns true if all application constraints with associated allocationTags
+   * Returns true if all placement constraints with associated allocationTags
    * are **currently** satisfied by a specific scheduler Node.
    * To do so the method retrieves and goes through all application constraint
    * expressions and checks if the specific allocation is between the allowed
@@ -98,41 +198,12 @@ public final class PlacementConstraintsUtil {
    * @return true if all application constraints are satisfied by node
    * @throws InvalidAllocationTagsQueryException
    */
-  public static boolean canSatisfyConstraints(ApplicationId appId,
+  public static boolean canSatisfySingleConstraint(ApplicationId appId,
       Set<String> allocationTags, SchedulerNode node,
       PlacementConstraintManager pcm, AllocationTagsManager tagsManager)
       throws InvalidAllocationTagsQueryException {
     PlacementConstraint constraint = pcm.getConstraint(appId, allocationTags);
-    if (constraint == null) {
-      return true;
-    }
-    // Transform to SimpleConstraint
-    SingleConstraintTransformer singleTransformer =
-        new SingleConstraintTransformer(constraint);
-    constraint = singleTransformer.transform();
-    AbstractConstraint sConstraintExpr = constraint.getConstraintExpr();
-    SingleConstraint single = (SingleConstraint) sConstraintExpr;
-    // Iterate through TargetExpressions
-    Iterator<TargetExpression> expIt = single.getTargetExpressions().iterator();
-    while (expIt.hasNext()) {
-      TargetExpression currentExp = expIt.next();
-      // Supporting AllocationTag Expressions for now
-      if (currentExp.getTargetType().equals(TargetType.ALLOCATION_TAG)) {
-        // If source and tag allocation tags are the same, we do not enforce
-        // constraints with minimum cardinality.
-        if (currentExp.getTargetValues().equals(allocationTags)
-            && single.getMinCardinality() > 0) {
-          return true;
-        }
-        // Check if conditions are met
-        if (!canSatisfySingleConstraintExpression(appId, single, currentExp,
-            node, tagsManager)) {
-          return false;
-        }
-      }
-    }
-    // return true if all targetExpressions are satisfied
-    return true;
+    return canSatisfySingleConstraint(appId, constraint, node, tagsManager);
   }
 
 }
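
To make the cardinality arithmetic above concrete, consider the two constraint shapes the scheduler sees most often. This is an illustrative fragment; the tag name and counts are hypothetical:

    // Anti-affinity, i.e. min = 0 and max = 0 for tag "foo".
    PlacementConstraint antiAffinity = PlacementConstraints.build(
        targetNotIn(NODE, allocationTag("foo")));
    // Bounded cardinality: between 1 and 3 "foo" allocations per node.
    PlacementConstraint bounded = PlacementConstraints.build(
        targetCardinality(NODE, 1, 3, allocationTag("foo")));
    // Suppose the tag store reports one "foo" container on the node.
    // For antiAffinity, desiredMaxCardinality == 0, so the measured value is
    // rewritten to 1 - 1 = 0 and the check "0 < 0" fails: the node is rejected.
    // For bounded, 1 >= 1 and 1 < 3 both hold, so the node is accepted; a node
    // already holding three "foo" containers would fail the max check instead.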

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
index 9ed9ab1..eb3fe88 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
@@ -67,7 +67,7 @@ public class DefaultPlacementAlgorithm implements ConstraintPlacementAlgorithm {
       throws InvalidAllocationTagsQueryException {
     int numAllocs = schedulingRequest.getResourceSizing().getNumAllocations();
     if (numAllocs > 0) {
-      if (PlacementConstraintsUtil.canSatisfyConstraints(appId,
+      if (PlacementConstraintsUtil.canSatisfySingleConstraint(appId,
           schedulingRequest.getAllocationTags(), schedulerNode,
           constraintManager, tagsManager)) {
         return true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java
index 8e9c79c..2a6b889 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java
@@ -188,12 +188,18 @@ public class PlacementProcessor implements ApplicationMasterServiceProcessor {
   @Override
   public void allocate(ApplicationAttemptId appAttemptId,
       AllocateRequest request, AllocateResponse response) throws YarnException {
+    // Copy the scheduling requests, since we will clear them from the request
+    // later, after sending them to the dispatcher.
     List<SchedulingRequest> schedulingRequests =
-        request.getSchedulingRequests();
+        new ArrayList<>(request.getSchedulingRequests());
     dispatchRequestsForPlacement(appAttemptId, schedulingRequests);
     reDispatchRetryableRequests(appAttemptId);
     schedulePlacedRequests(appAttemptId);
 
+    // Remove the SchedulingRequests from the AllocateRequest so that they are
+    // not also added to the scheduler.
+    request.setSchedulingRequests(Collections.emptyList());
+
     nextAMSProcessor.allocate(appAttemptId, request, response);
 
     handleRejectedRequests(appAttemptId, response);
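
As a rough usage sketch (assumed for illustration, not taken from this commit): an AM ships SchedulingRequests inside its AllocateRequest, the processor dispatches them for placement, and then clears them so the underlying scheduler only receives ResourceRequests. Exception handling is omitted and schedulingRequest plus the AM protocol handle are assumed to exist:

    // Hedged sketch of the AM side of this flow.
    AllocateRequest req = AllocateRequest.newInstance(0, 0f,
        Collections.<ResourceRequest>emptyList(),
        Collections.<ContainerId>emptyList(), null);
    req.setSchedulingRequests(Collections.singletonList(schedulingRequest));
    AllocateResponse response = applicationMasterProtocol.allocate(req);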

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index e2a62ec..1f85814 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -830,9 +831,9 @@ public class FairScheduler extends
 
   @Override
   public Allocation allocate(ApplicationAttemptId appAttemptId,
-      List<ResourceRequest> ask, List<ContainerId> release,
-      List<String> blacklistAdditions, List<String> blacklistRemovals,
-      ContainerUpdates updateRequests) {
+      List<ResourceRequest> ask, List<SchedulingRequest> schedulingRequests,
+      List<ContainerId> release, List<String> blacklistAdditions,
+      List<String> blacklistRemovals, ContainerUpdates updateRequests) {
 
     // Make sure this application exists
     FSAppAttempt application = getSchedulerApp(appAttemptId);
@@ -857,7 +858,9 @@ public class FairScheduler extends
     handleContainerUpdates(application, updateRequests);
 
     // Sanity check
-    normalizeRequests(ask);
+    normalizeResourceRequests(ask);
+
+    // TODO, normalize SchedulingRequest
 
     // Record container allocation start time
     application.recordContainerRequestTime(getClock().getTime());
@@ -879,6 +882,7 @@ public class FairScheduler extends
         // Update application requests
         application.updateResourceRequests(ask);
 
+        // TODO, handle SchedulingRequest
         application.showRequests();
       }
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
index 59b9608..7ac9027 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -320,8 +321,8 @@ public class FifoScheduler extends
 
   @Override
   public Allocation allocate(ApplicationAttemptId applicationAttemptId,
-      List<ResourceRequest> ask, List<ContainerId> release,
-      List<String> blacklistAdditions, List<String> blacklistRemovals,
+      List<ResourceRequest> ask, List<SchedulingRequest> schedulingRequests,
+      List<ContainerId> release, List<String> blacklistAdditions, List<String> blacklistRemovals,
       ContainerUpdates updateRequests) {
     FifoAppAttempt application = getApplicationAttempt(applicationAttemptId);
     if (application == null) {
@@ -342,7 +343,7 @@ public class FifoScheduler extends
     }
 
     // Sanity check
-    normalizeRequests(ask);
+    normalizeResourceRequests(ask);
 
     // Release containers
     releaseContainers(release, application);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/AppPlacementAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/AppPlacementAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/AppPlacementAllocator.java
index 5c49450..72a6c4c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/AppPlacementAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/AppPlacementAllocator.java
@@ -19,6 +19,8 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement;
 
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
@@ -29,7 +31,6 @@ import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
 
 import java.util.Collection;
 import java.util.Iterator;
-import java.util.List;
 import java.util.Map;
 
 /**
@@ -50,13 +51,18 @@ import java.util.Map;
  * requests.
  * </p>
  */
-public interface AppPlacementAllocator<N extends SchedulerNode> {
+public abstract class AppPlacementAllocator<N extends SchedulerNode> {
+  protected AppSchedulingInfo appSchedulingInfo;
+  protected SchedulerRequestKey schedulerRequestKey;
+  protected RMContext rmContext;
+
   /**
    * Get iterator of preferred node depends on requirement and/or availability
    * @param candidateNodeSet input CandidateNodeSet
    * @return iterator of preferred node
    */
-  Iterator<N> getPreferredNodeIterator(CandidateNodeSet<N> candidateNodeSet);
+  public abstract Iterator<N> getPreferredNodeIterator(
+      CandidateNodeSet<N> candidateNodeSet);
 
   /**
    * Replace existing pending asks by the new requests
@@ -66,15 +72,29 @@ public interface AppPlacementAllocator<N extends SchedulerNode> {
    * requests for preempted container
    * @return true if total pending resource changed
    */
-  PendingAskUpdateResult updatePendingAsk(
+  public abstract PendingAskUpdateResult updatePendingAsk(
       Collection<ResourceRequest> requests,
       boolean recoverPreemptedRequestForAContainer);
 
   /**
+   * Replace existing pending asks by the new SchedulingRequest
+   *
+   * @param schedulerRequestKey                  scheduler request key
+   * @param schedulingRequest                    new asks
+   * @param recoverPreemptedRequestForAContainer if we're recovering resource
+   *                                             requests for preempted container
+   * @return true if total pending resource changed
+   */
+  public abstract PendingAskUpdateResult updatePendingAsk(
+      SchedulerRequestKey schedulerRequestKey,
+      SchedulingRequest schedulingRequest,
+      boolean recoverPreemptedRequestForAContainer);
+
+  /**
    * Get pending ResourceRequests by given schedulerRequestKey
    * @return Map of resourceName to ResourceRequest
    */
-  Map<String, ResourceRequest> getResourceRequests();
+  public abstract Map<String, ResourceRequest> getResourceRequests();
 
   /**
    * Get pending ask for given resourceName. If there's no such pendingAsk,
@@ -83,7 +103,7 @@ public interface AppPlacementAllocator<N extends SchedulerNode> {
    * @param resourceName resourceName
    * @return PendingAsk
    */
-  PendingAsk getPendingAsk(String resourceName);
+  public abstract PendingAsk getPendingAsk(String resourceName);
 
   /**
    * Get #pending-allocations for given resourceName. If there's no such
@@ -92,7 +112,7 @@ public interface AppPlacementAllocator<N extends SchedulerNode> {
    * @param resourceName resourceName
    * @return #pending-allocations
    */
-  int getOutstandingAsksCount(String resourceName);
+  public abstract int getOutstandingAsksCount(String resourceName);
 
   /**
    * Notify container allocated.
@@ -103,7 +123,7 @@ public interface AppPlacementAllocator<N extends SchedulerNode> {
    *         the container. This will be used by scheduler to recover requests.
    *         Please refer to {@link ContainerRequest} for more details.
    */
-  ContainerRequest allocate(SchedulerRequestKey schedulerKey,
+  public abstract ContainerRequest allocate(SchedulerRequestKey schedulerKey,
       NodeType type, SchedulerNode node);
 
   /**
@@ -112,7 +132,7 @@ public interface AppPlacementAllocator<N extends SchedulerNode> {
    * @param node which node we will allocate on
    * @return true if we has pending requirement
    */
-  boolean canAllocate(NodeType type, SchedulerNode node);
+  public abstract boolean canAllocate(NodeType type, SchedulerNode node);
 
   /**
    * Can delay to give locality?
@@ -123,16 +143,16 @@ public interface AppPlacementAllocator<N extends SchedulerNode> {
    * @param resourceName resourceName
    * @return can/cannot
    */
-  boolean canDelayTo(String resourceName);
+  public abstract boolean canDelayTo(String resourceName);
 
   /**
-   * Does this {@link AppPlacementAllocator} accept resources on nodePartition?
+   * Does this {@link AppPlacementAllocator} accept resources on the given node?
    *
-   * @param nodePartition nodePartition
+   * @param schedulerNode schedulerNode
    * @param schedulingMode schedulingMode
    * @return accepted/not
    */
-  boolean acceptNodePartition(String nodePartition,
+  public abstract boolean precheckNode(SchedulerNode schedulerNode,
       SchedulingMode schedulingMode);
 
   /**
@@ -142,7 +162,7 @@ public interface AppPlacementAllocator<N extends SchedulerNode> {
    *
    * @return primary requested node partition
    */
-  String getPrimaryRequestedNodePartition();
+  public abstract String getPrimaryRequestedNodePartition();
 
   /**
    * @return number of unique location asks with #pending greater than 0,
@@ -152,18 +172,24 @@ public interface AppPlacementAllocator<N extends SchedulerNode> {
    * and should belong to specific delay scheduling policy impl.
    * See YARN-7457 for more details.
    */
-  int getUniqueLocationAsks();
+  public abstract int getUniqueLocationAsks();
 
   /**
    * Print human-readable requests to LOG debug.
    */
-  void showRequests();
+  public abstract void showRequests();
 
   /**
-   * Set app scheduling info.
+   * Initialize this allocator. This will be called by the factory automatically.
    *
-   * @param appSchedulingInfo
-   *          app info object.
+   * @param appSchedulingInfo appSchedulingInfo
+   * @param schedulerRequestKey schedulerRequestKey
+   * @param rmContext rmContext
    */
-  void setAppSchedulingInfo(AppSchedulingInfo appSchedulingInfo);
+  public void initialize(AppSchedulingInfo appSchedulingInfo,
+      SchedulerRequestKey schedulerRequestKey, RMContext rmContext) {
+    this.appSchedulingInfo = appSchedulingInfo;
+    this.rmContext = rmContext;
+    this.schedulerRequestKey = schedulerRequestKey;
+  }
 }
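
A minimal sketch of the new contract (assumptions: appSchedulingInfo, schedulerRequestKey and rmContext are in scope): with the switch from an interface plus setter to an abstract class, allocators are constructed with a no-arg constructor and wired up through initialize(), which the creating factory is expected to call.

    // Hedged sketch: create an allocator and hand it its scheduling context.
    AppPlacementAllocator<SchedulerNode> allocator =
        new LocalityAppPlacementAllocator<>();
    allocator.initialize(appSchedulingInfo, schedulerRequestKey, rmContext);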

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java
index be1c1cc..a0358b4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java
@@ -22,8 +22,9 @@ import org.apache.commons.collections.IteratorUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResoureRequestException;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
@@ -46,26 +47,18 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
  * containers.
  */
 public class LocalityAppPlacementAllocator <N extends SchedulerNode>
-    implements AppPlacementAllocator<N> {
+    extends AppPlacementAllocator<N> {
   private static final Log LOG =
       LogFactory.getLog(LocalityAppPlacementAllocator.class);
 
   private final Map<String, ResourceRequest> resourceRequestMap =
       new ConcurrentHashMap<>();
-  private AppSchedulingInfo appSchedulingInfo;
   private volatile String primaryRequestedPartition =
       RMNodeLabelsManager.NO_LABEL;
 
   private final ReentrantReadWriteLock.ReadLock readLock;
   private final ReentrantReadWriteLock.WriteLock writeLock;
 
-  public LocalityAppPlacementAllocator(AppSchedulingInfo info) {
-    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
-    readLock = lock.readLock();
-    writeLock = lock.writeLock();
-    this.appSchedulingInfo = info;
-  }
-
   public LocalityAppPlacementAllocator() {
     ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
     readLock = lock.readLock();
@@ -182,6 +175,19 @@ public class LocalityAppPlacementAllocator <N extends SchedulerNode>
   }
 
   @Override
+  public PendingAskUpdateResult updatePendingAsk(
+      SchedulerRequestKey schedulerRequestKey,
+      SchedulingRequest schedulingRequest,
+      boolean recoverPreemptedRequestForAContainer)
+      throws SchedulerInvalidResoureRequestException {
+    throw new SchedulerInvalidResoureRequestException(this.getClass().getName()
+        + " is not able to handle a SchedulingRequest, because there exists a "
+        + "ResourceRequest with the same scheduler key=" + schedulerRequestKey
+        + ". Please send the SchedulingRequest with a different allocationId "
+        + "and priority");
+  }
+
+  @Override
   public Map<String, ResourceRequest> getResourceRequests() {
     return resourceRequestMap;
   }
@@ -362,13 +368,13 @@ public class LocalityAppPlacementAllocator <N extends SchedulerNode>
   }
 
   @Override
-  public boolean acceptNodePartition(String nodePartition,
+  public boolean precheckNode(SchedulerNode schedulerNode,
       SchedulingMode schedulingMode) {
     // We will only look at node label = nodeLabelToLookAt according to
     // schedulingMode and partition of node.
     String nodePartitionToLookAt;
     if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) {
-      nodePartitionToLookAt = nodePartition;
+      nodePartitionToLookAt = schedulerNode.getPartition();
     } else {
       nodePartitionToLookAt = RMNodeLabelsManager.NO_LABEL;
     }
@@ -425,9 +431,4 @@ public class LocalityAppPlacementAllocator <N extends SchedulerNode>
       writeLock.unlock();
     }
   }
-
-  @Override
-  public void setAppSchedulingInfo(AppSchedulingInfo appSchedulingInfo) {
-    this.appSchedulingInfo = appSchedulingInfo;
-  }
 }
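
In other words (an illustrative reading, not text from the commit): a given scheduler key (priority plus allocationRequestId) must carry either ResourceRequests or SchedulingRequests, never both. The sketch below, with an assumed allocator and pending resourceRequests, shows the rejected sequence:

    // Hedged sketch: the second call is rejected because the same scheduler
    // key already has ResourceRequests associated with it, and throws
    // SchedulerInvalidResoureRequestException.
    allocator.updatePendingAsk(resourceRequests, false);
    allocator.updatePendingAsk(schedulerKey, schedulingRequest, false);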

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
new file mode 100644
index 0000000..f8f758c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
@@ -0,0 +1,531 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.collections.IteratorUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.api.records.impl.pb.SchedulingRequestPBImpl;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResoureRequestException;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.InvalidAllocationTagsQueryException;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintsUtil;
+import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.APPLICATION_LABEL_INTRA_APPLICATION;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE_PARTITION;
+
+/**
+ * This is a simple implementation that handles affinity and anti-affinity
+ * placement for intra-/inter-application requests.
+ */
+public class SingleConstraintAppPlacementAllocator<N extends SchedulerNode>
+    extends AppPlacementAllocator<N> {
+  private static final Log LOG =
+      LogFactory.getLog(SingleConstraintAppPlacementAllocator.class);
+
+  private ReentrantReadWriteLock.ReadLock readLock;
+  private ReentrantReadWriteLock.WriteLock writeLock;
+
+  private SchedulingRequest schedulingRequest = null;
+  private String targetNodePartition;
+  private Set<String> targetAllocationTags;
+  private AllocationTagsManager allocationTagsManager;
+
+  public SingleConstraintAppPlacementAllocator() {
+    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+    readLock = lock.readLock();
+    writeLock = lock.writeLock();
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public Iterator<N> getPreferredNodeIterator(
+      CandidateNodeSet<N> candidateNodeSet) {
+    // For now, only handle the case where the candidateNodeSet contains a
+    // single node.
+    // TODO: add support for multiple hosts inside the candidateNodeSet that
+    // is passed in.
+
+    N singleNode = CandidateNodeSetUtils.getSingleNode(candidateNodeSet);
+    if (null != singleNode) {
+      return IteratorUtils.singletonIterator(singleNode);
+    }
+
+    return IteratorUtils.emptyIterator();
+  }
+
+  @Override
+  public PendingAskUpdateResult updatePendingAsk(
+      Collection<ResourceRequest> requests,
+      boolean recoverPreemptedRequestForAContainer) {
+    if (requests != null && !requests.isEmpty()) {
+      throw new SchedulerInvalidResoureRequestException(
+          this.getClass().getName()
+              + " not be able to handle ResourceRequest, there exists a "
+              + "SchedulingRequest with the same scheduler key="
+              + SchedulerRequestKey.create(requests.iterator().next())
+              + ", please send ResourceRequest with a different allocationId and "
+              + "priority");
+    }
+
+    // Do nothing
+    return null;
+  }
+
+  private PendingAskUpdateResult internalUpdatePendingAsk(
+      SchedulingRequest newSchedulingRequest, boolean recoverContainer) {
+    // When recovering a container, a schedulingRequest must already exist.
+    if (recoverContainer && schedulingRequest == null) {
+      throw new SchedulerInvalidResoureRequestException("Trying to recover a "
+          + "container request=" + newSchedulingRequest.toString() + ", however "
+          + "there is no existing scheduling request; this should not happen.");
+    }
+
+    if (schedulingRequest != null) {
+      // If we already have a scheduling request, make sure that nothing has
+      // changed except the sizing.
+      // To avoid an unnecessary copy of the data structure, we do this by
+      // temporarily replacing numAllocations in
+      // newSchedulingRequest#getResourceSizing with the old value and then
+      // comparing the two objects.
+      ResourceSizing sizing = newSchedulingRequest.getResourceSizing();
+      int existingNumAllocations =
+          schedulingRequest.getResourceSizing().getNumAllocations();
+
+      // When it is a recovered container request, just set
+      // #newAllocations = #existingAllocations + 1;
+      int newNumAllocations;
+      if (recoverContainer) {
+        newNumAllocations = existingNumAllocations + 1;
+      } else {
+        newNumAllocations = sizing.getNumAllocations();
+      }
+      sizing.setNumAllocations(existingNumAllocations);
+
+      // Compare two objects
+      if (!schedulingRequest.equals(newSchedulingRequest)) {
+        // Rollback #numAllocations
+        sizing.setNumAllocations(newNumAllocations);
+        throw new SchedulerInvalidResoureRequestException(
+            "Invalid updated SchedulingRequest added to scheduler, "
+                + " we only allows changing numAllocations for the updated "
+                + "SchedulingRequest. Old=" + schedulingRequest.toString()
+                + " new=" + newSchedulingRequest.toString()
+                + ", if any fields need to be updated, please cancel the "
+                + "old request (by setting numAllocations to 0) and send a "
+                + "SchedulingRequest with different combination of "
+                + "priority/allocationId");
+      } else {
+        if (newNumAllocations == existingNumAllocations) {
+          // No update on pending asks, return null.
+          return null;
+        }
+      }
+
+      // Rollback #numAllocations
+      sizing.setNumAllocations(newNumAllocations);
+
+      // Basic sanity check
+      if (newNumAllocations < 0) {
+        throw new SchedulerInvalidResoureRequestException(
+            "numAllocation in ResourceSizing field must be >= 0, "
+                + "updating schedulingRequest failed.");
+      }
+
+      PendingAskUpdateResult updateResult = new PendingAskUpdateResult(
+          new PendingAsk(schedulingRequest.getResourceSizing()),
+          new PendingAsk(newSchedulingRequest.getResourceSizing()),
+          targetNodePartition, targetNodePartition);
+
+      // Ok, now everything is same except numAllocation, update numAllocation.
+      this.schedulingRequest.getResourceSizing().setNumAllocations(
+          newNumAllocations);
+      LOG.info(
+          "Update numAllocation from old=" + existingNumAllocations + " to new="
+              + newNumAllocations);
+
+      return updateResult;
+    }
+
+    // For a new schedulingRequest, we need to validate whether we support its
+    // asks. Once the SchedulingRequest is found valid, this updates internal
+    // state such as the target partition and target allocation tags.
+    validateAndSetSchedulingRequest(newSchedulingRequest);
+
+    return new PendingAskUpdateResult(null,
+        new PendingAsk(newSchedulingRequest.getResourceSizing()), null,
+        targetNodePartition);
+  }
+
+  @Override
+  public PendingAskUpdateResult updatePendingAsk(
+      SchedulerRequestKey schedulerRequestKey,
+      SchedulingRequest newSchedulingRequest,
+      boolean recoverPreemptedRequestForAContainer) {
+    writeLock.lock();
+    try {
+      return internalUpdatePendingAsk(newSchedulingRequest,
+          recoverPreemptedRequestForAContainer);
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  private String throwExceptionWithMetaInfo(String message) {
+    StringBuilder sb = new StringBuilder();
+    sb.append("AppId=").append(appSchedulingInfo.getApplicationId()).append(
+        " Key=").append(this.schedulerRequestKey).append(". Exception message:")
+        .append(message);
+    throw new SchedulerInvalidResoureRequestException(sb.toString());
+  }
+
+  private void validateAndSetSchedulingRequest(SchedulingRequest newSchedulingRequest)
+      throws SchedulerInvalidResoureRequestException {
+    // Check sizing exists
+    if (newSchedulingRequest.getResourceSizing() == null
+        || newSchedulingRequest.getResourceSizing().getResources() == null) {
+      throwExceptionWithMetaInfo(
+          "No ResourceSizing found in the scheduling request, please double "
+              + "check");
+    }
+
+    // Check execution type == GUARANTEED
+    if (newSchedulingRequest.getExecutionType() != null
+        && newSchedulingRequest.getExecutionType().getExecutionType()
+        != ExecutionType.GUARANTEED) {
+      throwExceptionWithMetaInfo(
+          "Only GUARANTEED execution type is supported.");
+    }
+
+    PlacementConstraint constraint =
+        newSchedulingRequest.getPlacementConstraint();
+
+    // We only accept SingleConstraint
+    PlacementConstraint.AbstractConstraint ac = constraint.getConstraintExpr();
+    if (!(ac instanceof PlacementConstraint.SingleConstraint)) {
+      throwExceptionWithMetaInfo(
+          "Only accepts " + PlacementConstraint.SingleConstraint.class.getName()
+              + " as constraint-expression. Rejecting the new added "
+              + "constraint-expression.class=" + ac.getClass().getName());
+    }
+
+    PlacementConstraint.SingleConstraint singleConstraint =
+        (PlacementConstraint.SingleConstraint) ac;
+
+    // Make sure it is an anti-affinity request. (This implementation could
+    // support both affinity and anti-affinity without much effort, but given
+    // the test effort that would be required, we limit it to intra-app
+    // anti-affinity with node scope for now.)
+    if (!singleConstraint.getScope().equals(PlacementConstraints.NODE)) {
+      throwExceptionWithMetaInfo(
+          "Only support scope=" + PlacementConstraints.NODE
+              + "now. PlacementConstraint=" + singleConstraint);
+    }
+
+    if (singleConstraint.getMinCardinality() != 0
+        || singleConstraint.getMaxCardinality() != 1) {
+      throwExceptionWithMetaInfo(
+          "Only support anti-affinity, which is: minCardinality=0, "
+              + "maxCardinality=1");
+    }
+
+    Set<PlacementConstraint.TargetExpression> targetExpressionSet =
+        singleConstraint.getTargetExpressions();
+    if (targetExpressionSet == null || targetExpressionSet.isEmpty()) {
+      throwExceptionWithMetaInfo(
+          "TargetExpression should not be null or empty");
+    }
+
+    // Set node partition
+    String nodePartition = null;
+
+    // Target allocation tags
+    Set<String> targetAllocationTags = null;
+
+    for (PlacementConstraint.TargetExpression targetExpression : targetExpressionSet) {
+      // Handle node partition
+      if (targetExpression.getTargetType().equals(
+          PlacementConstraint.TargetExpression.TargetType.NODE_ATTRIBUTE)) {
+        // For a node attribute target, we only support the node partition for
+        // now. Once YARN-3409 is merged, we will support arbitrary node
+        // attributes.
+        if (!targetExpression.getTargetKey().equals(NODE_PARTITION)) {
+          throwExceptionWithMetaInfo("When TargetType="
+              + PlacementConstraint.TargetExpression.TargetType.NODE_ATTRIBUTE
+              + " only " + NODE_PARTITION + " is accepted as TargetKey.");
+        }
+
+        if (nodePartition != null) {
+          // This means we have duplicate node partition entries inside the
+          // placement constraint, which might have been set by mistake.
+          throwExceptionWithMetaInfo(
+              "Only one node partition targetExpression is allowed");
+        }
+
+        Set<String> values = targetExpression.getTargetValues();
+        if (values == null || values.isEmpty()) {
+          nodePartition = RMNodeLabelsManager.NO_LABEL;
+          continue;
+        }
+
+        if (values.size() > 1) {
+          throwExceptionWithMetaInfo("Inside one targetExpression, we only "
+              + "support affinity to at most one node partition now");
+        }
+
+        nodePartition = values.iterator().next();
+      } else if (targetExpression.getTargetType().equals(
+          PlacementConstraint.TargetExpression.TargetType.ALLOCATION_TAG)) {
+        // Handle allocation tags
+        if (targetAllocationTags != null) {
+          // This means we have duplicate AllocationTag expression entries
+          // inside the placement constraint, which might have been set by
+          // mistake.
+          throwExceptionWithMetaInfo(
+              "Only one AllocationTag targetExpression is allowed");
+        }
+
+        if (targetExpression.getTargetValues() == null || targetExpression
+            .getTargetValues().isEmpty()) {
+          throwExceptionWithMetaInfo("Failed to find allocation tags from "
+              + "TargetExpressions or couldn't find self-app target.");
+        }
+
+        targetAllocationTags = new HashSet<>(
+            targetExpression.getTargetValues());
+
+        if (targetExpression.getTargetKey() == null || !targetExpression
+            .getTargetKey().equals(APPLICATION_LABEL_INTRA_APPLICATION)) {
+          throwExceptionWithMetaInfo(
+              "As of now, the only accepted target key for targetKey of "
+                  + "allocation_tag target expression is: ["
+                  + APPLICATION_LABEL_INTRA_APPLICATION
+                  + "]. Please make changes to placement constraints "
+                  + "accordingly.");
+        }
+      }
+    }
+
+    if (targetAllocationTags == null) {
+      // That means we don't have ALLOCATION_TAG specified
+      throwExceptionWithMetaInfo(
+          "Couldn't find target expression with type == ALLOCATION_TAG, it is "
+              + "required to include one and only one target expression with "
+              + "type == ALLOCATION_TAG");
+
+    }
+
+    if (nodePartition == null) {
+      nodePartition = RMNodeLabelsManager.NO_LABEL;
+    }
+
+    // Validation is done. Set local results:
+    this.targetNodePartition = nodePartition;
+    this.targetAllocationTags = targetAllocationTags;
+
+    this.schedulingRequest = new SchedulingRequestPBImpl(
+        ((SchedulingRequestPBImpl) newSchedulingRequest).getProto());
+
+    LOG.info("Successfully added SchedulingRequest to app=" + appSchedulingInfo
+        .getApplicationAttemptId() + " targetAllocationTags=[" + StringUtils
+        .join(",", targetAllocationTags) + "]. nodePartition="
+        + targetNodePartition);
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public Map<String, ResourceRequest> getResourceRequests() {
+    return Collections.EMPTY_MAP;
+  }
+
+  @Override
+  public PendingAsk getPendingAsk(String resourceName) {
+    readLock.lock();
+    try {
+      if (resourceName.equals("*") && schedulingRequest != null) {
+        return new PendingAsk(schedulingRequest.getResourceSizing());
+      }
+      return PendingAsk.ZERO;
+    } finally {
+      readLock.unlock();
+    }
+
+  }
+
+  @Override
+  public int getOutstandingAsksCount(String resourceName) {
+    readLock.lock();
+    try {
+      if (resourceName.equals("*") && schedulingRequest != null) {
+        return schedulingRequest.getResourceSizing().getNumAllocations();
+      }
+      return 0;
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  private void decreasePendingNumAllocation() {
+    // Deduct pending #allocations by 1
+    ResourceSizing sizing = schedulingRequest.getResourceSizing();
+    sizing.setNumAllocations(sizing.getNumAllocations() - 1);
+  }
+
+  @Override
+  public ContainerRequest allocate(SchedulerRequestKey schedulerKey,
+      NodeType type, SchedulerNode node) {
+    writeLock.lock();
+    try {
+      // The per-container scheduling request is just a copy of the existing
+      // scheduling request, with #allocations set to 1.
+      SchedulingRequest containerSchedulingRequest = new SchedulingRequestPBImpl(
+          ((SchedulingRequestPBImpl) schedulingRequest).getProto());
+      containerSchedulingRequest.getResourceSizing().setNumAllocations(1);
+
+      // Deduct sizing
+      decreasePendingNumAllocation();
+
+      return new ContainerRequest(containerSchedulingRequest);
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  private boolean checkCardinalityAndPending(SchedulerNode node) {
+    // Do we still have pending resource?
+    if (schedulingRequest.getResourceSizing().getNumAllocations() <= 0) {
+      return false;
+    }
+
+    // node type will be ignored.
+    try {
+      return PlacementConstraintsUtil.canSatisfySingleConstraint(
+          appSchedulingInfo.getApplicationId(),
+          this.schedulingRequest.getPlacementConstraint(), node,
+          allocationTagsManager);
+    } catch (InvalidAllocationTagsQueryException e) {
+      LOG.warn("Failed to query node cardinality:", e);
+      return false;
+    }
+  }
+
+  @Override
+  public boolean canAllocate(NodeType type, SchedulerNode node) {
+    try {
+      readLock.lock();
+      return checkCardinalityAndPending(node);
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  @Override
+  public boolean canDelayTo(String resourceName) {
+    return true;
+  }
+
+  @Override
+  public boolean precheckNode(SchedulerNode schedulerNode,
+      SchedulingMode schedulingMode) {
+    // We will only look at node label = nodeLabelToLookAt according to
+    // schedulingMode and partition of node.
+    String nodePartitionToLookAt;
+    if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) {
+      nodePartitionToLookAt = schedulerNode.getPartition();
+    } else {
+      nodePartitionToLookAt = RMNodeLabelsManager.NO_LABEL;
+    }
+
+    readLock.lock();
+    try {
+      // Check node partition as well as cardinality/pending resources.
+      return this.targetNodePartition.equals(nodePartitionToLookAt)
+          && checkCardinalityAndPending(schedulerNode);
+    } finally {
+      readLock.unlock();
+    }
+
+  }
+
+  @Override
+  public String getPrimaryRequestedNodePartition() {
+    return targetNodePartition;
+  }
+
+  @Override
+  public int getUniqueLocationAsks() {
+    return 1;
+  }
+
+  @Override
+  public void showRequests() {
+    try {
+      readLock.lock();
+      if (schedulingRequest != null) {
+        LOG.info(schedulingRequest.toString());
+      }
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  @VisibleForTesting
+  SchedulingRequest getSchedulingRequest() {
+    return schedulingRequest;
+  }
+
+  @VisibleForTesting
+  String getTargetNodePartition() {
+    return targetNodePartition;
+  }
+
+  @VisibleForTesting
+  Set<String> getTargetAllocationTags() {
+    return targetAllocationTags;
+  }
+
+  @Override
+  public void initialize(AppSchedulingInfo appSchedulingInfo,
+      SchedulerRequestKey schedulerRequestKey, RMContext rmContext) {
+    super.initialize(appSchedulingInfo, schedulerRequestKey, rmContext);
+    this.allocationTagsManager = rmContext.getAllocationTagsManager();
+  }
+}
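
For reference, a SchedulingRequest that passes the validation above might be built roughly as follows. This is a hedged sketch: the tag "hbase-rs", priority, allocation id and sizing values are made up for illustration, and only the builder calls already used elsewhere in this commit are assumed.

    // Hedged sketch: intra-app anti-affinity on tag "hbase-rs", scope NODE,
    // cardinality 0..1, GUARANTEED execution type -- the only shape currently
    // accepted by this allocator.
    SchedulingRequest request = SchedulingRequest.newBuilder()
        .executionType(ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
        .allocationRequestId(1L)
        .priority(Priority.newInstance(1))
        .allocationTags(Collections.singleton("hbase-rs"))
        .placementConstraintExpression(
            PlacementConstraints.targetCardinality(PlacementConstraints.NODE, 0, 1,
                PlacementConstraints.PlacementTargets
                    .allocationTagToIntraApp("hbase-rs")).build())
        .resourceSizing(
            ResourceSizing.newInstance(5, Resource.newInstance(1024, 1)))
        .build();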

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
index fbde681..7d1140d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
@@ -331,8 +331,7 @@ public class Application {
     
     // Get resources from the ResourceManager
     Allocation allocation = resourceManager.getResourceScheduler().allocate(
-        applicationAttemptId, new ArrayList<ResourceRequest>(ask),
-        new ArrayList<ContainerId>(), null, null,
+        applicationAttemptId, new ArrayList<ResourceRequest>(ask), null, new ArrayList<ContainerId>(), null, null,
         new ContainerUpdates());
 
     if (LOG.isInfoEnabled()) {
@@ -431,7 +430,7 @@ public class Application {
     if (type == NodeType.NODE_LOCAL) {
       for (String host : task.getHosts()) {
         if(LOG.isDebugEnabled()) {
-          LOG.debug("updatePendingAsk:" + " application=" + applicationId
+          LOG.debug("updateResourceDemands:" + " application=" + applicationId
             + " type=" + type + " host=" + host
             + " request=" + ((requests == null) ? "null" : requests.get(host)));
         }
@@ -442,7 +441,7 @@ public class Application {
     if (type == NodeType.NODE_LOCAL || type == NodeType.RACK_LOCAL) {
       for (String rack : task.getRacks()) {
         if(LOG.isDebugEnabled()) {
-          LOG.debug("updatePendingAsk:" + " application=" + applicationId
+          LOG.debug("updateResourceDemands:" + " application=" + applicationId
             + " type=" + type + " rack=" + rack
             + " request=" + ((requests == null) ? "null" : requests.get(rack)));
         }
@@ -453,7 +452,7 @@ public class Application {
     updateResourceRequest(requests.get(ResourceRequest.ANY));
     
     if(LOG.isDebugEnabled()) {
-      LOG.debug("updatePendingAsk:" + " application=" + applicationId
+      LOG.debug("updateResourceDemands:" + " application=" + applicationId
         + " #asks=" + ask.size());
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
index 975abe6..9fa2c40 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -37,14 +38,17 @@ import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRespo
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
@@ -281,6 +285,53 @@ public class MockAM {
     }
     return allocate(req);
   }
+
+  public AllocateResponse allocate(List<ResourceRequest> resourceRequest,
+      List<SchedulingRequest> newSchedulingRequests, List<ContainerId> releases)
+      throws Exception {
+    final AllocateRequest req =
+        AllocateRequest.newInstance(0, 0F, resourceRequest,
+            releases, null);
+    if (newSchedulingRequests != null) {
+      addSchedulingRequest(newSchedulingRequests);
+    }
+    if (!schedulingRequests.isEmpty()) {
+      req.setSchedulingRequests(schedulingRequests);
+      schedulingRequests.clear();
+    }
+    return allocate(req);
+  }
+
+  public AllocateResponse allocateIntraAppAntiAffinity(
+      ResourceSizing resourceSizing, Priority priority, long allocationId,
+      Set<String> allocationTags, String... targetTags) throws Exception {
+    return this.allocate(null,
+        Arrays.asList(SchedulingRequest.newBuilder().executionType(
+            ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
+            .allocationRequestId(allocationId).priority(priority)
+            .allocationTags(allocationTags).placementConstraintExpression(
+                PlacementConstraints
+                    .targetCardinality(PlacementConstraints.NODE, 0, 1,
+                        PlacementConstraints.PlacementTargets
+                            .allocationTagToIntraApp(targetTags)).build())
+            .resourceSizing(resourceSizing).build()), null);
+  }
+
+  public AllocateResponse allocateIntraAppAntiAffinity(
+      String nodePartition, ResourceSizing resourceSizing, Priority priority,
+      long allocationId, String... tags) throws Exception {
+    return this.allocate(null,
+        Arrays.asList(SchedulingRequest.newBuilder().executionType(
+            ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
+            .allocationRequestId(allocationId).priority(priority)
+            .placementConstraintExpression(PlacementConstraints
+                .targetCardinality(PlacementConstraints.NODE, 0, 1,
+                    PlacementConstraints.PlacementTargets
+                        .allocationTagToIntraApp(tags),
+                    PlacementConstraints.PlacementTargets
+                        .nodePartition(nodePartition)).build())
+            .resourceSizing(resourceSizing).build()), null);
+  }
   
   public AllocateResponse sendContainerResizingRequest(
       List<UpdateContainerRequest> updateRequests) throws Exception {
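
A hedged usage sketch for the new test helper (the resource, priority and tag values are illustrative, not from this commit): a test AM can ask for five mutually anti-affine "mapper" containers with a single call.

    // Hedged sketch: five GUARANTEED containers, no two on the same node,
    // all tagged "mapper" and anti-affine to the "mapper" tag.
    am.allocateIntraAppAntiAffinity(
        ResourceSizing.newInstance(5, Resource.newInstance(1024, 1)),
        Priority.newInstance(1), 0L, ImmutableSet.of("mapper"), "mapper");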

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
index 0e4f308..4a5c671 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
@@ -474,7 +474,7 @@ public class TestRMAppAttemptTransitions {
 
     assertEquals(expectedState, applicationAttempt.getAppAttemptState());
     verify(scheduler, times(expectedAllocateCount)).allocate(
-        any(ApplicationAttemptId.class), any(List.class), any(List.class),
+        any(ApplicationAttemptId.class), any(List.class), eq(null), any(List.class),
         any(List.class), any(List.class), any(ContainerUpdates.class));
 
     assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
@@ -495,7 +495,7 @@ public class TestRMAppAttemptTransitions {
     // Check events
     verify(applicationMasterLauncher).handle(any(AMLauncherEvent.class));
     verify(scheduler, times(2)).allocate(any(ApplicationAttemptId.class),
-        any(List.class), any(List.class), any(List.class), any(List.class),
+        any(List.class), any(List.class), any(List.class), any(List.class), any(List.class),
         any(ContainerUpdates.class));
     verify(nmTokenManager).clearNodeSetForAttempt(
       applicationAttempt.getAppAttemptId());
@@ -643,7 +643,7 @@ public class TestRMAppAttemptTransitions {
     when(allocation.getContainers()).
         thenReturn(Collections.singletonList(container));
     when(scheduler.allocate(any(ApplicationAttemptId.class), any(List.class),
-        any(List.class), any(List.class), any(List.class),
+        any(List.class), any(List.class), any(List.class), any(List.class),
         any(ContainerUpdates.class))).
     thenReturn(allocation);
     RMContainer rmContainer = mock(RMContainerImpl.class);
@@ -1161,7 +1161,7 @@ public class TestRMAppAttemptTransitions {
     when(allocation.getContainers()).
         thenReturn(Collections.singletonList(amContainer));
     when(scheduler.allocate(any(ApplicationAttemptId.class), any(List.class),
-        any(List.class), any(List.class), any(List.class),
+        any(List.class), any(List.class), any(List.class), any(List.class),
         any(ContainerUpdates.class)))
         .thenReturn(allocation);
     RMContainer rmContainer = mock(RMContainerImpl.class);
@@ -1636,7 +1636,7 @@ public class TestRMAppAttemptTransitions {
   public void testScheduleTransitionReplaceAMContainerRequestWithDefaults() {
     YarnScheduler mockScheduler = mock(YarnScheduler.class);
     when(mockScheduler.allocate(any(ApplicationAttemptId.class),
-        any(List.class), any(List.class), any(List.class), any(List.class),
+        any(List.class), any(List.class), any(List.class), any(List.class), any(List.class),
         any(ContainerUpdates.class)))
         .thenAnswer(new Answer<Allocation>() {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
index b927870..2bf6a21 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
@@ -33,6 +33,7 @@ import java.util.ArrayList;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
+import com.google.common.collect.ImmutableSet;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -420,9 +421,10 @@ public class TestRMContainerImpl {
     when(rmContext.getYarnConfiguration()).thenReturn(conf);
 
     /* First container: ALLOCATED -> KILLED */
-    RMContainer rmContainer = new RMContainerImpl(container,
+    RMContainerImpl rmContainer = new RMContainerImpl(container,
         SchedulerRequestKey.extractFrom(container), appAttemptId,
         nodeId, "user", rmContext);
+    rmContainer.setAllocationTags(ImmutableSet.of("mapper"));
 
     Assert.assertEquals(0,
         tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
@@ -448,6 +450,7 @@ public class TestRMContainerImpl {
     Assert.assertEquals(0,
         tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
 
+    rmContainer.setAllocationTags(ImmutableSet.of("mapper"));
     rmContainer.handle(new RMContainerEvent(containerId,
         RMContainerEventType.START));
 
@@ -468,6 +471,7 @@ public class TestRMContainerImpl {
     rmContainer = new RMContainerImpl(container,
         SchedulerRequestKey.extractFrom(container), appAttemptId,
         nodeId, "user", rmContext);
+    rmContainer.setAllocationTags(ImmutableSet.of("mapper"));
 
     Assert.assertEquals(0,
         tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java
index 3692b29..b7b0eb7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAppSchedulingInfo.java
@@ -46,7 +46,7 @@ public class TestAppSchedulingInfo {
     doReturn("test").when(queue).getQueueName();
     AppSchedulingInfo appSchedulingInfo = new AppSchedulingInfo(appAttemptId,
         "test", queue, null, 0, new ResourceUsage(),
-        new HashMap<String, String>());
+        new HashMap<String, String>(), null);
 
     appSchedulingInfo.updatePlacesBlacklistedByApp(new ArrayList<String>(),
         new ArrayList<String>());
@@ -118,7 +118,7 @@ public class TestAppSchedulingInfo {
     doReturn(mock(QueueMetrics.class)).when(queue).getMetrics();
     AppSchedulingInfo  info = new AppSchedulingInfo(
         appAttemptId, "test", queue, mock(ActiveUsersManager.class), 0,
-        new ResourceUsage(), new HashMap<String, String>());
+        new ResourceUsage(), new HashMap<>(), null);
     Assert.assertEquals(0, info.getSchedulerKeys().size());
 
     Priority pri1 = Priority.newInstance(1);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerTestBase.java
new file mode 100644
index 0000000..5cea3a2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerTestBase.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import com.google.common.collect.Sets;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.junit.Assert;
+
+import java.util.Set;
+
+public class CapacitySchedulerTestBase {
+  protected final int GB = 1024;
+
+  protected static final String A = CapacitySchedulerConfiguration.ROOT + ".a";
+  protected static final String B = CapacitySchedulerConfiguration.ROOT + ".b";
+  protected static final String A1 = A + ".a1";
+  protected static final String A2 = A + ".a2";
+  protected static final String B1 = B + ".b1";
+  protected static final String B2 = B + ".b2";
+  protected static final String B3 = B + ".b3";
+  protected static float A_CAPACITY = 10.5f;
+  protected static float B_CAPACITY = 89.5f;
+  protected static final String P1 = CapacitySchedulerConfiguration.ROOT + ".p1";
+  protected static final String P2 = CapacitySchedulerConfiguration.ROOT + ".p2";
+  protected static final String X1 = P1 + ".x1";
+  protected static final String X2 = P1 + ".x2";
+  protected static final String Y1 = P2 + ".y1";
+  protected static final String Y2 = P2 + ".y2";
+  protected static float A1_CAPACITY = 30;
+  protected static float A2_CAPACITY = 70;
+  protected static float B1_CAPACITY = 79.2f;
+  protected static float B2_CAPACITY = 0.8f;
+  protected static float B3_CAPACITY = 20;
+
+
+  @SuppressWarnings("unchecked")
+  protected <E> Set<E> toSet(E... elements) {
+    Set<E> set = Sets.newHashSet(elements);
+    return set;
+  }
+
+  protected void checkPendingResource(MockRM rm, String queueName, int memory,
+      String label) {
+    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+    CSQueue queue = cs.getQueue(queueName);
+    Assert.assertEquals(
+        memory,
+        queue.getQueueResourceUsage()
+            .getPending(label == null ? RMNodeLabelsManager.NO_LABEL : label)
+            .getMemorySize());
+  }
+
+
+  protected void checkPendingResourceGreaterThanZero(MockRM rm, String queueName,
+      String label) {
+    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+    CSQueue queue = cs.getQueue(queueName);
+    Assert.assertTrue(queue.getQueueResourceUsage()
+        .getPending(label == null ? RMNodeLabelsManager.NO_LABEL : label)
+        .getMemorySize() > 0);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 7628312..79898bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -103,7 +103,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MockRMW
 import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MyContainerManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
-import org.apache.hadoop.yarn.server.resourcemanager.placement.UserGroupMappingPlacementRule;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMetrics;
@@ -167,33 +166,10 @@ import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
 
-public class TestCapacityScheduler {
+public class TestCapacityScheduler extends CapacitySchedulerTestBase {
   private static final Log LOG = LogFactory.getLog(TestCapacityScheduler.class);
-  private final int GB = 1024;
   private final static ContainerUpdates NULL_UPDATE_REQUESTS =
       new ContainerUpdates();
-
-  private static final String A = CapacitySchedulerConfiguration.ROOT + ".a";
-  private static final String B = CapacitySchedulerConfiguration.ROOT + ".b";
-  private static final String A1 = A + ".a1";
-  private static final String A2 = A + ".a2";
-  private static final String B1 = B + ".b1";
-  private static final String B2 = B + ".b2";
-  private static final String B3 = B + ".b3";
-  private static float A_CAPACITY = 10.5f;
-  private static float B_CAPACITY = 89.5f;
-  private static final String P1 = CapacitySchedulerConfiguration.ROOT + ".p1";
-  private static final String P2 = CapacitySchedulerConfiguration.ROOT + ".p2";
-  private static final String X1 = P1 + ".x1";
-  private static final String X2 = P1 + ".x2";
-  private static final String Y1 = P2 + ".y1";
-  private static final String Y2 = P2 + ".y2";
-  private static float A1_CAPACITY = 30;
-  private static float A2_CAPACITY = 70;
-  private static float B1_CAPACITY = 79.2f;
-  private static float B2_CAPACITY = 0.8f;
-  private static float B3_CAPACITY = 20;
-
   private ResourceManager resourceManager = null;
   private RMContext mockContext;
 
@@ -1116,12 +1092,12 @@ public class TestCapacityScheduler {
     cs.handle(addAttemptEvent);
 
     // Verify the blacklist can be updated independent of requesting containers
-    cs.allocate(appAttemptId, Collections.<ResourceRequest>emptyList(),
+    cs.allocate(appAttemptId, Collections.<ResourceRequest>emptyList(), null,
         Collections.<ContainerId>emptyList(),
         Collections.singletonList(host), null, NULL_UPDATE_REQUESTS);
     Assert.assertTrue(cs.getApplicationAttempt(appAttemptId)
         .isPlaceBlacklisted(host));
-    cs.allocate(appAttemptId, Collections.<ResourceRequest>emptyList(),
+    cs.allocate(appAttemptId, Collections.<ResourceRequest>emptyList(), null,
         Collections.<ContainerId>emptyList(), null,
         Collections.singletonList(host), NULL_UPDATE_REQUESTS);
     Assert.assertFalse(cs.getApplicationAttempt(appAttemptId)
@@ -1217,8 +1193,7 @@ public class TestCapacityScheduler {
 
     //This will allocate for app1
     cs.allocate(appAttemptId1,
-        Collections.<ResourceRequest>singletonList(r1),
-        Collections.<ContainerId>emptyList(),
+        Collections.<ResourceRequest>singletonList(r1), null, Collections.<ContainerId>emptyList(),
         null, null, NULL_UPDATE_REQUESTS);
 
     //And this will result in container assignment for app1
@@ -1234,8 +1209,7 @@ public class TestCapacityScheduler {
     //Now, allocate for app2 (this would be the first/AM allocation)
     ResourceRequest r2 = TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 1, true, priority, recordFactory);
     cs.allocate(appAttemptId2,
-        Collections.<ResourceRequest>singletonList(r2),
-        Collections.<ContainerId>emptyList(),
+        Collections.<ResourceRequest>singletonList(r2), null, Collections.<ContainerId>emptyList(),
         null, null, NULL_UPDATE_REQUESTS);
 
     //In this case we do not perform container assignment because we want to
@@ -3481,12 +3455,6 @@ public class TestCapacityScheduler {
         + "queue-a's max capacity will be violated if container allocated");
   }
 
-  @SuppressWarnings("unchecked")
-  private <E> Set<E> toSet(E... elements) {
-    Set<E> set = Sets.newHashSet(elements);
-    return set;
-  }
-
   @Test
   public void testQueueHierarchyPendingResourceUpdate() throws Exception {
     Configuration conf =
@@ -3618,26 +3586,6 @@ public class TestCapacityScheduler {
     checkPendingResource(rm, "root", 0 * GB, "x");
   }
 
-  private void checkPendingResource(MockRM rm, String queueName, int memory,
-      String label) {
-    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
-    CSQueue queue = cs.getQueue(queueName);
-    Assert.assertEquals(
-        memory,
-        queue.getQueueResourceUsage()
-            .getPending(label == null ? RMNodeLabelsManager.NO_LABEL : label)
-            .getMemorySize());
-  }
-
-  private void checkPendingResourceGreaterThanZero(MockRM rm, String queueName,
-      String label) {
-    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
-    CSQueue queue = cs.getQueue(queueName);
-    Assert.assertTrue(queue.getQueueResourceUsage()
-        .getPending(label == null ? RMNodeLabelsManager.NO_LABEL : label)
-        .getMemorySize() > 0);
-  }
-
   // Test verifies AM Used resource for LeafQueue when AM ResourceRequest is
   // lesser than minimumAllocation
   @Test(timeout = 30000)
@@ -3707,7 +3655,7 @@ public class TestCapacityScheduler {
 
     Allocation allocate =
         cs.allocate(appAttemptId, Collections.<ResourceRequest> emptyList(),
-            Collections.<ContainerId> emptyList(), null, null,
+            null, Collections.<ContainerId> emptyList(), null, null,
             NULL_UPDATE_REQUESTS);
 
     Assert.assertNotNull(attempt);
@@ -3724,7 +3672,7 @@ public class TestCapacityScheduler {
 
     allocate =
         cs.allocate(appAttemptId, Collections.<ResourceRequest> emptyList(),
-            Collections.<ContainerId> emptyList(), null, null,
+            null, Collections.<ContainerId> emptyList(), null, null,
             NULL_UPDATE_REQUESTS);
 
     // All resources should be sent as headroom
@@ -4250,8 +4198,7 @@ public class TestCapacityScheduler {
       y1Req = TestUtils.createResourceRequest(
           ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory);
       cs.allocate(appAttemptId3,
-          Collections.<ResourceRequest>singletonList(y1Req),
-          Collections.<ContainerId>emptyList(),
+          Collections.<ResourceRequest>singletonList(y1Req), null, Collections.<ContainerId>emptyList(),
           null, null, NULL_UPDATE_REQUESTS);
       CapacityScheduler.schedule(cs);
     }
@@ -4264,8 +4211,7 @@ public class TestCapacityScheduler {
       x1Req = TestUtils.createResourceRequest(
           ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory);
       cs.allocate(appAttemptId1,
-          Collections.<ResourceRequest>singletonList(x1Req),
-          Collections.<ContainerId>emptyList(),
+          Collections.<ResourceRequest>singletonList(x1Req), null, Collections.<ContainerId>emptyList(),
           null, null, NULL_UPDATE_REQUESTS);
       CapacityScheduler.schedule(cs);
     }
@@ -4277,8 +4223,7 @@ public class TestCapacityScheduler {
     x2Req = TestUtils.createResourceRequest(
         ResourceRequest.ANY, 2 * GB, 1, true, priority, recordFactory);
     cs.allocate(appAttemptId2,
-        Collections.<ResourceRequest>singletonList(x2Req),
-        Collections.<ContainerId>emptyList(),
+        Collections.<ResourceRequest>singletonList(x2Req), null, Collections.<ContainerId>emptyList(),
         null, null, NULL_UPDATE_REQUESTS);
     CapacityScheduler.schedule(cs);
     assertEquals("X2 Used Resource should be 0", 0,
@@ -4289,8 +4234,7 @@ public class TestCapacityScheduler {
     x1Req = TestUtils.createResourceRequest(
         ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory);
     cs.allocate(appAttemptId1,
-        Collections.<ResourceRequest>singletonList(x1Req),
-        Collections.<ContainerId>emptyList(),
+        Collections.<ResourceRequest>singletonList(x1Req), null, Collections.<ContainerId>emptyList(),
         null, null, NULL_UPDATE_REQUESTS);
     CapacityScheduler.schedule(cs);
     assertEquals("X1 Used Resource should be 7 GB", 7 * GB,
@@ -4303,8 +4247,7 @@ public class TestCapacityScheduler {
       y1Req = TestUtils.createResourceRequest(
           ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory);
       cs.allocate(appAttemptId3,
-          Collections.<ResourceRequest>singletonList(y1Req),
-          Collections.<ContainerId>emptyList(),
+          Collections.<ResourceRequest>singletonList(y1Req), null, Collections.<ContainerId>emptyList(),
           null, null, NULL_UPDATE_REQUESTS);
       CapacityScheduler.schedule(cs);
     }
@@ -4363,7 +4306,7 @@ public class TestCapacityScheduler {
         ResourceRequest.ANY, 2 * GB, 1, true, priority, recordFactory);
     //This will allocate for app1
     cs.allocate(appAttemptId1, Collections.<ResourceRequest>singletonList(r1),
-        Collections.<ContainerId>emptyList(),
+        null, Collections.<ContainerId>emptyList(),
         null, null, NULL_UPDATE_REQUESTS).getContainers().size();
     CapacityScheduler.schedule(cs);
     ResourceRequest r2 = null;
@@ -4371,8 +4314,7 @@ public class TestCapacityScheduler {
       r2 = TestUtils.createResourceRequest(
           ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory);
       cs.allocate(appAttemptId2,
-          Collections.<ResourceRequest>singletonList(r2),
-          Collections.<ContainerId>emptyList(),
+          Collections.<ResourceRequest>singletonList(r2), null, Collections.<ContainerId>emptyList(),
           null, null, NULL_UPDATE_REQUESTS);
       CapacityScheduler.schedule(cs);
     }
@@ -4385,12 +4327,12 @@ public class TestCapacityScheduler {
     r2 = TestUtils.createResourceRequest(
         ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory);
     cs.allocate(appAttemptId1, Collections.<ResourceRequest>singletonList(r1),
-        Collections.<ContainerId>emptyList(),
+        null, Collections.<ContainerId>emptyList(),
         null, null, NULL_UPDATE_REQUESTS).getContainers().size();
     CapacityScheduler.schedule(cs);
 
     cs.allocate(appAttemptId2, Collections.<ResourceRequest>singletonList(r2),
-        Collections.<ContainerId>emptyList(), null, null, NULL_UPDATE_REQUESTS);
+        null, Collections.<ContainerId>emptyList(), null, null, NULL_UPDATE_REQUESTS);
     CapacityScheduler.schedule(cs);
     //Check blocked Resource
     assertEquals("A Used Resource should be 2 GB", 2 * GB,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
index eddf8c8..18cd942 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
@@ -106,7 +106,7 @@ public class TestCapacitySchedulerAsyncScheduling {
         CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_THREAD,
         numThreads);
     conf.setInt(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_PREFIX
-        + ".scheduling-interval-ms", 100);
+        + ".scheduling-interval-ms", 0);
 
     final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
     mgr.init(conf);




[26/32] hadoop git commit: YARN-7763. Allow Constraints specified in the SchedulingRequest to override application level constraints. (Weiwei Yang via asuresh)

Posted by as...@apache.org.
YARN-7763. Allow Constraints specified in the SchedulingRequest to override application level constraints. (Weiwei Yang via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8bf7c444
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8bf7c444
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8bf7c444

Branch: refs/heads/trunk
Commit: 8bf7c444368f48f63f8011cf155f551c6b51ee21
Parents: 28fe7f3
Author: Arun Suresh <as...@apache.org>
Authored: Sun Jan 21 19:11:17 2018 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../scheduler/capacity/CapacityScheduler.java   |  4 +-
 .../constraint/PlacementConstraintsUtil.java    | 98 +++++++++++---------
 .../algorithm/DefaultPlacementAlgorithm.java    |  4 +-
 .../SingleConstraintAppPlacementAllocator.java  | 10 +-
 .../TestPlacementConstraintsUtil.java           | 94 ++++++++++++-------
 5 files changed, 123 insertions(+), 87 deletions(-)
----------------------------------------------------------------------
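
The effect of this change can be illustrated with a short sketch: a SchedulingRequest that carries its own PlacementConstraint is now checked against that constraint, which takes precedence over whatever was registered for the same allocation tags at the application level. The tag name, request id, priority and sizing below are illustrative only, and the usual org.apache.hadoop.yarn.api.records and org.apache.hadoop.yarn.api.resource imports are assumed.

    // Request-level anti-affinity on the "spark" tag (illustrative values).
    PlacementConstraint requestLevel = PlacementConstraints
        .targetNotIn(PlacementConstraints.NODE,
            PlacementConstraints.PlacementTargets.allocationTag("spark"))
        .build();

    SchedulingRequest request = SchedulingRequest.newInstance(
        1L,                                 // allocation request id (illustrative)
        Priority.newInstance(0),
        ExecutionTypeRequest.newInstance(),
        Collections.singleton("spark"),     // allocation tags
        ResourceSizing.newInstance(Resource.newInstance(1024, 1)),
        requestLevel);                      // non-null, so it overrides the app-level constraint

Passing null as the constraint (as the test helper added below does) keeps the fallback behavior: the application-level constraint registered for the tags is used instead.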


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bf7c444/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 429f9f3..a096e2f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -2621,9 +2621,9 @@ public class CapacityScheduler extends
         // Validate placement constraint is satisfied before
         // committing the request.
         try {
-          if (!PlacementConstraintsUtil.canSatisfySingleConstraint(
+          if (!PlacementConstraintsUtil.canSatisfyConstraints(
               appAttempt.getApplicationId(),
-              schedulingRequest.getAllocationTags(), schedulerNode,
+              schedulingRequest, schedulerNode,
               rmContext.getPlacementConstraintManager(),
               rmContext.getAllocationTagsManager())) {
             LOG.debug("Failed to allocate container for application "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bf7c444/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
index ff5cb67..c07c16f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
@@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
@@ -54,7 +55,7 @@ public final class PlacementConstraintsUtil {
   }
 
   /**
-   * Returns true if **single** placement constraint with associated
+   * Returns true if <b>single</b> placement constraint with associated
    * allocationTags and scope is satisfied by a specific scheduler Node.
    *
    * @param targetApplicationId the application id, which could be override by
@@ -148,59 +149,70 @@ public final class PlacementConstraintsUtil {
     return true;
   }
 
-  /**
-   * Returns true if all placement constraints are **currently** satisfied by a
-   * specific scheduler Node..
-   *
-   * To do so the method retrieves and goes through all application constraint
-   * expressions and checks if the specific allocation is between the allowed
-   * min-max cardinality values under the constraint scope (Node/Rack/etc).
-   *
-   * @param applicationId applicationId,
-   * @param placementConstraint placement constraint.
-   * @param node the scheduler node
-   * @param tagsManager the allocation tags store
-   * @return true if all application constraints are satisfied by node
-   * @throws InvalidAllocationTagsQueryException
-   */
-  public static boolean canSatisfySingleConstraint(ApplicationId applicationId,
-      PlacementConstraint placementConstraint, SchedulerNode node,
-      AllocationTagsManager tagsManager)
+  private static boolean canSatisfyConstraints(ApplicationId appId,
+      PlacementConstraint constraint, SchedulerNode node,
+      AllocationTagsManager atm)
       throws InvalidAllocationTagsQueryException {
-    if (placementConstraint == null) {
+    if (constraint == null) {
       return true;
     }
-    // Transform to SimpleConstraint
+
+    // If this is a single constraint, transform to SingleConstraint
     SingleConstraintTransformer singleTransformer =
-        new SingleConstraintTransformer(placementConstraint);
-    placementConstraint = singleTransformer.transform();
-    AbstractConstraint sConstraintExpr = placementConstraint.getConstraintExpr();
-    SingleConstraint single = (SingleConstraint) sConstraintExpr;
+        new SingleConstraintTransformer(constraint);
+    constraint = singleTransformer.transform();
+    AbstractConstraint sConstraintExpr = constraint.getConstraintExpr();
 
-    return canSatisfySingleConstraint(applicationId, single, node, tagsManager);
+    // TODO handle other type of constraints, e.g CompositeConstraint
+    if (sConstraintExpr instanceof SingleConstraint) {
+      SingleConstraint single = (SingleConstraint) sConstraintExpr;
+      return canSatisfySingleConstraint(appId, single, node, atm);
+    } else {
+      throw new InvalidAllocationTagsQueryException(
+          "Unsupported type of constraint.");
+    }
   }
 
   /**
-   * Returns true if all placement constraints with associated allocationTags
-   * are **currently** satisfied by a specific scheduler Node.
-   * To do so the method retrieves and goes through all application constraint
-   * expressions and checks if the specific allocation is between the allowed
-   * min-max cardinality values under the constraint scope (Node/Rack/etc).
+   * Returns true if the placement constraint for a given scheduling request
+   * is <b>currently</b> satisfied by the specific scheduler node. This method
+   * first validates the constraint specified in the request; if not specified,
+   * then it validates the application-level constraint if one exists; otherwise,
+   * it validates the global constraint if one exists.
+   * <p/>
+   * This method only checks whether a scheduling request can be placed
+   * on a node with respect to a certain placement constraint. It gives no
+   * guarantee that the asked allocations can eventually be allocated, because
+   * it does not check resources; that needs to be further decided by the scheduler.
    *
-   * @param appId the application id
-   * @param allocationTags the allocation tags set
-   * @param node the scheduler node
-   * @param pcm the placement constraints store
-   * @param tagsManager the allocation tags store
-   * @return true if all application constraints are satisfied by node
+   * @param applicationId application id
+   * @param request scheduling request
+   * @param schedulerNode node
+   * @param pcm placement constraint manager
+   * @param atm allocation tags manager
+   * @return true if the given node satisfies the constraint of the request
    * @throws InvalidAllocationTagsQueryException
    */
-  public static boolean canSatisfySingleConstraint(ApplicationId appId,
-      Set<String> allocationTags, SchedulerNode node,
-      PlacementConstraintManager pcm, AllocationTagsManager tagsManager)
+  public static boolean canSatisfyConstraints(ApplicationId applicationId,
+      SchedulingRequest request, SchedulerNode schedulerNode,
+      PlacementConstraintManager pcm, AllocationTagsManager atm)
       throws InvalidAllocationTagsQueryException {
-    PlacementConstraint constraint = pcm.getConstraint(appId, allocationTags);
-    return canSatisfySingleConstraint(appId, constraint, node, tagsManager);
-  }
+    // TODO do proper merge on different level of constraints, see YARN-7778.
 
+    // Request level constraint
+    PlacementConstraint constraint = request.getPlacementConstraint();
+    if (constraint == null) {
+      // Application level constraint
+      constraint = pcm.getConstraint(applicationId,
+          request.getAllocationTags());
+      if (constraint == null) {
+        // Global level constraint
+        constraint = pcm.getGlobalConstraint(request.getAllocationTags());
+        if (constraint == null) {
+          return true;
+        }
+      }
+    }
+    return canSatisfyConstraints(applicationId, constraint, schedulerNode, atm);
+  }
 }
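
To make the lookup order concrete, the fragment below shows the two paths a caller now exercises, reusing the createSchedulingRequest helper and fixtures (appId1, sourceTag1, c1, schedulerNode0, pcm, tm) that are added to TestPlacementConstraintsUtil later in this patch; it is a sketch of intent, not an additional test.

    // A request that carries its own constraint (c1) is checked against c1 ...
    boolean withRequestConstraint = PlacementConstraintsUtil.canSatisfyConstraints(
        appId1, createSchedulingRequest(sourceTag1, c1), schedulerNode0, pcm, tm);

    // ... while a request without one falls back to the application-level
    // constraint registered for sourceTag1, and then to the global constraint.
    boolean withFallback = PlacementConstraintsUtil.canSatisfyConstraints(
        appId1, createSchedulingRequest(sourceTag1), schedulerNode0, pcm, tm);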

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bf7c444/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
index a0749f5..cf2ed15 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
@@ -71,8 +71,8 @@ public class DefaultPlacementAlgorithm implements ConstraintPlacementAlgorithm {
       throws InvalidAllocationTagsQueryException {
     int numAllocs = schedulingRequest.getResourceSizing().getNumAllocations();
     if (numAllocs > 0) {
-      if (PlacementConstraintsUtil.canSatisfySingleConstraint(appId,
-          schedulingRequest.getAllocationTags(), schedulerNode,
+      if (PlacementConstraintsUtil.canSatisfyConstraints(appId,
+          schedulingRequest, schedulerNode,
           constraintManager, tagsManager)) {
         return true;
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bf7c444/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
index dd30b61..9e7d71c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerR
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.InvalidAllocationTagsQueryException;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintsUtil;
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
 
@@ -72,6 +73,7 @@ public class SingleConstraintAppPlacementAllocator<N extends SchedulerNode>
   private String targetNodePartition;
   private Set<String> targetAllocationTags;
   private AllocationTagsManager allocationTagsManager;
+  private PlacementConstraintManager placementConstraintManager;
 
   public SingleConstraintAppPlacementAllocator() {
     ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
@@ -437,10 +439,9 @@ public class SingleConstraintAppPlacementAllocator<N extends SchedulerNode>
 
     // node type will be ignored.
     try {
-      return PlacementConstraintsUtil.canSatisfySingleConstraint(
-          appSchedulingInfo.getApplicationId(),
-          this.schedulingRequest.getPlacementConstraint(), node,
-          allocationTagsManager);
+      return PlacementConstraintsUtil.canSatisfyConstraints(
+          appSchedulingInfo.getApplicationId(), schedulingRequest, node,
+          placementConstraintManager, allocationTagsManager);
     } catch (InvalidAllocationTagsQueryException e) {
       LOG.warn("Failed to query node cardinality:", e);
       return false;
@@ -527,5 +528,6 @@ public class SingleConstraintAppPlacementAllocator<N extends SchedulerNode>
       SchedulerRequestKey schedulerRequestKey, RMContext rmContext) {
     super.initialize(appSchedulingInfo, schedulerRequestKey, rmContext);
     this.allocationTagsManager = rmContext.getAllocationTagsManager();
+    this.placementConstraintManager = rmContext.getPlacementConstraintManager();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bf7c444/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
index 8ad726e..a5460c2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintsUtil.java
@@ -32,6 +32,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -39,6 +40,10 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
@@ -64,6 +69,7 @@ public class TestPlacementConstraintsUtil {
   private PlacementConstraint c1, c2, c3, c4;
   private Set<String> sourceTag1, sourceTag2;
   private Map<Set<String>, PlacementConstraint> constraintMap1, constraintMap2;
+  private AtomicLong requestID = new AtomicLong(0);
 
   @Before
   public void setup() {
@@ -102,6 +108,22 @@ public class TestPlacementConstraintsUtil {
             AbstractMap.SimpleEntry::getValue));
   }
 
+  private SchedulingRequest createSchedulingRequest(Set<String> allocationTags,
+      PlacementConstraint constraint) {
+    return SchedulingRequest
+        .newInstance(requestID.incrementAndGet(),
+            Priority.newInstance(0),
+            ExecutionTypeRequest.newInstance(),
+            allocationTags,
+            ResourceSizing.newInstance(Resource.newInstance(1024, 3)),
+            constraint);
+  }
+
+  private SchedulingRequest createSchedulingRequest(Set<String>
+      allocationTags) {
+    return createSchedulingRequest(allocationTags, null);
+  }
+
   @Test
   public void testNodeAffinityAssignment()
       throws InvalidAllocationTagsQueryException {
@@ -117,10 +139,10 @@ public class TestPlacementConstraintsUtil {
       RMNode currentNode = nodeIterator.next();
       FiCaSchedulerNode schedulerNode = TestUtils.getMockNode(
           currentNode.getHostName(), currentNode.getRackName(), 123, 4 * GB);
-      Assert.assertFalse(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
-          sourceTag1, schedulerNode, pcm, tm));
-      Assert.assertFalse(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
-          sourceTag2, schedulerNode, pcm, tm));
+      Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+          createSchedulingRequest(sourceTag1), schedulerNode, pcm, tm));
+      Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+          createSchedulingRequest(sourceTag2), schedulerNode, pcm, tm));
     }
     /**
      * Now place container:
@@ -145,15 +167,15 @@ public class TestPlacementConstraintsUtil {
     tm.addContainer(n0_r1.getNodeID(), hbase_m, ImmutableSet.of("hbase-m"));
 
     // 'spark' placement on Node0 should now SUCCEED
-    Assert.assertTrue(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
-        sourceTag1, schedulerNode0, pcm, tm));
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(sourceTag1), schedulerNode0, pcm, tm));
     // FAIL on the rest of the nodes
-    Assert.assertFalse(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
-        sourceTag1, schedulerNode1, pcm, tm));
-    Assert.assertFalse(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
-        sourceTag1, schedulerNode2, pcm, tm));
-    Assert.assertFalse(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
-        sourceTag1, schedulerNode3, pcm, tm));
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(sourceTag1), schedulerNode1, pcm, tm));
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(sourceTag1), schedulerNode2, pcm, tm));
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(sourceTag1), schedulerNode3, pcm, tm));
   }
 
   @Test
@@ -187,16 +209,16 @@ public class TestPlacementConstraintsUtil {
     FiCaSchedulerNode schedulerNode3 = TestUtils
         .getMockNode(n3_r2.getHostName(), n3_r2.getRackName(), 123, 4 * GB);
     // 'zk' placement on Rack1 should now SUCCEED
-    Assert.assertTrue(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
-        sourceTag2, schedulerNode0, pcm, tm));
-    Assert.assertTrue(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
-        sourceTag2, schedulerNode1, pcm, tm));
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(sourceTag2), schedulerNode0, pcm, tm));
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(sourceTag2), schedulerNode1, pcm, tm));
 
     // FAIL on the rest of the RACKs
-    Assert.assertFalse(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
-        sourceTag2, schedulerNode2, pcm, tm));
-    Assert.assertFalse(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
-        sourceTag2, schedulerNode3, pcm, tm));
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(sourceTag2), schedulerNode2, pcm, tm));
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(sourceTag2), schedulerNode3, pcm, tm));
   }
 
   @Test
@@ -230,15 +252,15 @@ public class TestPlacementConstraintsUtil {
     tm.addContainer(n0_r1.getNodeID(), hbase_m, ImmutableSet.of("hbase-m"));
 
     // 'spark' placement on Node0 should now FAIL
-    Assert.assertFalse(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
-        sourceTag1, schedulerNode0, pcm, tm));
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(sourceTag1), schedulerNode0, pcm, tm));
     // SUCCEED on the rest of the nodes
-    Assert.assertTrue(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
-        sourceTag1, schedulerNode1, pcm, tm));
-    Assert.assertTrue(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
-        sourceTag1, schedulerNode2, pcm, tm));
-    Assert.assertTrue(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
-        sourceTag1, schedulerNode3, pcm, tm));
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(sourceTag1), schedulerNode1, pcm, tm));
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(sourceTag1), schedulerNode2, pcm, tm));
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(sourceTag1), schedulerNode3, pcm, tm));
   }
 
   @Test
@@ -273,15 +295,15 @@ public class TestPlacementConstraintsUtil {
         .getMockNode(n3_r2.getHostName(), n3_r2.getRackName(), 123, 4 * GB);
 
     // 'zk' placement on Rack1 should FAIL
-    Assert.assertFalse(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
-        sourceTag2, schedulerNode0, pcm, tm));
-    Assert.assertFalse(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
-        sourceTag2, schedulerNode1, pcm, tm));
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(sourceTag2), schedulerNode0, pcm, tm));
+    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(sourceTag2), schedulerNode1, pcm, tm));
 
     // SUCCEED on the rest of the RACKs
-    Assert.assertTrue(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
-        sourceTag2, schedulerNode2, pcm, tm));
-    Assert.assertTrue(PlacementConstraintsUtil.canSatisfySingleConstraint(appId1,
-        sourceTag2, schedulerNode3, pcm, tm));
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(sourceTag2), schedulerNode2, pcm, tm));
+    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
+        createSchedulingRequest(sourceTag2), schedulerNode3, pcm, tm));
   }
 }
\ No newline at end of file




[18/32] hadoop git commit: YARN-7681. Double-check placement constraints in scheduling phase before actual allocation is made. (Weiwei Yang via asuresh)

Posted by as...@apache.org.
YARN-7681. Double-check placement constraints in scheduling phase before actual allocation is made. (Weiwei Yang via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4eda58c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4eda58c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4eda58c1

Branch: refs/heads/trunk
Commit: 4eda58c13641c14c4b248843a2589781cbcd343f
Parents: bdba01f
Author: Arun Suresh <as...@apache.org>
Authored: Wed Jan 10 09:04:30 2018 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../scheduler/capacity/CapacityScheduler.java   | 23 ++++++++++++++++++++
 1 file changed, 23 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4eda58c1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index e682d0f..d2713c8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -124,6 +124,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCo
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.SchedulerContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.InvalidAllocationTagsQueryException;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintsUtil;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
@@ -2574,6 +2576,27 @@ public class CapacityScheduler extends
         ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode>
             resourceCommitRequest = createResourceCommitRequest(
             appAttempt, schedulingRequest, schedulerNode);
+
+        // Validate placement constraint is satisfied before
+        // committing the request.
+        try {
+          if (!PlacementConstraintsUtil.canSatisfyConstraints(
+              appAttempt.getApplicationId(),
+              schedulingRequest.getAllocationTags(),
+              schedulerNode,
+              rmContext.getPlacementConstraintManager(),
+              rmContext.getAllocationTagsManager())) {
+            LOG.debug("Failed to allocate container for application "
+                + appAttempt.getApplicationId() + " on node "
+                + schedulerNode.getNodeName()
+                + " because this allocation violates the"
+                + " placement constraint.");
+            return false;
+          }
+        } catch (InvalidAllocationTagsQueryException e) {
+          LOG.warn("Unable to allocate container", e);
+          return false;
+        }
         return tryCommit(getClusterResource(), resourceCommitRequest, false);
       }
     }
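
The placement algorithm proposes allocations against a snapshot of the cluster, and other containers may start or finish between that proposal and the commit, changing a node's allocation-tag cardinalities. Stripped of logging, the guard added above therefore amounts to: re-evaluate the constraint against the current tag store, refuse the commit if it no longer holds, and only then attempt the commit.

    try {
      if (!PlacementConstraintsUtil.canSatisfySingleConstraint(
          appAttempt.getApplicationId(), schedulingRequest.getAllocationTags(),
          schedulerNode, rmContext.getPlacementConstraintManager(),
          rmContext.getAllocationTagsManager())) {
        return false;   // node no longer satisfies the constraint; do not commit
      }
    } catch (InvalidAllocationTagsQueryException e) {
      return false;     // treat tag-store query failures as "cannot place here"
    }
    return tryCommit(getClusterResource(), resourceCommitRequest, false);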




[16/32] hadoop git commit: YARN-7613. Implement Basic algorithm for constraint based placement. (Panagiotis Garefalakis via asuresh)

Posted by as...@apache.org.
YARN-7613. Implement Basic algorithm for constraint based placement. (Panagiotis Garefalakis via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a52d11fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a52d11fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a52d11fb

Branch: refs/heads/trunk
Commit: a52d11fb8c103f14e42692600a058ba3b56e2ecf
Parents: f9af15d
Author: Arun Suresh <as...@apache.org>
Authored: Wed Dec 27 22:59:22 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     |   4 +
 .../src/main/resources/yarn-default.xml         |   8 +-
 .../rmcontainer/RMContainerImpl.java            |  10 +-
 .../constraint/AllocationTagsManager.java       | 121 ++++++++++---
 .../algorithm/DefaultPlacementAlgorithm.java    | 172 +++++++++++++++++++
 .../iterators/PopularTagsIterator.java          |  71 ++++++++
 .../algorithm/iterators/SerialIterator.java     |  53 ++++++
 .../algorithm/iterators/package-info.java       |  29 ++++
 .../constraint/algorithm/package-info.java      |  29 ++++
 .../constraint/processor/BatchedRequests.java   |  45 ++++-
 .../processor/PlacementProcessor.java           |  32 ++--
 .../processor/SamplePlacementAlgorithm.java     | 144 ----------------
 .../constraint/TestAllocationTagsManager.java   | 156 ++++++++++++-----
 .../TestBatchedRequestsIterators.java           |  82 +++++++++
 .../constraint/TestPlacementProcessor.java      |   4 +-
 15 files changed, 721 insertions(+), 239 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a52d11fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8fb3c2e..367b1ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -536,6 +536,10 @@ public class YarnConfiguration extends Configuration {
   public static final String RM_PLACEMENT_CONSTRAINTS_ALGORITHM_CLASS =
       RM_PREFIX + "placement-constraints.algorithm.class";
 
+  /** Used for BasicPlacementAlgorithm - default SERIAL. **/
+  public static final String RM_PLACEMENT_CONSTRAINTS_ALGORITHM_ITERATOR =
+      RM_PREFIX + "placement-constraints.algorithm.iterator";
+
   public static final String RM_PLACEMENT_CONSTRAINTS_ENABLED =
       RM_PREFIX + "placement-constraints.enabled";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a52d11fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 6d52ace..509a040 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -145,7 +145,13 @@
   <property>
     <description>Constraint Placement Algorithm to be used.</description>
     <name>yarn.resourcemanager.placement-constraints.algorithm.class</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.SamplePlacementAlgorithm</value>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.DefaultPlacementAlgorithm</value>
+  </property>
+
+  <property>
+    <description>Placement Algorithm Requests Iterator to be used.</description>
+    <name>yarn.resourcemanager.placement-constraints.algorithm.iterator</name>
+    <value>SERIAL</value>
   </property>
 
   <property>
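
The same settings can be applied programmatically through YarnConfiguration; the sketch below uses only keys that appear in this change (the enabled flag is the pre-existing yarn.resourcemanager.placement-constraints.enabled switch) and assumes the org.apache.hadoop.conf.Configuration and org.apache.hadoop.yarn.conf.YarnConfiguration imports. The values shown are the defaults above and are illustrative.

    Configuration conf = new YarnConfiguration();
    // Enable constraint-based placement in the RM.
    conf.setBoolean(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ENABLED, true);
    // Use the new default algorithm introduced by this change.
    conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ALGORITHM_CLASS,
        "org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint"
            + ".algorithm.DefaultPlacementAlgorithm");
    // SERIAL is the default request iterator; this commit also adds a
    // PopularTagsIterator as an alternative ordering.
    conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ALGORITHM_ITERATOR, "SERIAL");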

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a52d11fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index c873509..2c4ef7b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -579,9 +579,8 @@ public class RMContainerImpl implements RMContainer {
     public void transition(RMContainerImpl container, RMContainerEvent event) {
       // Notify placementManager
       container.rmContext.getAllocationTagsManager().addContainer(
-          container.getNodeId(),
-          container.getApplicationAttemptId().getApplicationId(),
-          container.getContainerId(), container.getAllocationTags());
+          container.getNodeId(), container.getContainerId(),
+          container.getAllocationTags());
 
       container.eventHandler.handle(new RMAppAttemptEvent(
           container.appAttemptId, RMAppAttemptEventType.CONTAINER_ALLOCATED));
@@ -696,9 +695,8 @@ public class RMContainerImpl implements RMContainer {
     public void transition(RMContainerImpl container, RMContainerEvent event) {
       // Notify placementManager
       container.rmContext.getAllocationTagsManager().removeContainer(
-          container.getNodeId(),
-          container.getApplicationAttemptId().getApplicationId(),
-          container.getContainerId(), container.getAllocationTags());
+          container.getNodeId(), container.getContainerId(),
+          container.getAllocationTags());
 
       RMContainerFinishedEvent finishedEvent = (RMContainerFinishedEvent) event;
 

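The explicit ApplicationId argument is dropped in these calls because it can
always be derived from the ContainerId that is already passed in (the same
derivation the AllocationTagsManager now performs internally). A minimal
sketch, with a hypothetical helper class name:

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.ContainerId;

    public final class AppIdFromContainerIdSketch {
      /** Returns the application a given container belongs to. */
      public static ApplicationId appIdOf(ContainerId containerId) {
        return containerId.getApplicationAttemptId().getApplicationId();
      }
    }
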
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a52d11fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
index 7b0b959..4bb3e79 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
@@ -24,6 +24,7 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -54,24 +55,27 @@ public class AllocationTagsManager {
   private final RMContext rmContext;
 
   // Application's tags to Node
-  private Map<ApplicationId, NodeToCountedTags> perAppNodeMappings =
+  private Map<ApplicationId, TypeToCountedTags> perAppNodeMappings =
       new HashMap<>();
   // Application's tags to Rack
-  private Map<ApplicationId, NodeToCountedTags> perAppRackMappings =
+  private Map<ApplicationId, TypeToCountedTags> perAppRackMappings =
       new HashMap<>();
+  // Application's Temporary containers mapping
+  private Map<ApplicationId, Map<NodeId, Map<ContainerId, Set<String>>>>
+      appTempMappings = new HashMap<>();
 
   // Global tags to node mapping (used to fast return aggregated tags
   // cardinality across apps)
-  private NodeToCountedTags<NodeId> globalNodeMapping = new NodeToCountedTags();
+  private TypeToCountedTags<NodeId> globalNodeMapping = new TypeToCountedTags();
   // Global tags to Rack mapping
-  private NodeToCountedTags<String> globalRackMapping = new NodeToCountedTags();
+  private TypeToCountedTags<String> globalRackMapping = new TypeToCountedTags();
 
   /**
    * Generic store mapping type <T> to counted tags.
    * Currently used both for NodeId to Tag, Count and Rack to Tag, Count
    */
   @VisibleForTesting
-  static class NodeToCountedTags<T> {
+  static class TypeToCountedTags<T> {
     // Map<Type, Map<Tag, Count>>
     private Map<T, Map<String, Long>> typeToTagsWithCount = new HashMap<>();
 
@@ -209,25 +213,31 @@ public class AllocationTagsManager {
   }
 
   @VisibleForTesting
-  Map<ApplicationId, NodeToCountedTags> getPerAppNodeMappings() {
+  Map<ApplicationId, TypeToCountedTags> getPerAppNodeMappings() {
     return perAppNodeMappings;
   }
 
   @VisibleForTesting
-  Map<ApplicationId, NodeToCountedTags> getPerAppRackMappings() {
+  Map<ApplicationId, TypeToCountedTags> getPerAppRackMappings() {
     return perAppRackMappings;
   }
 
   @VisibleForTesting
-  NodeToCountedTags getGlobalNodeMapping() {
+  TypeToCountedTags getGlobalNodeMapping() {
     return globalNodeMapping;
   }
 
   @VisibleForTesting
-  NodeToCountedTags getGlobalRackMapping() {
+  TypeToCountedTags getGlobalRackMapping() {
     return globalRackMapping;
   }
 
+  @VisibleForTesting
+  public Map<NodeId, Map<ContainerId, Set<String>>> getAppTempMappings(
+      ApplicationId applicationId) {
+    return appTempMappings.get(applicationId);
+  }
+
   public AllocationTagsManager(RMContext context) {
     ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
     readLock = lock.readLock();
@@ -235,18 +245,52 @@ public class AllocationTagsManager {
     rmContext = context;
   }
 
+  //
+
+  /**
+   * Adds a temporary fake-container tag to the Node mapping.
+   * Used by the constrained placement algorithm to keep track of containers
+   * that are currently placed on nodes but are not yet allocated.
+   * @param nodeId node on which the temporary container is placed
+   * @param applicationId application the temporary container belongs to
+   * @param allocationTags allocation tags of the temporary container
+   */
+  public void addTempContainer(NodeId nodeId, ApplicationId applicationId,
+      Set<String> allocationTags) {
+    ContainerId tmpContainer = ContainerId.newContainerId(
+        ApplicationAttemptId.newInstance(applicationId, 1), System.nanoTime());
+
+    writeLock.lock();
+    try {
+      Map<NodeId, Map<ContainerId, Set<String>>> appTempMapping =
+          appTempMappings.computeIfAbsent(applicationId, k -> new HashMap<>());
+      Map<ContainerId, Set<String>> containerTempMapping =
+          appTempMapping.computeIfAbsent(nodeId, k -> new HashMap<>());
+      containerTempMapping.put(tmpContainer, allocationTags);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Added TEMP container=" + tmpContainer + " with tags=["
+            + StringUtils.join(allocationTags, ",") + "]");
+      }
+    } finally {
+      writeLock.unlock();
+    }
+
+    addContainer(nodeId, tmpContainer, allocationTags);
+  }
+
   /**
    * Notify container allocated on a node.
    *
    * @param nodeId         allocated node.
-   * @param applicationId  applicationId
    * @param containerId    container id.
    * @param allocationTags allocation tags, see
    *                       {@link SchedulingRequest#getAllocationTags()}
    *                       application_id will be added to allocationTags.
    */
-  public void addContainer(NodeId nodeId, ApplicationId applicationId,
-      ContainerId containerId, Set<String> allocationTags) {
+  public void addContainer(NodeId nodeId, ContainerId containerId,
+      Set<String> allocationTags) {
+    ApplicationId applicationId =
+        containerId.getApplicationAttemptId().getApplicationId();
     String applicationIdTag =
         AllocationTagsNamespaces.APP_ID + applicationId.toString();
 
@@ -260,10 +304,10 @@ public class AllocationTagsManager {
 
     writeLock.lock();
     try {
-      NodeToCountedTags perAppTagsMapping = perAppNodeMappings
-          .computeIfAbsent(applicationId, k -> new NodeToCountedTags());
-      NodeToCountedTags perAppRackTagsMapping = perAppRackMappings
-          .computeIfAbsent(applicationId, k -> new NodeToCountedTags());
+      TypeToCountedTags perAppTagsMapping = perAppNodeMappings
+          .computeIfAbsent(applicationId, k -> new TypeToCountedTags());
+      TypeToCountedTags perAppRackTagsMapping = perAppRackMappings
+          .computeIfAbsent(applicationId, k -> new TypeToCountedTags());
       // Covering test-cases where context is mocked
       String nodeRack = (rmContext.getRMNodes() != null
           && rmContext.getRMNodes().get(nodeId) != null)
@@ -294,12 +338,13 @@ public class AllocationTagsManager {
    * Notify container removed.
    *
    * @param nodeId         nodeId
-   * @param applicationId  applicationId
    * @param containerId    containerId.
    * @param allocationTags allocation tags for given container
    */
-  public void removeContainer(NodeId nodeId, ApplicationId applicationId,
+  public void removeContainer(NodeId nodeId,
       ContainerId containerId, Set<String> allocationTags) {
+    ApplicationId applicationId =
+        containerId.getApplicationAttemptId().getApplicationId();
     String applicationIdTag =
         AllocationTagsNamespaces.APP_ID + applicationId.toString();
     boolean useSet = false;
@@ -313,9 +358,9 @@ public class AllocationTagsManager {
 
     writeLock.lock();
     try {
-      NodeToCountedTags perAppTagsMapping =
+      TypeToCountedTags perAppTagsMapping =
           perAppNodeMappings.get(applicationId);
-      NodeToCountedTags perAppRackTagsMapping =
+      TypeToCountedTags perAppRackTagsMapping =
           perAppRackMappings.get(applicationId);
       if (perAppTagsMapping == null) {
         return;
@@ -354,6 +399,34 @@ public class AllocationTagsManager {
   }
 
   /**
+   * Removes the temporary containers associated with an application.
+   * Used by the placement algorithm to clean temporary tags at the end of
+   * a placement cycle.
+   * @param applicationId Application Id.
+   */
+  public void cleanTempContainers(ApplicationId applicationId) {
+
+    if (!appTempMappings.get(applicationId).isEmpty()) {
+      appTempMappings.get(applicationId).entrySet().stream().forEach(nodeE -> {
+        nodeE.getValue().entrySet().stream().forEach(containerE -> {
+          removeContainer(nodeE.getKey(), containerE.getKey(),
+              containerE.getValue());
+        });
+      });
+      writeLock.lock();
+      try {
+        appTempMappings.remove(applicationId);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Removed TEMP containers of app=" + applicationId);
+        }
+      } finally {
+        writeLock.unlock();
+      }
+    }
+  }
+
+
+  /**
    * Get Node cardinality for a specific tag.
    * When applicationId is null, method returns aggregated cardinality
    *
@@ -378,7 +451,7 @@ public class AllocationTagsManager {
             "Must specify nodeId/tag to query cardinality");
       }
 
-      NodeToCountedTags mapping;
+      TypeToCountedTags mapping;
       if (applicationId != null) {
         mapping = perAppNodeMappings.get(applicationId);
       } else {
@@ -419,7 +492,7 @@ public class AllocationTagsManager {
             "Must specify rack/tag to query cardinality");
       }
 
-      NodeToCountedTags mapping;
+      TypeToCountedTags mapping;
       if (applicationId != null) {
         mapping = perAppRackMappings.get(applicationId);
       } else {
@@ -492,7 +565,7 @@ public class AllocationTagsManager {
             "Must specify nodeId/tags/op to query cardinality");
       }
 
-      NodeToCountedTags mapping;
+      TypeToCountedTags mapping;
       if (applicationId != null) {
         mapping = perAppNodeMappings.get(applicationId);
       } else {
@@ -540,7 +613,7 @@ public class AllocationTagsManager {
             "Must specify rack/tags/op to query cardinality");
       }
 
-      NodeToCountedTags mapping;
+      TypeToCountedTags mapping;
       if (applicationId != null) {
         mapping = perAppRackMappings.get(applicationId);
       } else {

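To make the lifecycle of the new temporary tags concrete, here is a minimal
sketch (illustrative only, not shipped with this patch). It assumes an
initialized RMContext, e.g. a mocked one as in the tests further below, and a
hypothetical wrapper class; all AllocationTagsManager calls are the ones added
or kept in this file:

    import com.google.common.collect.ImmutableSet;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.NodeId;
    import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.InvalidAllocationTagsQueryException;

    public final class TempTagLifecycleSketch {
      public static void demo(RMContext rmContext)
          throws InvalidAllocationTagsQueryException {
        AllocationTagsManager atm = new AllocationTagsManager(rmContext);
        ApplicationId app =
            ApplicationId.newInstance(System.currentTimeMillis(), 1);
        NodeId node = NodeId.fromString("host1:123");

        // During a placement cycle the algorithm records tentative placements
        // as TEMP containers so that later cardinality queries in the same
        // cycle already see them.
        atm.addTempContainer(node, app, ImmutableSet.of("zk"));
        System.out.println(atm.getNodeCardinality(node, app, "zk")); // 1

        // At the end of the cycle all TEMP tags of the application are dropped.
        atm.cleanTempContainers(app);
        System.out.println(atm.getNodeCardinality(node, app, "zk")); // 0
      }
    }
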
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a52d11fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
new file mode 100644
index 0000000..395c156
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
@@ -0,0 +1,172 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.InvalidAllocationTagsQueryException;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithm;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmInput;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmOutput;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmOutputCollector;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.PlacedSchedulingRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.BatchedRequests;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.NodeCandidateSelector;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Basic placement algorithm.
+ * Supports different iterators over the SchedulingRequests, including
+ * Serial and PopularTags.
+ */
+public class DefaultPlacementAlgorithm implements ConstraintPlacementAlgorithm {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DefaultPlacementAlgorithm.class);
+
+  private AllocationTagsManager tagsManager;
+  private PlacementConstraintManager constraintManager;
+  private NodeCandidateSelector nodeSelector;
+
+  @Override
+  public void init(RMContext rmContext) {
+    this.tagsManager = rmContext.getAllocationTagsManager();
+    this.constraintManager = rmContext.getPlacementConstraintManager();
+    this.nodeSelector =
+        filter -> ((AbstractYarnScheduler) (rmContext).getScheduler())
+            .getNodes(filter);
+  }
+
+  /**
+   * TODO: Method will be moved to PlacementConstraintsUtil class (YARN-7682)
+   * @param applicationId application the scheduling request belongs to
+   * @param allocationTags allocation tags of the scheduling request
+   * @param nodeId node whose placement validity is checked
+   * @param tagsManager allocation tags manager used for cardinality queries
+   * @return true if the node is a valid placement for the given tags
+   * @throws InvalidAllocationTagsQueryException on invalid cardinality queries
+   */
+  public boolean canAssign(ApplicationId applicationId,
+      Set<String> allocationTags, NodeId nodeId,
+      AllocationTagsManager tagsManager)
+      throws InvalidAllocationTagsQueryException {
+    PlacementConstraint constraint =
+        constraintManager.getConstraint(applicationId, allocationTags);
+    if (constraint == null) {
+      return true;
+    }
+    // TODO: proper transformations
+    // Currently works only for simple anti-affinity
+    // NODE scope target expressions
+    PlacementConstraintTransformations.SpecializedConstraintTransformer transformer =
+        new PlacementConstraintTransformations.SpecializedConstraintTransformer(
+            constraint);
+    PlacementConstraint transform = transformer.transform();
+    PlacementConstraint.TargetConstraint targetConstraint =
+        (PlacementConstraint.TargetConstraint) transform.getConstraintExpr();
+    // Assume a single target expression tag;
+    // this algorithm currently assumes a constraint will always be a simple
+    // Target Constraint with a single entry in the target set.
+    // See the TODO notes above and below for the generalizations that are
+    // still missing (full transformations, constraints beyond anti-affinity).
+    String targetTag = targetConstraint.getTargetExpressions().iterator().next()
+        .getTargetValues().iterator().next();
+    // TODO: Assuming anti-affinity constraint
+    long nodeCardinality =
+        tagsManager.getNodeCardinality(nodeId, applicationId, targetTag);
+    if (nodeCardinality != 0) {
+      return false;
+    }
+    // return true if it is a valid placement
+    return true;
+  }
+
+  public boolean attemptPlacementOnNode(ApplicationId appId,
+      SchedulingRequest schedulingRequest, SchedulerNode schedulerNode)
+      throws InvalidAllocationTagsQueryException {
+    int numAllocs = schedulingRequest.getResourceSizing().getNumAllocations();
+    if (numAllocs > 0) {
+      if (canAssign(appId,
+          schedulingRequest.getAllocationTags(), schedulerNode.getNodeID(),
+          tagsManager)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+
+  @Override
+  public void place(ConstraintPlacementAlgorithmInput input,
+      ConstraintPlacementAlgorithmOutputCollector collector) {
+    BatchedRequests requests = (BatchedRequests) input;
+    ConstraintPlacementAlgorithmOutput resp =
+        new ConstraintPlacementAlgorithmOutput(requests.getApplicationId());
+    List<SchedulerNode> allNodes = nodeSelector.selectNodes(null);
+
+    Iterator<SchedulingRequest> requestIterator = requests.iterator();
+    while (requestIterator.hasNext()) {
+      SchedulingRequest schedulingRequest = requestIterator.next();
+      Iterator<SchedulerNode> nodeIter = allNodes.iterator();
+      int numAllocs = schedulingRequest.getResourceSizing().getNumAllocations();
+      while (nodeIter.hasNext() && numAllocs > 0) {
+        SchedulerNode node = nodeIter.next();
+        try {
+          if (attemptPlacementOnNode(requests.getApplicationId(),
+              schedulingRequest, node)) {
+            schedulingRequest.getResourceSizing()
+                .setNumAllocations(--numAllocs);
+            PlacedSchedulingRequest placedReq =
+                new PlacedSchedulingRequest(schedulingRequest);
+            placedReq.setPlacementAttempt(requests.getPlacementAttempt());
+            placedReq.getNodes().add(node);
+            resp.getPlacedRequests().add(placedReq);
+            numAllocs =
+                schedulingRequest.getResourceSizing().getNumAllocations();
+            // Add temp-container tags for current placement cycle
+            this.tagsManager.addTempContainer(node.getNodeID(),
+                requests.getApplicationId(),
+                schedulingRequest.getAllocationTags());
+          }
+        } catch (InvalidAllocationTagsQueryException e) {
+          LOG.warn("Got exception from TagManager !", e);
+        }
+      }
+    }
+    // Add all requests whose numAllocations still > 0 to rejected list.
+    requests.getSchedulingRequests().stream()
+        .filter(sReq -> sReq.getResourceSizing().getNumAllocations() > 0)
+        .forEach(rejReq -> resp.getRejectedRequests().add(rejReq));
+    collector.collect(resp);
+    // Clean current temp-container tags
+    this.tagsManager.cleanTempContainers(requests.getApplicationId());
+  }
+}

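DefaultPlacementAlgorithm is the built-in implementation, but the
yarn.resourcemanager.placement-constraints.algorithm.class setting accepts any
ConstraintPlacementAlgorithm. As a purely illustrative sketch of that plug-in
surface (a deliberately trivial, hypothetical algorithm, not something shipped
with this patch), the class below places nothing and rejects every request:

    import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithm;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmInput;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmOutput;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmOutputCollector;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.BatchedRequests;

    /** Toy algorithm: places nothing and rejects everything. */
    public class RejectAllPlacementAlgorithm
        implements ConstraintPlacementAlgorithm {

      @Override
      public void init(RMContext rmContext) {
        // No state is needed for this sketch.
      }

      @Override
      public void place(ConstraintPlacementAlgorithmInput input,
          ConstraintPlacementAlgorithmOutputCollector collector) {
        BatchedRequests requests = (BatchedRequests) input;
        ConstraintPlacementAlgorithmOutput out =
            new ConstraintPlacementAlgorithmOutput(requests.getApplicationId());
        // Hand every request back as rejected; none are placed.
        requests.getSchedulingRequests()
            .forEach(req -> out.getRejectedRequests().add(req));
        collector.collect(out);
      }
    }
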
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a52d11fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/PopularTagsIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/PopularTagsIterator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/PopularTagsIterator.java
new file mode 100644
index 0000000..ca3e351
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/PopularTagsIterator.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.iterators;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NoSuchElementException;
+
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+
+/**
+ * Traverses the SchedulingRequests, most popular tags (by count) first.
+ * The count is per batch; the TagsManager could provide a global count.
+ */
+public class PopularTagsIterator implements Iterator<SchedulingRequest> {
+
+  private final List<SchedulingRequest> schedulingRequestList;
+  private int cursor;
+
+  public PopularTagsIterator(Collection<SchedulingRequest> schedulingRequests) {
+    this.schedulingRequestList = new ArrayList<>(schedulingRequests);
+    // Most popular First
+    Collections.sort(schedulingRequestList,
+        (o1, o2) -> (int) getTagPopularity(o2) - (int) getTagPopularity(o1));
+
+    this.cursor = 0;
+  }
+
+  private long getTagPopularity(SchedulingRequest o1) {
+    long max = 0;
+    for (String tag : o1.getAllocationTags()) {
+      long count = schedulingRequestList.stream()
+          .filter(req -> req.getAllocationTags().contains(tag)).count();
+      if (count > max) {
+        max = count;
+      }
+    }
+    return max;
+  }
+
+  @Override
+  public boolean hasNext() {
+    return (cursor < schedulingRequestList.size());
+  }
+
+  @Override
+  public SchedulingRequest next() {
+    if (hasNext()) {
+      return schedulingRequestList.get(cursor++);
+    }
+    throw new NoSuchElementException();
+  }
+}

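In other words, each request is ranked by the highest per-batch occurrence
count among its own tags, and requests with more frequently requested tags are
handed to the algorithm first so contended tags are placed while the most
candidate nodes are still free.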
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a52d11fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/SerialIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/SerialIterator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/SerialIterator.java
new file mode 100644
index 0000000..68733a2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/SerialIterator.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.iterators;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NoSuchElementException;
+
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+
+/**
+ * Traverses the SchedulingRequests in the same order in which they arrive.
+ */
+public class SerialIterator implements Iterator<SchedulingRequest> {
+
+  private final List<SchedulingRequest> schedulingRequestList;
+  private int cursor;
+
+  public SerialIterator(Collection<SchedulingRequest> schedulingRequests) {
+    this.schedulingRequestList = new ArrayList<>(schedulingRequests);
+    this.cursor = 0;
+  }
+
+  @Override
+  public boolean hasNext() {
+    return (cursor < schedulingRequestList.size());
+  }
+
+  @Override
+  public SchedulingRequest next() {
+    if (hasNext()) {
+      return schedulingRequestList.get(cursor++);
+    }
+    throw new NoSuchElementException();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a52d11fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/package-info.java
new file mode 100644
index 0000000..c84671e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/package-info.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains the iterators used by the constraint placement
+ * algorithm to traverse the scheduling requests in a batch
+ * (e.g. serially or by tag popularity).
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.iterators;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a52d11fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/package-info.java
new file mode 100644
index 0000000..bb82077
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/package-info.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains the constraint placement algorithm implementations
+ * (e.g. the DefaultPlacementAlgorithm) used to place allocations based on
+ * placement constraints.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a52d11fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
index fe92d2f..8b04860 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
@@ -21,12 +21,15 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.iterators.PopularTagsIterator;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.iterators.SerialIterator;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmInput;
 
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
 
@@ -35,7 +38,8 @@ import java.util.Set;
  * to place as a batch. The placement algorithm tends to give more optimal
  * placements if more requests are batched together.
  */
-class BatchedRequests implements ConstraintPlacementAlgorithmInput {
+public class BatchedRequests
+    implements ConstraintPlacementAlgorithmInput, Iterable<SchedulingRequest> {
 
   // PlacementAlgorithmOutput attempt - the number of times the requests in this
   // batch has been placed but was rejected by the scheduler.
@@ -44,19 +48,46 @@ class BatchedRequests implements ConstraintPlacementAlgorithmInput {
   private final ApplicationId applicationId;
   private final Collection<SchedulingRequest> requests;
   private final Map<String, Set<NodeId>> blacklist = new HashMap<>();
+  private IteratorType iteratorType;
 
-  BatchedRequests(ApplicationId applicationId,
+  /**
+   * Iterator Type.
+   */
+  public enum IteratorType {
+    SERIAL,
+    POPULAR_TAGS
+  }
+
+  public BatchedRequests(IteratorType type, ApplicationId applicationId,
       Collection<SchedulingRequest> requests, int attempt) {
+    this.iteratorType = type;
     this.applicationId = applicationId;
     this.requests = requests;
     this.placementAttempt = attempt;
   }
 
   /**
+   * Exposes a SchedulingRequest Iterator that can be used to traverse the
+   * requests using different heuristics, e.g. tag popularity.
+   * @return SchedulingRequest Iterator.
+   */
+  @Override
+  public Iterator<SchedulingRequest> iterator() {
+    switch (this.iteratorType) {
+    case SERIAL:
+      return new SerialIterator(requests);
+    case POPULAR_TAGS:
+      return new PopularTagsIterator(requests);
+    default:
+      return null;
+    }
+  }
+
+  /**
    * Get Application Id.
    * @return Application Id.
    */
-  ApplicationId getApplicationId() {
+  public ApplicationId getApplicationId() {
     return applicationId;
   }
 
@@ -73,11 +104,11 @@ class BatchedRequests implements ConstraintPlacementAlgorithmInput {
    * Add a Scheduling request to the batch.
    * @param req Scheduling Request.
    */
-  void addToBatch(SchedulingRequest req) {
+  public void addToBatch(SchedulingRequest req) {
     requests.add(req);
   }
 
-  void addToBlacklist(Set<String> tags, SchedulerNode node) {
+  public void addToBlacklist(Set<String> tags, SchedulerNode node) {
     if (tags != null && !tags.isEmpty()) {
       // We are currently assuming a single allocation tag
       // per scheduler request currently.
@@ -90,7 +121,7 @@ class BatchedRequests implements ConstraintPlacementAlgorithmInput {
    * Get placement attempt.
    * @return PlacementAlgorithmOutput placement Attempt.
    */
-  int getPlacementAttempt() {
+  public int getPlacementAttempt() {
     return placementAttempt;
   }
 
@@ -99,7 +130,7 @@ class BatchedRequests implements ConstraintPlacementAlgorithmInput {
    * @param tag Tag.
    * @return Set of blacklisted Nodes.
    */
-  Set<NodeId> getBlacklist(String tag) {
+  public Set<NodeId> getBlacklist(String tag) {
     return blacklist.getOrDefault(tag, Collections.EMPTY_SET);
   }
 }

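Since BatchedRequests is now Iterable, the traversal heuristic is fixed at
construction time. A minimal usage sketch (illustrative only; the request list
is left empty here, whereas the RM fills it with the AM's SchedulingRequests):

    import java.util.ArrayList;

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.SchedulingRequest;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.BatchedRequests;

    public final class BatchedRequestsIterationSketch {
      public static void main(String[] args) {
        BatchedRequests batch = new BatchedRequests(
            BatchedRequests.IteratorType.POPULAR_TAGS,
            ApplicationId.newInstance(System.currentTimeMillis(), 1),
            new ArrayList<SchedulingRequest>(), 1);
        // The for-each below uses the PopularTagsIterator under the hood;
        // with IteratorType.SERIAL it would preserve arrival order instead.
        for (SchedulingRequest req : batch) {
          System.out.println(req.getAllocationTags());
        }
      }
    }
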
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a52d11fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java
index d613d4e..8e9c79c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java
@@ -35,8 +35,10 @@ import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.DefaultPlacementAlgorithm;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithm;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.PlacedSchedulingRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.SchedulingResponse;
@@ -98,6 +100,7 @@ public class PlacementProcessor implements ApplicationMasterServiceProcessor {
   private Map<ApplicationId, List<SchedulingRequest>> requestsToReject =
       new ConcurrentHashMap<>();
 
+  private BatchedRequests.IteratorType iteratorType;
   private PlacementDispatcher placementDispatcher;
 
 
@@ -122,9 +125,20 @@ public class PlacementProcessor implements ApplicationMasterServiceProcessor {
     if (instances != null && !instances.isEmpty()) {
       algorithm = instances.get(0);
     } else {
-      algorithm = new SamplePlacementAlgorithm();
+      algorithm = new DefaultPlacementAlgorithm();
+    }
+    LOG.info("Placement Algorithm [{}]", algorithm.getClass().getName());
+
+    String iteratorName = ((RMContextImpl) amsContext).getYarnConfiguration()
+        .get(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ALGORITHM_ITERATOR,
+            BatchedRequests.IteratorType.SERIAL.name());
+    LOG.info("Placement Algorithm Iterator[{}]", iteratorName);
+    try {
+      iteratorType = BatchedRequests.IteratorType.valueOf(iteratorName);
+    } catch (IllegalArgumentException e) {
+      throw new YarnRuntimeException(
+          "Could not instantiate Placement Algorithm Iterator: ", e);
     }
-    LOG.info("Planning Algorithm [{}]", algorithm.getClass().getName());
 
     int algoPSize = ((RMContextImpl) amsContext).getYarnConfiguration().getInt(
         YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ALGORITHM_POOL_SIZE,
@@ -188,9 +202,8 @@ public class PlacementProcessor implements ApplicationMasterServiceProcessor {
   private void dispatchRequestsForPlacement(ApplicationAttemptId appAttemptId,
       List<SchedulingRequest> schedulingRequests) {
     if (schedulingRequests != null && !schedulingRequests.isEmpty()) {
-      this.placementDispatcher.dispatch(
-          new BatchedRequests(appAttemptId.getApplicationId(),
-              schedulingRequests, 1));
+      this.placementDispatcher.dispatch(new BatchedRequests(iteratorType,
+          appAttemptId.getApplicationId(), schedulingRequests, 1));
     }
   }
 
@@ -329,11 +342,10 @@ public class PlacementProcessor implements ApplicationMasterServiceProcessor {
       }
     }
     if (!isAdded) {
-      BatchedRequests br =
-          new BatchedRequests(schedulerResponse.getApplicationId(),
-              Collections.singleton(
-                  schedulerResponse.getSchedulingRequest()),
-              placementAttempt + 1);
+      BatchedRequests br = new BatchedRequests(iteratorType,
+          schedulerResponse.getApplicationId(),
+          Collections.singleton(schedulerResponse.getSchedulingRequest()),
+          placementAttempt + 1);
       reqsToRetry.add(br);
       br.addToBlacklist(
           schedulerResponse.getSchedulingRequest().getAllocationTags(),

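Note that with the stricter parsing above, an unrecognized iterator name makes
the processor fail fast during init (wrapped in a YarnRuntimeException) instead
of silently falling back to a default. A small sketch of the underlying enum
behaviour, using a deliberately invalid, hypothetical value:

    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.BatchedRequests;

    public final class IteratorNameParsingSketch {
      public static void main(String[] args) {
        try {
          // "ROUND_ROBIN" is not a member of BatchedRequests.IteratorType,
          // so valueOf throws IllegalArgumentException, which init() wraps
          // in a YarnRuntimeException.
          BatchedRequests.IteratorType.valueOf("ROUND_ROBIN");
        } catch (IllegalArgumentException e) {
          System.err.println("Unknown iterator type: " + e.getMessage());
        }
      }
    }
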
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a52d11fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/SamplePlacementAlgorithm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/SamplePlacementAlgorithm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/SamplePlacementAlgorithm.java
deleted file mode 100644
index 8d49801..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/SamplePlacementAlgorithm.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor;
-
-import org.apache.hadoop.yarn.api.records.SchedulingRequest;
-import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
-import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetConstraint;
-import org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations.SpecializedConstraintTransformer;
-import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.InvalidAllocationTagsQueryException;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithm;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmInput;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmOutput;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmOutputCollector;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.PlacedSchedulingRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Sample Test algorithm. Assumes anti-affinity always
- * It also assumes the numAllocations in resource sizing is always = 1
- *
- * NOTE: This is just a sample implementation. Not be actually used
- */
-public class SamplePlacementAlgorithm implements ConstraintPlacementAlgorithm {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SamplePlacementAlgorithm.class);
-
-  private AllocationTagsManager tagsManager;
-  private PlacementConstraintManager constraintManager;
-  private NodeCandidateSelector nodeSelector;
-
-  @Override
-  public void init(RMContext rmContext) {
-    this.tagsManager = rmContext.getAllocationTagsManager();
-    this.constraintManager = rmContext.getPlacementConstraintManager();
-    this.nodeSelector =
-        filter -> ((AbstractYarnScheduler)(rmContext)
-            .getScheduler()).getNodes(filter);
-  }
-
-  @Override
-  public void place(ConstraintPlacementAlgorithmInput input,
-      ConstraintPlacementAlgorithmOutputCollector collector) {
-    BatchedRequests requests = (BatchedRequests)input;
-    ConstraintPlacementAlgorithmOutput resp =
-        new ConstraintPlacementAlgorithmOutput(requests.getApplicationId());
-    List<SchedulerNode> allNodes = nodeSelector.selectNodes(null);
-    Map<String, List<SchedulingRequest>> tagIndexedRequests = new HashMap<>();
-    requests.getSchedulingRequests()
-        .stream()
-        .filter(r -> r.getAllocationTags() != null)
-        .forEach(
-            req -> req.getAllocationTags().forEach(
-                tag -> tagIndexedRequests.computeIfAbsent(tag,
-                    k -> new ArrayList<>()).add(req))
-        );
-    for (Map.Entry<String, List<SchedulingRequest>> entry :
-        tagIndexedRequests.entrySet()) {
-      String tag = entry.getKey();
-      PlacementConstraint constraint =
-          constraintManager.getConstraint(requests.getApplicationId(),
-              Collections.singleton(tag));
-      if (constraint != null) {
-        // Currently works only for simple anti-affinity
-        // NODE scope target expressions
-        SpecializedConstraintTransformer transformer =
-            new SpecializedConstraintTransformer(constraint);
-        PlacementConstraint transform = transformer.transform();
-        TargetConstraint targetConstraint =
-            (TargetConstraint) transform.getConstraintExpr();
-        // Assume a single target expression tag;
-        // The Sample Algorithm assumes a constraint will always be a simple
-        // Target Constraint with a single entry in the target set.
-        // As mentioned in the class javadoc - This algorithm should be
-        // used mostly for testing and validating end-2-end workflow.
-        String targetTag =
-            targetConstraint.getTargetExpressions().iterator().next()
-            .getTargetValues().iterator().next();
-        // iterate over all nodes
-        Iterator<SchedulerNode> nodeIter = allNodes.iterator();
-        List<SchedulingRequest> schedulingRequests = entry.getValue();
-        Iterator<SchedulingRequest> reqIter = schedulingRequests.iterator();
-        while (reqIter.hasNext()) {
-          SchedulingRequest sReq = reqIter.next();
-          int numAllocs = sReq.getResourceSizing().getNumAllocations();
-          while (numAllocs > 0 && nodeIter.hasNext()) {
-            SchedulerNode node = nodeIter.next();
-            long nodeCardinality = 0;
-            try {
-              nodeCardinality = tagsManager.getNodeCardinality(
-                  node.getNodeID(), requests.getApplicationId(),
-                  targetTag);
-              if (nodeCardinality == 0 &&
-                  !requests.getBlacklist(tag).contains(node.getNodeID())) {
-                numAllocs--;
-                sReq.getResourceSizing().setNumAllocations(numAllocs);
-                PlacedSchedulingRequest placedReq =
-                    new PlacedSchedulingRequest(sReq);
-                placedReq.setPlacementAttempt(requests.getPlacementAttempt());
-                placedReq.getNodes().add(node);
-                resp.getPlacedRequests().add(placedReq);
-              }
-            } catch (InvalidAllocationTagsQueryException e) {
-              LOG.warn("Got exception from TagManager !", e);
-            }
-          }
-        }
-      }
-    }
-    // Add all requests whose numAllocations still > 0 to rejected list.
-    requests.getSchedulingRequests().stream()
-        .filter(sReq -> sReq.getResourceSizing().getNumAllocations() > 0)
-        .forEach(rejReq -> resp.getRejectedRequests().add(rejReq));
-    collector.collect(resp);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a52d11fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
index 0ce1614..f1d5663 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
@@ -75,24 +75,24 @@ public class TestAllocationTagsManager {
 
     // 3 Containers from app1
     atm.addContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        TestUtils.getMockContainerId(1, 2),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.addContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        TestUtils.getMockContainerId(1, 3),
         ImmutableSet.of("service"));
 
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        TestUtils.getMockContainerId(1, 4),
         ImmutableSet.of("reducer"));
 
     // 1 Container from app2
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        TestUtils.getMockContainerId(2, 3),
         ImmutableSet.of("service"));
 
     // Get Node Cardinality of app1 on node1, with tag "mapper"
@@ -170,24 +170,21 @@ public class TestAllocationTagsManager {
 
     // Finish all containers:
     atm.removeContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.removeContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        TestUtils.getMockContainerId(1, 2),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.removeContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
-        ImmutableSet.of("service"));
+        TestUtils.getMockContainerId(1, 3), ImmutableSet.of("service"));
 
     atm.removeContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
-        ImmutableSet.of("reducer"));
+        TestUtils.getMockContainerId(1, 4), ImmutableSet.of("reducer"));
 
     atm.removeContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
-        ImmutableSet.of("service"));
+        TestUtils.getMockContainerId(2, 3), ImmutableSet.of("service"));
 
     // Expect all cardinality to be 0
     // Get Cardinality of app1 on node1, with tag "mapper"
@@ -270,25 +267,22 @@ public class TestAllocationTagsManager {
 
     // 3 Containers from app1
     atm.addContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 2),
+        TestUtils.getMockContainerId(2, 2),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.addContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 4),
-        ImmutableSet.of("reducer"));
+        TestUtils.getMockContainerId(2, 4), ImmutableSet.of("reducer"));
 
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
-        ImmutableSet.of("service"));
+        TestUtils.getMockContainerId(1, 3), ImmutableSet.of("service"));
 
     // 1 Container from app2
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
-        ImmutableSet.of("service"));
+        TestUtils.getMockContainerId(2, 3), ImmutableSet.of("service"));
 
     // Get Rack Cardinality of app1 on rack0, with tag "mapper"
     Assert.assertEquals(1, atm.getRackCardinality("rack0",
@@ -325,45 +319,39 @@ public class TestAllocationTagsManager {
 
     // Add a bunch of containers
     atm.addContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        TestUtils.getMockContainerId(1, 2),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.addContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
-        ImmutableSet.of("service"));
+        TestUtils.getMockContainerId(1, 3), ImmutableSet.of("service"));
 
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
-        ImmutableSet.of("reducer"));
+        TestUtils.getMockContainerId(1, 4), ImmutableSet.of("reducer"));
 
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
-        ImmutableSet.of("service"));
+        TestUtils.getMockContainerId(2, 3), ImmutableSet.of("service"));
 
     // Remove all these containers
     atm.removeContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.removeContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        TestUtils.getMockContainerId(1, 2),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.removeContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
-        ImmutableSet.of("service"));
+        TestUtils.getMockContainerId(1, 3), ImmutableSet.of("service"));
 
     atm.removeContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
-        ImmutableSet.of("reducer"));
+        TestUtils.getMockContainerId(1, 4), ImmutableSet.of("reducer"));
 
     atm.removeContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
-        ImmutableSet.of("service"));
+        TestUtils.getMockContainerId(2, 3), ImmutableSet.of("service"));
 
     // Check internal data structure
     Assert.assertEquals(0,
@@ -375,6 +363,87 @@ public class TestAllocationTagsManager {
   }
 
   @Test
+  public void testTempContainerAllocations()
+      throws InvalidAllocationTagsQueryException {
+    /**
+     * Construct both TEMP and normal containers:
+     * Node1: TEMP container_1_1 (mapper/reducer/app_1), container_1_2 (service/app_1)
+     *
+     * Node2: container_1_3 (reducer/app_1), TEMP container_2_1 (service/app_2)
+     */
+
+    AllocationTagsManager atm = new AllocationTagsManager(rmContext);
+
+    // 3 Containers from app1
+    atm.addTempContainer(NodeId.fromString("host1:123"),
+        TestUtils.getMockApplicationId(1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("host1:123"),
+        TestUtils.getMockContainerId(1, 2), ImmutableSet.of("service"));
+
+    atm.addContainer(NodeId.fromString("host2:123"),
+        TestUtils.getMockContainerId(1, 3), ImmutableSet.of("reducer"));
+
+    // 1 Container from app2
+    atm.addTempContainer(NodeId.fromString("host2:123"),
+        TestUtils.getMockApplicationId(2), ImmutableSet.of("service"));
+
+    // Expect tag mappings to be present including temp Tags
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
+            Long::sum));
+
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("service"),
+            Long::sum));
+
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
+            TestUtils.getMockApplicationId(2), ImmutableSet.of("service"),
+            Long::sum));
+
+    // Do a temp Tag cleanup on app2
+    atm.cleanTempContainers(TestUtils.getMockApplicationId(2));
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
+            TestUtils.getMockApplicationId(2), ImmutableSet.of("service"),
+            Long::sum));
+    // Expect app1 to be unaffected
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
+            Long::sum));
+    // Do a cleanup on app1 as well
+    atm.cleanTempContainers(TestUtils.getMockApplicationId(1));
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
+            Long::sum));
+
+    // Non temp-tags should be unaffected
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("service"),
+            Long::sum));
+
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
+            TestUtils.getMockApplicationId(2), ImmutableSet.of("service"),
+            Long::sum));
+
+    // Expect app2 with no containers, and app1 with 2 containers across 2 nodes
+    Assert.assertEquals(2,
+        atm.getPerAppNodeMappings().get(TestUtils.getMockApplicationId(1))
+            .getTypeToTagsWithCount().size());
+
+    Assert.assertNull(
+        atm.getPerAppNodeMappings().get(TestUtils.getMockApplicationId(2)));
+  }
+
+  @Test
   public void testQueryCardinalityWithIllegalParameters()
       throws InvalidAllocationTagsQueryException {
     /**
@@ -385,24 +454,21 @@ public class TestAllocationTagsManager {
 
     // Add a bunch of containers
     atm.addContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        TestUtils.getMockContainerId(1, 2),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.addContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
-        ImmutableSet.of("service"));
+        TestUtils.getMockContainerId(1, 3), ImmutableSet.of("service"));
 
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
-        ImmutableSet.of("reducer"));
+        TestUtils.getMockContainerId(1, 4), ImmutableSet.of("reducer"));
 
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
-        ImmutableSet.of("service"));
+        TestUtils.getMockContainerId(2, 3), ImmutableSet.of("service"));
 
     // No node-id
     boolean caughtException = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a52d11fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestBatchedRequestsIterators.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestBatchedRequestsIterators.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestBatchedRequestsIterators.java
new file mode 100644
index 0000000..0e7b715
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestBatchedRequestsIterators.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.TestPlacementProcessor.schedulingRequest;
+
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.BatchedRequests;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test Request Iterator.
+ */
+public class TestBatchedRequestsIterators {
+
+  @Test
+  public void testSerialIterator() throws Exception {
+    List<SchedulingRequest> schedulingRequestList =
+        Arrays.asList(schedulingRequest(1, 1, 1, 512, "foo"),
+            schedulingRequest(1, 2, 1, 512, "foo"),
+            schedulingRequest(1, 3, 1, 512, "foo"),
+            schedulingRequest(1, 4, 1, 512, "foo"));
+
+    BatchedRequests batchedRequests = new BatchedRequests(
+        BatchedRequests.IteratorType.SERIAL, null, schedulingRequestList, 1);
+
+    Iterator<SchedulingRequest> requestIterator = batchedRequests.iterator();
+    long prevAllocId = 0;
+    while (requestIterator.hasNext()) {
+      SchedulingRequest request = requestIterator.next();
+      Assert.assertTrue(request.getAllocationRequestId() > prevAllocId);
+      prevAllocId = request.getAllocationRequestId();
+    }
+  }
+
+  @Test
+  public void testPopularTagsIterator() throws Exception {
+    List<SchedulingRequest> schedulingRequestList =
+        Arrays.asList(schedulingRequest(1, 1, 1, 512, "pri", "foo"),
+            schedulingRequest(1, 2, 1, 512, "bar"),
+            schedulingRequest(1, 3, 1, 512, "foo", "pri"),
+            schedulingRequest(1, 4, 1, 512, "test"),
+            schedulingRequest(1, 5, 1, 512, "pri", "bar"));
+
+    BatchedRequests batchedRequests =
+        new BatchedRequests(BatchedRequests.IteratorType.POPULAR_TAGS, null,
+            schedulingRequestList, 1);
+
+    Iterator<SchedulingRequest> requestIterator = batchedRequests.iterator();
+    long recCount = 0;
+    while (requestIterator.hasNext()) {
+      SchedulingRequest request = requestIterator.next();
+      if (recCount < 3) {
+        Assert.assertTrue(request.getAllocationTags().contains("pri"));
+      } else {
+        Assert.assertTrue(request.getAllocationTags().contains("bar")
+            || request.getAllocationTags().contains("test"));
+      }
+      recCount++;
+    }
+  }
+}
\ No newline at end of file
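The two tests above pin down the expected iteration orders: the SERIAL iterator yields requests in ascending allocationRequestId order, while the POPULAR_TAGS iterator yields requests carrying the most frequently requested tags first. As a rough illustration of the latter ordering only (a hypothetical sketch written against the public SchedulingRequest API, not the actual BatchedRequests implementation):

import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.yarn.api.records.SchedulingRequest;

/** Hypothetical sketch: rank requests so those carrying the most popular tags come first. */
public final class PopularTagsOrderingSketch {
  private PopularTagsOrderingSketch() {
  }

  public static void sortByTagPopularity(List<SchedulingRequest> requests) {
    // Count how many requests ask for each tag.
    Map<String, Integer> tagCounts = new HashMap<>();
    for (SchedulingRequest req : requests) {
      for (String tag : req.getAllocationTags()) {
        tagCounts.merge(tag, 1, Integer::sum);
      }
    }
    // A request is ranked by the most popular tag it carries, descending.
    requests.sort(Comparator.comparingInt(
        (SchedulingRequest req) -> req.getAllocationTags().stream()
            .mapToInt(tagCounts::get).max().orElse(0)).reversed());
  }
}

This is consistent with testPopularTagsIterator above: "pri" occurs three times, so the three requests tagged "pri" are expected first.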

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a52d11fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
index db8ae15..87dd5b7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
@@ -373,13 +373,13 @@ public class TestPlacementProcessor {
         rej.getReason());
   }
 
-  private static SchedulingRequest schedulingRequest(
+  protected static SchedulingRequest schedulingRequest(
       int priority, long allocReqId, int cores, int mem, String... tags) {
     return schedulingRequest(priority, allocReqId, cores, mem,
         ExecutionType.GUARANTEED, tags);
   }
 
-  private static SchedulingRequest schedulingRequest(
+  protected static SchedulingRequest schedulingRequest(
       int priority, long allocReqId, int cores, int mem,
       ExecutionType execType, String... tags) {
     return SchedulingRequest.newBuilder()




[02/32] hadoop git commit: YARN-6597. Add RMContainer recovery test to verify tag population in the AllocationTagsManager. (Panagiotis Garefalakis via asuresh)

Posted by as...@apache.org.
YARN-6597. Add RMContainer recovery test to verify tag population in the AllocationTagsManager. (Panagiotis Garefalakis via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/add993e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/add993e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/add993e2

Branch: refs/heads/trunk
Commit: add993e26a3c96f77dfd42086f186a139966019e
Parents: f8c5f5b
Author: Arun Suresh <as...@apache.org>
Authored: Thu Jan 25 23:01:43 2018 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../rmcontainer/RMContainerImpl.java            |  8 +++----
 .../rmcontainer/TestRMContainerImpl.java        | 25 ++++++++++++++++++--
 2 files changed, 26 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/add993e2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index a504221..541621b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.rmcontainer;
 
 import java.util.Collections;
 import java.util.EnumSet;
-import java.util.List;
 import java.util.Set;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
@@ -40,7 +39,6 @@ import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
@@ -533,7 +531,7 @@ public class RMContainerImpl implements RMContainer {
         RMContainerEvent event) {
       NMContainerStatus report =
           ((RMContainerRecoverEvent) event).getContainerReport();
-      // Set the allocation tags from the
+      // Set the allocation tags from the NMContainerStatus
       container.setAllocationTags(report.getAllocationTags());
       // Notify AllocationTagsManager
       container.rmContext.getAllocationTagsManager().addContainer(
@@ -689,7 +687,7 @@ public class RMContainerImpl implements RMContainer {
         // Something wrong happened, kill the container
         LOG.warn("Something wrong happened, container size reported by NM"
             + " is not expected, ContainerID=" + container.getContainerId()
-            + " rm-size-resource:" + rmContainerResource + " nm-size-reosurce:"
+            + " rm-size-resource:" + rmContainerResource + " nm-size-resource:"
             + nmContainerResource);
         container.eventHandler.handle(new RMNodeCleanContainerEvent(
             container.nodeId, container.getContainerId()));
@@ -702,7 +700,7 @@ public class RMContainerImpl implements RMContainer {
 
     @Override
     public void transition(RMContainerImpl container, RMContainerEvent event) {
-      // Notify placementManager
+      // Notify AllocationTagsManager
       container.rmContext.getAllocationTagsManager().removeContainer(
           container.getNodeId(), container.getContainerId(),
           container.getAllocationTags());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/add993e2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
index 2bf6a21..27c5fbd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
@@ -171,7 +172,7 @@ public class TestRMContainerImpl {
     assertEquals(containerStatus, cfEvent.getContainerStatus());
     assertEquals(RMAppAttemptEventType.CONTAINER_FINISHED, cfEvent.getType());
     
-    // In RELEASED state. A FINIHSED event may come in.
+    // In RELEASED state. A FINISHED event may come in.
     rmContainer.handle(new RMContainerFinishedEvent(containerId, SchedulerUtils
         .createAbnormalContainerStatus(containerId, "FinishedContainer"),
         RMContainerEventType.FINISHED));
@@ -375,7 +376,7 @@ public class TestRMContainerImpl {
   }
 
   @Test
-  public void testContainerTransitionNotifyPlacementTagsManager()
+  public void testContainerTransitionNotifyAllocationTagsManager()
       throws Exception {
     DrainDispatcher drainDispatcher = new DrainDispatcher();
     EventHandler<RMAppAttemptEvent> appAttemptEventHandler = mock(
@@ -494,5 +495,25 @@ public class TestRMContainerImpl {
 
     Assert.assertEquals(0,
         tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
+
+    /* Fourth container: NEW -> RECOVERED */
+    rmContainer = new RMContainerImpl(container,
+        SchedulerRequestKey.extractFrom(container), appAttemptId, nodeId,
+        "user", rmContext);
+    rmContainer.setAllocationTags(ImmutableSet.of("mapper"));
+
+    Assert.assertEquals(0,
+        tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
+
+    NMContainerStatus containerStatus = NMContainerStatus
+        .newInstance(containerId, 0, ContainerState.NEW,
+            Resource.newInstance(1024, 1), "recover container", 0,
+            Priority.newInstance(0), 0);
+    containerStatus.setAllocationTags(ImmutableSet.of("mapper"));
+    rmContainer
+        .handle(new RMContainerRecoverEvent(containerId, containerStatus));
+
+    Assert.assertEquals(1,
+        tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
   }
 }




[06/32] hadoop git commit: YARN-6594. [API] Introduce SchedulingRequest object. (Konstantinos Karanasos via wangda)

Posted by as...@apache.org.
YARN-6594. [API] Introduce SchedulingRequest object. (Konstantinos Karanasos via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b57e8bc3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b57e8bc3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b57e8bc3

Branch: refs/heads/trunk
Commit: b57e8bc3002a95d2f2f328554d792151cdc1120d
Parents: 33a796d
Author: Wangda Tan <wa...@apache.org>
Authored: Mon Oct 30 16:54:02 2017 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../hadoop/yarn/api/records/ResourceSizing.java |  64 +++++
 .../yarn/api/records/SchedulingRequest.java     | 205 ++++++++++++++
 .../src/main/proto/yarn_protos.proto            |  14 +
 .../records/impl/pb/ResourceSizingPBImpl.java   | 117 ++++++++
 .../impl/pb/SchedulingRequestPBImpl.java        | 266 +++++++++++++++++++
 5 files changed, 666 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b57e8bc3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java
new file mode 100644
index 0000000..d82be11
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * {@code ResourceSizing} contains information for the size of a
+ * {@link SchedulingRequest}, such as the number of requested allocations and
+ * the resources for each allocation.
+ */
+@Public
+@Unstable
+public abstract class ResourceSizing {
+
+  @Public
+  @Unstable
+  public static ResourceSizing newInstance(Resource resources) {
+    return ResourceSizing.newInstance(1, resources);
+  }
+
+  @Public
+  @Unstable
+  public static ResourceSizing newInstance(int numAllocations, Resource resources) {
+    ResourceSizing resourceSizing = Records.newRecord(ResourceSizing.class);
+    resourceSizing.setNumAllocations(numAllocations);
+    resourceSizing.setResources(resources);
+    return resourceSizing;
+  }
+
+  @Public
+  @Unstable
+  public abstract int getNumAllocations();
+
+  @Public
+  @Unstable
+  public abstract void setNumAllocations(int numAllocations);
+
+  @Public
+  @Unstable
+  public abstract Resource getResources();
+
+  @Public
+  @Unstable
+  public abstract void setResources(Resource resources);
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b57e8bc3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
new file mode 100644
index 0000000..47a0697
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
@@ -0,0 +1,205 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * {@code SchedulingRequest} represents a request made by an application to the
+ * {@code ResourceManager} to obtain an allocation. It is similar to the
+ * {@link ResourceRequest}. However, it is more complete than the latter, as it
+ * allows applications to specify allocation tags (e.g., to express that an
+ * allocation belongs to {@code Spark} or is an {@code HBase-master}), as well
+ * as involved {@link PlacementConstraint}s (e.g., anti-affinity between Spark
+ * and HBase allocations).
+ *
+ * The size specification of the allocation is in {@code ResourceSizing}.
+ */
+@Public
+@Unstable
+public abstract class SchedulingRequest {
+
+  @Public
+  @Unstable
+  public static SchedulingRequest newInstance(long allocationRequestId,
+      Priority priority, ExecutionTypeRequest executionType,
+      Set<String> allocationTags, ResourceSizing resourceSizing,
+      PlacementConstraint placementConstraintExpression) {
+    return SchedulingRequest.newBuilder()
+        .allocationRequestId(allocationRequestId).priority(priority)
+        .executionType(executionType).allocationTags(allocationTags)
+        .resourceSizing(resourceSizing)
+        .placementConstraintExpression(placementConstraintExpression).build();
+  }
+
+  @Public
+  @Unstable
+  public static SchedulingRequestBuilder newBuilder() {
+    return new SchedulingRequestBuilder();
+  }
+
+  /**
+   * Class to construct instances of {@link SchedulingRequest} with specific
+   * options.
+   */
+  @Public
+  @Unstable
+  public static final class SchedulingRequestBuilder {
+    private SchedulingRequest schedulingRequest =
+            Records.newRecord(SchedulingRequest.class);
+
+    private SchedulingRequestBuilder() {
+      schedulingRequest.setAllocationRequestId(0);
+      schedulingRequest.setPriority(Priority.newInstance(0));
+      schedulingRequest.setExecutionType(ExecutionTypeRequest.newInstance());
+    }
+
+    /**
+     * Set the <code>allocationRequestId</code> of the request.
+     * 
+     * @see SchedulingRequest#setAllocationRequestId(long)
+     * @param allocationRequestId <code>allocationRequestId</code> of the
+     *          request
+     * @return {@link SchedulingRequest.SchedulingRequestBuilder}
+     */
+    @Public
+    @Unstable
+    public SchedulingRequestBuilder allocationRequestId(
+            long allocationRequestId) {
+      schedulingRequest.setAllocationRequestId(allocationRequestId);
+      return this;
+    }
+
+    /**
+     * Set the <code>priority</code> of the request.
+     *
+     * @param priority <code>priority</code> of the request
+     * @return {@link SchedulingRequest.SchedulingRequestBuilder}
+     * @see SchedulingRequest#setPriority(Priority)
+     */
+    @Public
+    @Unstable
+    public SchedulingRequestBuilder priority(Priority priority) {
+      schedulingRequest.setPriority(priority);
+      return this;
+    }
+
+    /**
+     * Set the <code>executionType</code> of the request.
+     * 
+     * @see SchedulingRequest#setExecutionType(ExecutionTypeRequest)
+     * @param executionType <code>executionType</code> of the request
+     * @return {@link SchedulingRequest.SchedulingRequestBuilder}
+     */
+    @Public
+    @Unstable
+    public SchedulingRequestBuilder executionType(
+        ExecutionTypeRequest executionType) {
+      schedulingRequest.setExecutionType(executionType);
+      return this;
+    }
+    
+    /**
+     * Set the <code>allocationTags</code> of the request.
+     *
+     * @see SchedulingRequest#setAllocationTags(Set)
+     * @param allocationTags <code>allocationTags</code> of the request
+     * @return {@link SchedulingRequest.SchedulingRequestBuilder}
+     */
+    @Public
+    @Unstable
+    public SchedulingRequestBuilder allocationTags(Set<String> allocationTags) {
+      schedulingRequest.setAllocationTags(allocationTags);
+      return this;
+    }
+
+    /**
+     * Set the <code>resourceSizing</code> of the request.
+     *
+     * @see SchedulingRequest#setResourceSizing(ResourceSizing)
+     * @param resourceSizing <code>resourceSizing</code> of the request
+     * @return {@link SchedulingRequest.SchedulingRequestBuilder}
+     */
+    @Public
+    @Unstable
+    public SchedulingRequestBuilder resourceSizing(
+        ResourceSizing resourceSizing) {
+      schedulingRequest.setResourceSizing(resourceSizing);
+      return this;
+    }
+
+    /**
+     * Set the <code>placementConstraintExpression</code> of the request.
+     *
+     * @see SchedulingRequest#setPlacementConstraint(
+     *      PlacementConstraint)
+     * @param placementConstraintExpression <code>placementConstraints</code> of
+     *          the request
+     * @return {@link SchedulingRequest.SchedulingRequestBuilder}
+     */
+    @Public
+    @Unstable
+    public SchedulingRequestBuilder placementConstraintExpression(
+        PlacementConstraint placementConstraintExpression) {
+      schedulingRequest
+          .setPlacementConstraint(placementConstraintExpression);
+      return this;
+    }
+
+    /**
+     * Return generated {@link SchedulingRequest} object.
+     * 
+     * @return {@link SchedulingRequest}
+     */
+    @Public
+    @Unstable
+    public SchedulingRequest build() {
+      return schedulingRequest;
+    }
+  }
+
+  public abstract long getAllocationRequestId();
+
+  public abstract void setAllocationRequestId(long allocationRequestId);
+
+  public abstract Priority getPriority();
+
+  public abstract void setPriority(Priority priority);
+
+  public abstract ExecutionTypeRequest getExecutionType();
+
+  public abstract void setExecutionType(ExecutionTypeRequest executionType);
+
+  public abstract Set<String> getAllocationTags();
+
+  public abstract void setAllocationTags(Set<String> allocationTags);
+
+  public abstract ResourceSizing getResourceSizing();
+
+  public abstract void setResourceSizing(ResourceSizing resourceSizing);
+
+  public abstract PlacementConstraint getPlacementConstraint();
+
+  public abstract void setPlacementConstraint(
+      PlacementConstraint placementConstraint);
+}
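For orientation, a minimal sketch of how an application might assemble one of these requests with the builder above; the tag name, priority, and sizes are illustrative values, not anything mandated by the API:

import com.google.common.collect.ImmutableSet;

import org.apache.hadoop.yarn.api.records.ExecutionType;
import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceSizing;
import org.apache.hadoop.yarn.api.records.SchedulingRequest;

public final class SchedulingRequestSketch {
  private SchedulingRequestSketch() {
  }

  /** Ask for 4 guaranteed allocations of 1 GB / 1 vcore, tagged "hbase-rs". */
  public static SchedulingRequest newExampleRequest() {
    return SchedulingRequest.newBuilder()
        .allocationRequestId(42L)
        .priority(Priority.newInstance(1))
        .executionType(
            ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
        .allocationTags(ImmutableSet.of("hbase-rs"))
        .resourceSizing(
            ResourceSizing.newInstance(4, Resource.newInstance(1024, 1)))
        .build();
  }
}

A PlacementConstraint can be attached to the same request through placementConstraintExpression().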

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b57e8bc3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index ff0d54b..d24f863 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -405,6 +405,20 @@ message ExecutionTypeRequestProto {
   optional bool enforce_execution_type = 2 [default = false];
 }
 
+message SchedulingRequestProto {
+  optional int64 allocationRequestId = 1 [default = 0];
+  optional PriorityProto priority = 2;
+  optional ExecutionTypeRequestProto executionType = 3;
+  repeated string allocationTags = 4;
+  optional ResourceSizingProto resourceSizing = 5;
+  optional PlacementConstraintProto placementConstraint = 6;
+}
+
+message ResourceSizingProto {
+  optional int32 numAllocations = 1;
+  optional ResourceProto resources = 2;
+}
+
 enum AMCommandProto {
   AM_RESYNC = 1;
   AM_SHUTDOWN = 2;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b57e8bc3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
new file mode 100644
index 0000000..05bb3bd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
@@ -0,0 +1,117 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceSizingProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceSizingProtoOrBuilder;
+
+@Private
+@Unstable
+public class ResourceSizingPBImpl extends ResourceSizing {
+  ResourceSizingProto proto = ResourceSizingProto.getDefaultInstance();
+  ResourceSizingProto.Builder builder = null;
+  boolean viaProto = false;
+
+  private Resource resources = null;
+
+  public ResourceSizingPBImpl() {
+    builder = ResourceSizingProto.newBuilder();
+  }
+
+  public ResourceSizingPBImpl(ResourceSizingProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  public ResourceSizingProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.resources != null) {
+      builder.setResources(convertToProtoFormat(this.resources));
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = ResourceSizingProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  @Override
+  public int getNumAllocations() {
+    ResourceSizingProtoOrBuilder p = viaProto ? proto : builder;
+    return (p.getNumAllocations());
+  }
+
+  @Override
+  public void setNumAllocations(int numAllocations) {
+    maybeInitBuilder();
+    builder.setNumAllocations(numAllocations);
+  }
+
+  @Override
+  public Resource getResources() {
+    ResourceSizingProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.resources != null) {
+      return this.resources;
+    }
+    if (!p.hasResources()) {
+      return null;
+    }
+    this.resources = convertFromProtoFormat(p.getResources());
+    return this.resources;
+  }
+
+  @Override
+  public void setResources(Resource resources) {
+    maybeInitBuilder();
+    if (resources == null) {
+      builder.clearResources();
+    }
+    this.resources = resources;
+  }
+
+  private ResourcePBImpl convertFromProtoFormat(ResourceProto r) {
+    return new ResourcePBImpl(r);
+  }
+
+  private ResourceProto convertToProtoFormat(Resource r) {
+    return ((ResourcePBImpl) r).getProto();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b57e8bc3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
new file mode 100644
index 0000000..7826b36
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
@@ -0,0 +1,266 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.pb.PlacementConstraintFromProtoConverter;
+import org.apache.hadoop.yarn.api.pb.PlacementConstraintToProtoConverter;
+import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.proto.YarnProtos.ExecutionTypeRequestProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceSizingProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.SchedulingRequestProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.SchedulingRequestProtoOrBuilder;
+
+@Private
+@Unstable
+public class SchedulingRequestPBImpl extends SchedulingRequest {
+  SchedulingRequestProto proto = SchedulingRequestProto.getDefaultInstance();
+  SchedulingRequestProto.Builder builder = null;
+  boolean viaProto = false;
+
+  private Priority priority = null;
+  private ExecutionTypeRequest executionType = null;
+  private Set<String> allocationTags = null;
+  private ResourceSizing resourceSizing = null;
+  private PlacementConstraint placementConstraint = null;
+
+  public SchedulingRequestPBImpl() {
+    builder = SchedulingRequestProto.newBuilder();
+  }
+
+  public SchedulingRequestPBImpl(SchedulingRequestProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  public SchedulingRequestProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.priority != null) {
+      builder.setPriority(convertToProtoFormat(this.priority));
+    }
+    if (this.executionType != null) {
+      builder.setExecutionType(convertToProtoFormat(this.executionType));
+    }
+    if (this.allocationTags != null) {
+      builder.clearAllocationTags();
+      builder.addAllAllocationTags(this.allocationTags);
+    }
+    if (this.resourceSizing != null) {
+      builder.setResourceSizing(convertToProtoFormat(this.resourceSizing));
+    }
+    if (this.placementConstraint != null) {
+      builder.setPlacementConstraint(
+          convertToProtoFormat(this.placementConstraint));
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = SchedulingRequestProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  @Override
+  public long getAllocationRequestId() {
+    SchedulingRequestProtoOrBuilder p = viaProto ? proto : builder;
+    return (p.getAllocationRequestId());
+  }
+
+  @Override
+  public void setAllocationRequestId(long allocationRequestId) {
+    maybeInitBuilder();
+    builder.setAllocationRequestId(allocationRequestId);
+  }
+
+  @Override
+  public Priority getPriority() {
+    SchedulingRequestProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.priority != null) {
+      return this.priority;
+    }
+    if (!p.hasPriority()) {
+      return null;
+    }
+    this.priority = convertFromProtoFormat(p.getPriority());
+    return this.priority;
+  }
+
+  @Override
+  public void setPriority(Priority priority) {
+    maybeInitBuilder();
+    if (priority == null) {
+      builder.clearPriority();
+    }
+    this.priority = priority;
+  }
+
+  @Override
+  public ExecutionTypeRequest getExecutionType() {
+    SchedulingRequestProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.executionType != null) {
+      return this.executionType;
+    }
+    if (!p.hasExecutionType()) {
+      return null;
+    }
+    this.executionType = convertFromProtoFormat(p.getExecutionType());
+    return this.executionType;
+  }
+
+  @Override
+  public void setExecutionType(ExecutionTypeRequest executionType) {
+    maybeInitBuilder();
+    if (executionType == null) {
+      builder.clearExecutionType();
+    }
+    this.executionType = executionType;
+  }
+
+  @Override
+  public Set<String> getAllocationTags() {
+    initAllocationTags();
+    return this.allocationTags;
+  }
+
+  @Override
+  public void setAllocationTags(Set<String> allocationTags) {
+    maybeInitBuilder();
+    builder.clearAllocationTags();
+    this.allocationTags = allocationTags;
+  }
+
+  @Override
+  public ResourceSizing getResourceSizing() {
+    SchedulingRequestProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.resourceSizing != null) {
+      return this.resourceSizing;
+    }
+    if (!p.hasResourceSizing()) {
+      return null;
+    }
+    this.resourceSizing = convertFromProtoFormat(p.getResourceSizing());
+    return this.resourceSizing;
+  }
+
+  @Override
+  public void setResourceSizing(ResourceSizing resourceSizing) {
+    maybeInitBuilder();
+    if (resourceSizing == null) {
+      builder.clearResourceSizing();
+    }
+    this.resourceSizing = resourceSizing;
+  }
+
+  @Override
+  public PlacementConstraint getPlacementConstraint() {
+    SchedulingRequestProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.placementConstraint != null) {
+      return this.placementConstraint;
+    }
+    if (!p.hasPlacementConstraint()) {
+      return null;
+    }
+    this.placementConstraint =
+        convertFromProtoFormat(p.getPlacementConstraint());
+    return this.placementConstraint;
+  }
+
+  @Override
+  public void setPlacementConstraint(PlacementConstraint placementConstraint) {
+    maybeInitBuilder();
+    if (placementConstraint == null) {
+      builder.clearPlacementConstraint();
+    }
+    this.placementConstraint = placementConstraint;
+  }
+
+  private PriorityPBImpl convertFromProtoFormat(PriorityProto p) {
+    return new PriorityPBImpl(p);
+  }
+
+  private PriorityProto convertToProtoFormat(Priority p) {
+    return ((PriorityPBImpl) p).getProto();
+  }
+
+  private ExecutionTypeRequestPBImpl convertFromProtoFormat(
+      ExecutionTypeRequestProto p) {
+    return new ExecutionTypeRequestPBImpl(p);
+  }
+
+  private ExecutionTypeRequestProto convertToProtoFormat(
+      ExecutionTypeRequest p) {
+    return ((ExecutionTypeRequestPBImpl) p).getProto();
+  }
+
+  private ResourceSizingPBImpl convertFromProtoFormat(ResourceSizingProto p) {
+    return new ResourceSizingPBImpl(p);
+  }
+
+  private ResourceSizingProto convertToProtoFormat(ResourceSizing p) {
+    return ((ResourceSizingPBImpl) p).getProto();
+  }
+
+  private PlacementConstraint convertFromProtoFormat(
+      PlacementConstraintProto c) {
+    PlacementConstraintFromProtoConverter fromProtoConverter =
+        new PlacementConstraintFromProtoConverter(c);
+    return fromProtoConverter.convert();
+  }
+
+  private PlacementConstraintProto convertToProtoFormat(PlacementConstraint c) {
+    PlacementConstraintToProtoConverter toProtoConverter =
+        new PlacementConstraintToProtoConverter(c);
+    return toProtoConverter.convert();
+  }
+
+  private void initAllocationTags() {
+    if (this.allocationTags != null) {
+      return;
+    }
+    SchedulingRequestProtoOrBuilder p = viaProto ? proto : builder;
+    this.allocationTags = new HashSet<>();
+    this.allocationTags.addAll(p.getAllocationTagsList());
+  }
+}




[20/32] hadoop git commit: YARN-7807. Assume intra-app anti-affinity as default for scheduling request inside AppPlacementAllocator. (Wangda Tan via asuresh)

Posted by as...@apache.org.
YARN-7807. Assume intra-app anti-affinity as default for scheduling request inside AppPlacementAllocator. (Wangda Tan via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/644afe5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/644afe5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/644afe5f

Branch: refs/heads/trunk
Commit: 644afe5fd800ac4f2b873a99f9b3868c3a8c5c40
Parents: a4c539f
Author: Arun Suresh <as...@apache.org>
Authored: Wed Jan 24 12:55:01 2018 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../placement/SingleConstraintAppPlacementAllocator.java        | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/644afe5f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
index 9e7d71c..b02cb00 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
@@ -334,14 +334,15 @@ public class SingleConstraintAppPlacementAllocator<N extends SchedulerNode>
         targetAllocationTags = new HashSet<>(
             targetExpression.getTargetValues());
 
-        if (targetExpression.getTargetKey() == null || !targetExpression
+        if (targetExpression.getTargetKey() != null && !targetExpression
             .getTargetKey().equals(APPLICATION_LABEL_INTRA_APPLICATION)) {
           throwExceptionWithMetaInfo(
               "As of now, the only accepted target key for targetKey of "
                   + "allocation_tag target expression is: ["
                   + APPLICATION_LABEL_INTRA_APPLICATION
                   + "]. Please make changes to placement constraints "
-                  + "accordingly.");
+                  + "accordingly. If this is null, it will be set to "
+                  + APPLICATION_LABEL_INTRA_APPLICATION + " by default.");
         }
       }
     }
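To make the default concrete: under this change, an allocation_tag target expression with no explicit target key is treated as intra-application. A hypothetical anti-affinity request built with the PlacementConstraints helpers might look as follows (the tag name and sizing are illustrative, and the helper names assume the PlacementConstraints API introduced earlier in this series):

import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

import com.google.common.collect.ImmutableSet;

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceSizing;
import org.apache.hadoop.yarn.api.records.SchedulingRequest;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint;

public final class IntraAppAntiAffinitySketch {
  private IntraAppAntiAffinitySketch() {
  }

  /** No two "hbase-m" containers of the same application on one node. */
  public static SchedulingRequest newAntiAffineRequest() {
    // No target key is given, so per this patch the constraint is scoped to
    // the application's own "hbase-m" allocations by default.
    PlacementConstraint antiAffinity =
        targetNotIn(NODE, allocationTag("hbase-m")).build();
    return SchedulingRequest.newBuilder()
        .allocationRequestId(1L)
        .priority(Priority.newInstance(0))
        .allocationTags(ImmutableSet.of("hbase-m"))
        .resourceSizing(
            ResourceSizing.newInstance(2, Resource.newInstance(2048, 1)))
        .placementConstraintExpression(antiAffinity)
        .build();
  }
}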




[09/32] hadoop git commit: YARN-7522. Introduce AllocationTagsManager to associate allocation tags to nodes. (Wangda Tan via asuresh)

Posted by as...@apache.org.
YARN-7522. Introduce AllocationTagsManager to associate allocation tags to nodes. (Wangda Tan via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/801c0988
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/801c0988
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/801c0988

Branch: refs/heads/trunk
Commit: 801c0988b5ad1eff1e896a2635c2937721c96b04
Parents: 69de9a1
Author: Arun Suresh <as...@apache.org>
Authored: Fri Dec 8 00:24:00 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../resourcemanager/RMActiveServiceContext.java |  15 +
 .../yarn/server/resourcemanager/RMContext.java  |   5 +
 .../server/resourcemanager/RMContextImpl.java   |  12 +
 .../server/resourcemanager/ResourceManager.java |   9 +
 .../constraint/AllocationTagsManager.java       | 431 +++++++++++++++++++
 .../constraint/AllocationTagsNamespaces.java    |  31 ++
 .../InvalidAllocationTagsQueryException.java    |  35 ++
 .../rmcontainer/RMContainer.java                |   8 +
 .../rmcontainer/RMContainerImpl.java            |  21 +
 .../constraint/TestAllocationTagsManager.java   | 328 ++++++++++++++
 .../rmcontainer/TestRMContainerImpl.java        | 124 ++++++
 .../scheduler/capacity/TestUtils.java           |   9 +
 .../scheduler/fifo/TestFifoScheduler.java       |   5 +
 13 files changed, 1033 insertions(+)
----------------------------------------------------------------------
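For orientation, a minimal sketch of the kind of cardinality query this manager serves, using the same getNodeCardinalityByOp call exercised by the tests earlier in this series (the node, application, and tag values are illustrative):

// Assumes an AllocationTagsManager (atm) with which containers have already
// been registered, as RMContainerImpl does on allocation and recovery.
long mapperCountOnHost1 = atm.getNodeCardinalityByOp(
    NodeId.fromString("host1:123"),
    TestUtils.getMockApplicationId(1),
    ImmutableSet.of("mapper"),
    Long::sum);   // Long::max would return the largest single-tag count instead.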


http://git-wip-us.apache.org/repos/asf/hadoop/blob/801c0988/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
index 9dc5945..6ee3a4c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
+import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
@@ -107,6 +108,7 @@ public class RMActiveServiceContext {
 
   private RMAppLifetimeMonitor rmAppLifetimeMonitor;
   private QueueLimitCalculator queueLimitCalculator;
+  private AllocationTagsManager allocationTagsManager;
 
   public RMActiveServiceContext() {
     queuePlacementManager = new PlacementManager();
@@ -398,6 +400,19 @@ public class RMActiveServiceContext {
 
   @Private
   @Unstable
+  public AllocationTagsManager getAllocationTagsManager() {
+    return allocationTagsManager;
+  }
+
+  @Private
+  @Unstable
+  public void setAllocationTagsManager(
+      AllocationTagsManager allocationTagsManager) {
+    this.allocationTagsManager = allocationTagsManager;
+  }
+
+  @Private
+  @Unstable
   public RMDelegatedNodeLabelsUpdater getRMDelegatedNodeLabelsUpdater() {
     return rmDelegatedNodeLabelsUpdater;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/801c0988/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
index ec94030..62899d9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWri
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater;
+import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
@@ -166,4 +167,8 @@ public interface RMContext extends ApplicationMasterServiceContext {
   void setResourceProfilesManager(ResourceProfilesManager mgr);
 
   String getAppProxyUrl(Configuration conf, ApplicationId applicationId);
+
+  AllocationTagsManager getAllocationTagsManager();
+
+  void setAllocationTagsManager(AllocationTagsManager allocationTagsManager);
 }

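With this getter on RMContext, any ResourceManager-internal component that already
holds the context can reach the shared tags store. The helper below is a hedged
sketch only; the method name and the "zk" tag are invented and not part of this
commit.

    // Hypothetical helper, for illustration only.
    static long zkCountOnNode(RMContext rmContext, NodeId nodeId,
        ApplicationId appId) throws InvalidAllocationTagsQueryException {
      AllocationTagsManager atm = rmContext.getAllocationTagsManager();
      // Cardinality of the "zk" tag for the given application on the node.
      return atm.getNodeCardinality(nodeId, appId, "zk");
    }
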
http://git-wip-us.apache.org/repos/asf/hadoop/blob/801c0988/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
index 80a9109..315fdc1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWri
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater;
+import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
@@ -504,6 +505,17 @@ public class RMContextImpl implements RMContext {
   }
 
   @Override
+  public AllocationTagsManager getAllocationTagsManager() {
+    return activeServiceContext.getAllocationTagsManager();
+  }
+
+  @Override
+  public void setAllocationTagsManager(
+      AllocationTagsManager allocationTagsManager) {
+    activeServiceContext.setAllocationTagsManager(allocationTagsManager);
+  }
+
+  @Override
   public RMDelegatedNodeLabelsUpdater getRMDelegatedNodeLabelsUpdater() {
     return activeServiceContext.getRMDelegatedNodeLabelsUpdater();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/801c0988/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 32c4b0a..da0feda 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -73,6 +73,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.metrics.TimelineServiceV2Pu
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.CombinedSystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
@@ -493,6 +494,10 @@ public class ResourceManager extends CompositeService implements Recoverable {
       throws InstantiationException, IllegalAccessException {
     return new RMNodeLabelsManager();
   }
+
+  protected AllocationTagsManager createAllocationTagsManager() {
+    return new AllocationTagsManager();
+  }
   
   protected DelegationTokenRenewer createDelegationTokenRenewer() {
     return new DelegationTokenRenewer();
@@ -619,6 +624,10 @@ public class ResourceManager extends CompositeService implements Recoverable {
       addService(nlm);
       rmContext.setNodeLabelManager(nlm);
 
+      AllocationTagsManager allocationTagsManager =
+          createAllocationTagsManager();
+      rmContext.setAllocationTagsManager(allocationTagsManager);
+
       RMDelegatedNodeLabelsUpdater delegatedNodeLabelsUpdater =
           createRMDelegatedNodeLabelsUpdater();
       if (delegatedNodeLabelsUpdater != null) {

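Because createAllocationTagsManager() is a protected factory method, a subclass of
ResourceManager (for example in tests) could substitute its own instance before the
active services wire it into the RMContext. A minimal, hypothetical sketch:

    // Hypothetical subclass, for illustration only.
    class TagsAwareResourceManager extends ResourceManager {
      @Override
      protected AllocationTagsManager createAllocationTagsManager() {
        // Return the default manager; a test could return a mock instead.
        return new AllocationTagsManager();
      }
    }
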
http://git-wip-us.apache.org/repos/asf/hadoop/blob/801c0988/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsManager.java
new file mode 100644
index 0000000..b67fab9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsManager.java
@@ -0,0 +1,431 @@
+/*
+ * *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.constraint;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.log4j.Logger;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.LongBinaryOperator;
+
+/**
+ * Supports storing mappings between container tags/applications and
+ * nodes. This is required by the affinity/anti-affinity and cardinality
+ * implementations.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class AllocationTagsManager {
+
+  private static final Logger LOG = Logger.getLogger(
+      AllocationTagsManager.class);
+
+  private ReentrantReadWriteLock.ReadLock readLock;
+  private ReentrantReadWriteLock.WriteLock writeLock;
+
+  // Application's tags to node
+  private Map<ApplicationId, NodeToCountedTags> perAppMappings =
+      new HashMap<>();
+
+  // Global tags to node mapping (used to fast return aggregated tags
+  // cardinality across apps)
+  private NodeToCountedTags globalMapping = new NodeToCountedTags();
+
+  /**
+   * Store node to counted tags.
+   */
+  @VisibleForTesting
+  static class NodeToCountedTags {
+    // Map<NodeId, Map<Tag, Count>>
+    private Map<NodeId, Map<String, Long>> nodeToTagsWithCount =
+        new HashMap<>();
+
+    // protected by external locks
+    private void addTagsToNode(NodeId nodeId, Set<String> tags) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.computeIfAbsent(nodeId,
+          k -> new HashMap<>());
+
+      for (String tag : tags) {
+        Long count = innerMap.get(tag);
+        if (count == null) {
+          innerMap.put(tag, 1L);
+        } else{
+          innerMap.put(tag, count + 1);
+        }
+      }
+    }
+
+    // protected by external locks
+    private void addTagToNode(NodeId nodeId, String tag) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.computeIfAbsent(nodeId,
+          k -> new HashMap<>());
+
+      Long count = innerMap.get(tag);
+      if (count == null) {
+        innerMap.put(tag, 1L);
+      } else{
+        innerMap.put(tag, count + 1);
+      }
+    }
+
+    private void removeTagFromInnerMap(Map<String, Long> innerMap, String tag) {
+      Long count = innerMap.get(tag);
+      if (count > 1) {
+        innerMap.put(tag, count - 1);
+      } else {
+        if (count <= 0) {
+          LOG.warn(
+              "Trying to remove tags from node, however the count already"
+                  + " becomes 0 or less, it could be a potential bug.");
+        }
+        innerMap.remove(tag);
+      }
+    }
+
+    private void removeTagsFromNode(NodeId nodeId, Set<String> tags) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+      if (innerMap == null) {
+        LOG.warn("Failed to find node=" + nodeId
+            + " while trying to remove tags, please double check.");
+        return;
+      }
+
+      for (String tag : tags) {
+        removeTagFromInnerMap(innerMap, tag);
+      }
+
+      if (innerMap.isEmpty()) {
+        nodeToTagsWithCount.remove(nodeId);
+      }
+    }
+
+    private void removeTagFromNode(NodeId nodeId, String tag) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+      if (innerMap == null) {
+        LOG.warn("Failed to find node=" + nodeId
+            + " while trying to remove tags, please double check.");
+        return;
+      }
+
+      removeTagFromInnerMap(innerMap, tag);
+
+      if (innerMap.isEmpty()) {
+        nodeToTagsWithCount.remove(nodeId);
+      }
+    }
+
+    private long getCardinality(NodeId nodeId, String tag) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+      if (innerMap == null) {
+        return 0;
+      }
+      Long value = innerMap.get(tag);
+      return value == null ? 0 : value;
+    }
+
+    private long getCardinality(NodeId nodeId, Set<String> tags,
+        LongBinaryOperator op) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+      if (innerMap == null) {
+        return 0;
+      }
+
+      long returnValue = 0;
+      boolean firstTag = true;
+
+      if (tags != null && !tags.isEmpty()) {
+        for (String tag : tags) {
+          Long value = innerMap.get(tag);
+          if (value == null) {
+            value = 0L;
+          }
+
+          if (firstTag) {
+            returnValue = value;
+            firstTag = false;
+            continue;
+          }
+
+          returnValue = op.applyAsLong(returnValue, value);
+        }
+      } else {
+        // Similar to the branch above, but iterate over values only for performance
+        for (long value : innerMap.values()) {
+          // For the first value, we will not apply op
+          if (firstTag) {
+            returnValue = value;
+            firstTag = false;
+            continue;
+          }
+          returnValue = op.applyAsLong(returnValue, value);
+        }
+      }
+      return returnValue;
+    }
+
+    private boolean isEmpty() {
+      return nodeToTagsWithCount.isEmpty();
+    }
+
+    @VisibleForTesting
+    public Map<NodeId, Map<String, Long>> getNodeToTagsWithCount() {
+      return nodeToTagsWithCount;
+    }
+  }
+
+  @VisibleForTesting
+  Map<ApplicationId, NodeToCountedTags> getPerAppMappings() {
+    return perAppMappings;
+  }
+
+  @VisibleForTesting
+  NodeToCountedTags getGlobalMapping() {
+    return globalMapping;
+  }
+
+  public AllocationTagsManager() {
+    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+    readLock = lock.readLock();
+    writeLock = lock.writeLock();
+  }
+
+  /**
+   * Notify that a container has been allocated on a node.
+   *
+   * @param nodeId         allocated node.
+   * @param applicationId  applicationId
+   * @param containerId    container id.
+   * @param allocationTags allocation tags, see
+   *                       {@link SchedulingRequest#getAllocationTags()}.
+   *                       An application_id tag will be added to
+   *                       allocationTags.
+   */
+  public void addContainer(NodeId nodeId, ApplicationId applicationId,
+      ContainerId containerId, Set<String> allocationTags) {
+    String applicationIdTag =
+        AllocationTagsNamespaces.APP_ID + applicationId.toString();
+
+    boolean useSet = false;
+    if (allocationTags != null && !allocationTags.isEmpty()) {
+      // Copy before edit it.
+      allocationTags = new HashSet<>(allocationTags);
+      allocationTags.add(applicationIdTag);
+      useSet = true;
+    }
+
+    writeLock.lock();
+    try {
+      NodeToCountedTags perAppTagsMapping = perAppMappings.computeIfAbsent(
+          applicationId, k -> new NodeToCountedTags());
+
+      if (useSet) {
+        perAppTagsMapping.addTagsToNode(nodeId, allocationTags);
+        globalMapping.addTagsToNode(nodeId, allocationTags);
+      } else {
+        perAppTagsMapping.addTagToNode(nodeId, applicationIdTag);
+        globalMapping.addTagToNode(nodeId, applicationIdTag);
+      }
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(
+            "Added container=" + containerId + " with tags=[" + StringUtils
+                .join(allocationTags, ",") + "]");
+      }
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  /**
+   * Notify that a container has been removed from a node.
+   *
+   * @param nodeId         nodeId
+   * @param applicationId  applicationId
+   * @param containerId    containerId.
+   * @param allocationTags allocation tags for given container
+   */
+  public void removeContainer(NodeId nodeId, ApplicationId applicationId,
+      ContainerId containerId, Set<String> allocationTags) {
+    String applicationIdTag =
+        AllocationTagsNamespaces.APP_ID + applicationId.toString();
+    boolean useSet = false;
+
+    if (allocationTags != null && !allocationTags.isEmpty()) {
+      // Copy before edit it.
+      allocationTags = new HashSet<>(allocationTags);
+      allocationTags.add(applicationIdTag);
+      useSet = true;
+    }
+
+    writeLock.lock();
+    try {
+      NodeToCountedTags perAppTagsMapping = perAppMappings.get(applicationId);
+      if (perAppTagsMapping == null) {
+        return;
+      }
+
+      if (useSet) {
+        perAppTagsMapping.removeTagsFromNode(nodeId, allocationTags);
+        globalMapping.removeTagsFromNode(nodeId, allocationTags);
+      } else {
+        perAppTagsMapping.removeTagFromNode(nodeId, applicationIdTag);
+        globalMapping.removeTagFromNode(nodeId, applicationIdTag);
+      }
+
+      if (perAppTagsMapping.isEmpty()) {
+        perAppMappings.remove(applicationId);
+      }
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(
+            "Removed container=" + containerId + " with tags=[" + StringUtils
+                .join(allocationTags, ",") + "]");
+      }
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  /**
+   * Get the cardinality of a single allocation tag on a node.
+   *
+   * @param nodeId        nodeId, required.
+   * @param applicationId applicationId. When null is specified, the
+   *                      cardinality aggregated across all applications on
+   *                      the node is returned.
+   * @param tag           allocation tag, see
+   *                      {@link SchedulingRequest#getAllocationTags()}.
+   *                      If the tag doesn't exist on the node, its
+   *                      cardinality is 0.
+   * @return cardinality of the specified tag on the node.
+   * @throws InvalidAllocationTagsQueryException when an illegal query
+   *                                             parameter is specified
+   */
+  public long getNodeCardinality(NodeId nodeId, ApplicationId applicationId,
+      String tag) throws InvalidAllocationTagsQueryException {
+    readLock.lock();
+
+    try {
+      if (nodeId == null) {
+        throw new InvalidAllocationTagsQueryException(
+            "Must specify nodeId/tags/op to query cardinality");
+      }
+
+      NodeToCountedTags mapping;
+      if (applicationId != null) {
+        mapping = perAppMappings.get(applicationId);
+      } else{
+        mapping = globalMapping;
+      }
+
+      if (mapping == null) {
+        return 0;
+      }
+
+      return mapping.getCardinality(nodeId, tag);
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  /**
+   * Check if the given allocation tag exists on a node.
+   *
+   * @param nodeId        nodeId, required.
+   * @param applicationId applicationId. When null is specified, the tag is
+   *                      looked up across all applications on the node.
+   * @param tag           allocation tag, see
+   *                      {@link SchedulingRequest#getAllocationTags()}.
+   * @return true if the tag has a cardinality greater than 0 on the node.
+   * @throws InvalidAllocationTagsQueryException when an illegal query
+   *                                             parameter is specified
+   */
+  public boolean allocationTagExistsOnNode(NodeId nodeId,
+      ApplicationId applicationId, String tag)
+      throws InvalidAllocationTagsQueryException {
+    return getNodeCardinality(nodeId, applicationId, tag) > 0;
+  }
+
+  /**
+   * Get the cardinality of a set of allocation tags on a node. Callers can
+   * pass in a binary operator to implement customized logic.
+   *
+   * @param nodeId        nodeId, required.
+   * @param applicationId applicationId. When null is specified, the
+   *                      cardinality aggregated across all applications on
+   *                      the node is returned.
+   * @param tags          allocation tags, see
+   *                      {@link SchedulingRequest#getAllocationTags()}.
+   *                      When multiple tags are specified, the returned
+   *                      cardinality depends on op. If a specified tag
+   *                      doesn't exist, its cardinality is 0. When null/empty
+   *                      tags are specified, all tags (of the node/app) will
+   *                      be considered.
+   * @param op            operator, such as Long::max or Long::sum. Required.
+   *                      This parameter only takes effect when #values >= 2.
+   * @return cardinality of the specified query on the node.
+   * @throws InvalidAllocationTagsQueryException when an illegal query
+   *                                             parameter is specified
+   */
+  public long getNodeCardinalityByOp(NodeId nodeId, ApplicationId applicationId,
+      Set<String> tags, LongBinaryOperator op)
+      throws InvalidAllocationTagsQueryException {
+    readLock.lock();
+
+    try {
+      if (nodeId == null || op == null) {
+        throw new InvalidAllocationTagsQueryException(
+            "Must specify nodeId/tags/op to query cardinality");
+      }
+
+      NodeToCountedTags mapping;
+      if (applicationId != null) {
+        mapping = perAppMappings.get(applicationId);
+      } else{
+        mapping = globalMapping;
+      }
+
+      if (mapping == null) {
+        return 0;
+      }
+
+      return mapping.getCardinality(nodeId, tags, op);
+    } finally {
+      readLock.unlock();
+    }
+  }
+}
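
A minimal usage sketch of the manager added above, for illustration only; the node,
application and tag names are invented, and it assumes the YARN record classes and
Guava's ImmutableSet are imported. In the RM itself the manager is driven by the
RMContainerImpl transitions shown later in this commit rather than called directly.

    // Illustrative sketch only -- not part of this commit.
    static void allocationTagsManagerSketch()
        throws InvalidAllocationTagsQueryException {
      AllocationTagsManager atm = new AllocationTagsManager();
      NodeId node = NodeId.fromString("host1:1234");
      ApplicationId appId = ApplicationId.newInstance(1234L, 1);
      ContainerId container = ContainerId.newContainerId(
          ApplicationAttemptId.newInstance(appId, 1), 1L);

      // Record a container carrying the "hbase-master" tag on the node.
      atm.addContainer(node, appId, container, ImmutableSet.of("hbase-master"));

      // Single-tag cardinality for this application on this node (1 here).
      long masters = atm.getNodeCardinality(node, appId, "hbase-master");

      // Multi-tag cardinality folded with a binary operator: Long::max picks
      // the largest per-tag count, Long::min checks every tag is present,
      // Long::sum adds the counts up.
      long maxCount = atm.getNodeCardinalityByOp(node, appId,
          ImmutableSet.of("hbase-master", "hbase-rs"), Long::max);

      // Removing the container drops its tags again.
      atm.removeContainer(node, appId, container,
          ImmutableSet.of("hbase-master"));
    }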

http://git-wip-us.apache.org/repos/asf/hadoop/blob/801c0988/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsNamespaces.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsNamespaces.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsNamespaces.java
new file mode 100644
index 0000000..893ff1c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsNamespaces.java
@@ -0,0 +1,31 @@
+/*
+ * *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.constraint;
+
+/**
+ * Predefined namespaces for allocation tags.
+ *
+ * Similar to the namespaces of resource types, namespaces of placement tags
+ * start with a letter and end with "/".
+ */
+public class AllocationTagsNamespaces {
+  public static final String APP_ID = "yarn_app_id/";
+}
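
For reference, APP_ID is the prefix that AllocationTagsManager.addContainer() (see
above) prepends to build the implicit per-application tag. A small, hypothetical
illustration of how it composes:

    // Illustration only: composing the implicit per-application tag.
    ApplicationId appId = ApplicationId.newInstance(1234L, 7);
    String appIdTag = AllocationTagsNamespaces.APP_ID + appId.toString();
    // appIdTag is now "yarn_app_id/application_1234_0007", the same tag that
    // addContainer() records for every container of this application.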

http://git-wip-us.apache.org/repos/asf/hadoop/blob/801c0988/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/InvalidAllocationTagsQueryException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/InvalidAllocationTagsQueryException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/InvalidAllocationTagsQueryException.java
new file mode 100644
index 0000000..5519e39
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/InvalidAllocationTagsQueryException.java
@@ -0,0 +1,35 @@
+/*
+ * *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.constraint;
+
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+/**
+ * Exception when invalid parameter specified to do placement tags related
+ * queries.
+ */
+public class InvalidAllocationTagsQueryException extends YarnException {
+  private static final long serialVersionUID = 12312831974894L;
+
+  public InvalidAllocationTagsQueryException(String msg) {
+    super(msg);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/801c0988/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
index f3cbf63..8f751b0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.rmcontainer;
 
 import java.util.List;
+import java.util.Set;
 
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
@@ -30,6 +31,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerRequest;
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
@@ -115,4 +117,10 @@ public interface RMContainer extends EventHandler<RMContainerEvent>,
   boolean completed();
 
   NodeId getNodeId();
+
+  /**
+   * Return the allocation tags specified by the AM, see
+   * {@link SchedulingRequest#getAllocationTags()}.
+   * @return allocation tags; could be null/empty
+   */
+  Set<String> getAllocationTags();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/801c0988/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index e26689e..184cdfc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.rmcontainer;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
@@ -189,6 +190,9 @@ public class RMContainerImpl implements RMContainer {
   private boolean isExternallyAllocated;
   private SchedulerRequestKey allocatedSchedulerKey;
 
+  // TODO: set this when the container is allocated by the scheduler
+  // (from the SchedulingRequest).
+  private Set<String> allocationTags = null;
+
   public RMContainerImpl(Container container, SchedulerRequestKey schedulerKey,
       ApplicationAttemptId appAttemptId, NodeId nodeId, String user,
       RMContext rmContext) {
@@ -501,6 +505,11 @@ public class RMContainerImpl implements RMContainer {
     return nodeId;
   }
 
+  @Override
+  public Set<String> getAllocationTags() {
+    return allocationTags;
+  }
+
   private static class BaseTransition implements
       SingleArcTransition<RMContainerImpl, RMContainerEvent> {
 
@@ -565,6 +574,12 @@ public class RMContainerImpl implements RMContainer {
 
     @Override
     public void transition(RMContainerImpl container, RMContainerEvent event) {
+      // Notify placementManager
+      container.rmContext.getAllocationTagsManager().addContainer(
+          container.getNodeId(),
+          container.getApplicationAttemptId().getApplicationId(),
+          container.getContainerId(), container.getAllocationTags());
+
       container.eventHandler.handle(new RMAppAttemptEvent(
           container.appAttemptId, RMAppAttemptEventType.CONTAINER_ALLOCATED));
     }
@@ -676,6 +691,12 @@ public class RMContainerImpl implements RMContainer {
 
     @Override
     public void transition(RMContainerImpl container, RMContainerEvent event) {
+      // Notify placementManager
+      container.rmContext.getAllocationTagsManager().removeContainer(
+          container.getNodeId(),
+          container.getApplicationAttemptId().getApplicationId(),
+          container.getContainerId(), container.getAllocationTags());
+
       RMContainerFinishedEvent finishedEvent = (RMContainerFinishedEvent) event;
 
       container.finishTime = System.currentTimeMillis();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/801c0988/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/TestAllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/TestAllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/TestAllocationTagsManager.java
new file mode 100644
index 0000000..0358792
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/TestAllocationTagsManager.java
@@ -0,0 +1,328 @@
+/*
+ * *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.constraint;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test functionality of AllocationTagsManager.
+ */
+public class TestAllocationTagsManager {
+  @Test
+  public void testAllocationTagsManagerSimpleCases()
+      throws InvalidAllocationTagsQueryException {
+    AllocationTagsManager atm = new AllocationTagsManager();
+
+    /**
+     * Construct test case:
+     * Node1:
+     *    container_1_1 (mapper/reducer/app_1)
+     *    container_1_3 (service/app_1)
+     *
+     * Node2:
+     *    container_1_2 (mapper/reducer/app_1)
+     *    container_1_4 (reducer/app_1)
+     *    container_2_1 (service/app_2)
+     */
+
+    // 3 Containers from app1
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        ImmutableSet.of("reducer"));
+
+    // 1 Container from app2
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // Get Cardinality of app1 on node1, with tag "mapper"
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node1:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
+            Long::max));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=min
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::min));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=max
+    Assert.assertEquals(2,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::max));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=sum
+    Assert.assertEquals(3,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::sum));
+
+    // Get Cardinality by passing single tag.
+    Assert.assertEquals(1,
+        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), "mapper"));
+
+    Assert.assertEquals(2,
+        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), "reducer"));
+
+    // Get Cardinality of app1 on node2, with tag "no_existed/reducer", op=min
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("no_existed", "reducer"), Long::min));
+
+    // Get Cardinality of app1 on node2, with tag "<applicationId>", op=max
+    // (Expect this returns #containers from app1 on node2)
+    Assert.assertEquals(2,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet
+                .of(AllocationTagsNamespaces.APP_ID + TestUtils
+                    .getMockApplicationId(1).toString()), Long::max));
+
+    // Get Cardinality of app1 on node2, with empty tag set, op=max
+    Assert.assertEquals(2,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::max));
+
+    // Get Cardinality of all apps on node2, with empty tag set, op=sum
+    Assert.assertEquals(7,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"), null,
+            ImmutableSet.of(), Long::sum));
+
+    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
+    Assert.assertEquals(5,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::sum));
+
+    // Get Cardinality of app_2 on node2, with empty tag set, op=sum
+    Assert.assertEquals(2,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(2), ImmutableSet.of(), Long::sum));
+
+    // Finish all containers:
+    atm.removeContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.removeContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        ImmutableSet.of("reducer"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // Expect all cardinality to be 0
+    // Get Cardinality of app1 on node1, with tag "mapper"
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node1:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
+            Long::max));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=min
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::min));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=max
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::max));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=sum
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::sum));
+
+    // Get Cardinality of app1 on node2, with tag "<applicationId>", op=max
+    // (Expect this returns #containers from app1 on node2)
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of(TestUtils.getMockApplicationId(1).toString()),
+            Long::max));
+
+    Assert.assertEquals(0,
+        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            TestUtils.getMockApplicationId(1).toString()));
+
+    // Get Cardinality of app1 on node2, with empty tag set, op=max
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::max));
+
+    // Get Cardinality of all apps on node2, with empty tag set, op=sum
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"), null,
+            ImmutableSet.of(), Long::sum));
+
+    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::sum));
+
+    // Get Cardinality of app_2 on node2, with empty tag set, op=sum
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(2), ImmutableSet.of(), Long::sum));
+  }
+
+  @Test
+  public void testAllocationTagsManagerMemoryAfterCleanup()
+      throws InvalidAllocationTagsQueryException {
+    /**
+     * Make sure YARN cleans up all memory once container/app finishes.
+     */
+
+    AllocationTagsManager atm = new AllocationTagsManager();
+
+    // Add a bunch of containers
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        ImmutableSet.of("reducer"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // Remove all these containers
+    atm.removeContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.removeContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        ImmutableSet.of("reducer"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // Check internal data structure
+    Assert.assertEquals(0,
+        atm.getGlobalMapping().getNodeToTagsWithCount().size());
+    Assert.assertEquals(0, atm.getPerAppMappings().size());
+  }
+
+  @Test
+  public void testQueryCardinalityWithIllegalParameters()
+      throws InvalidAllocationTagsQueryException {
+    /**
+     * Make sure queries with illegal parameters (missing nodeId or op) are
+     * rejected with InvalidAllocationTagsQueryException.
+     */
+
+    AllocationTagsManager atm = new AllocationTagsManager();
+
+    // Add a bunch of containers
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        ImmutableSet.of("reducer"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // No node-id
+    boolean caughtException = false;
+    try {
+      atm.getNodeCardinalityByOp(null, TestUtils.getMockApplicationId(2),
+          ImmutableSet.of("mapper"), Long::min);
+    } catch (InvalidAllocationTagsQueryException e) {
+      caughtException = true;
+    }
+    Assert.assertTrue("should fail because of nodeId specified",
+        caughtException);
+
+    // No op
+    caughtException = false;
+    try {
+      atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+          TestUtils.getMockApplicationId(2), ImmutableSet.of("mapper"), null);
+    } catch (InvalidAllocationTagsQueryException e) {
+      caughtException = true;
+    }
+    Assert.assertTrue("should fail because of nodeId specified",
+        caughtException);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/801c0988/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
index 6c189b3..27ff311 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
+import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
@@ -109,6 +110,8 @@ public class TestRMContainerImpl {
     when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
     when(rmContext.getRMApps()).thenReturn(rmApps);
     when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher);
+    AllocationTagsManager ptm = mock(AllocationTagsManager.class);
+    when(rmContext.getAllocationTagsManager()).thenReturn(ptm);
     YarnConfiguration conf = new YarnConfiguration();
     conf.setBoolean(
         YarnConfiguration.APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO,
@@ -209,6 +212,8 @@ public class TestRMContainerImpl {
     when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
     when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
     when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher);
+    AllocationTagsManager ptm = mock(AllocationTagsManager.class);
+    when(rmContext.getAllocationTagsManager()).thenReturn(ptm);
 
     YarnConfiguration conf = new YarnConfiguration();
     conf.setBoolean(
@@ -367,4 +372,123 @@ public class TestRMContainerImpl {
     verify(publisher, times(1)).containerCreated(any(RMContainer.class), anyLong());
     verify(publisher, times(1)).containerFinished(any(RMContainer.class), anyLong());
   }
+
+  @Test
+  public void testContainerTransitionNotifyPlacementTagsManager()
+      throws Exception {
+    DrainDispatcher drainDispatcher = new DrainDispatcher();
+    EventHandler<RMAppAttemptEvent> appAttemptEventHandler = mock(
+        EventHandler.class);
+    EventHandler generic = mock(EventHandler.class);
+    drainDispatcher.register(RMAppAttemptEventType.class,
+        appAttemptEventHandler);
+    drainDispatcher.register(RMNodeEventType.class, generic);
+    drainDispatcher.init(new YarnConfiguration());
+    drainDispatcher.start();
+    NodeId nodeId = BuilderUtils.newNodeId("host", 3425);
+    ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
+    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
+        appId, 1);
+    ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
+    ContainerAllocationExpirer expirer = mock(ContainerAllocationExpirer.class);
+
+    Resource resource = BuilderUtils.newResource(512, 1);
+    Priority priority = BuilderUtils.newPriority(5);
+
+    Container container = BuilderUtils.newContainer(containerId, nodeId,
+        "host:3465", resource, priority, null);
+    ConcurrentMap<ApplicationId, RMApp> rmApps =
+        spy(new ConcurrentHashMap<ApplicationId, RMApp>());
+    RMApp rmApp = mock(RMApp.class);
+    when(rmApp.getRMAppAttempt(Matchers.any())).thenReturn(null);
+    Mockito.doReturn(rmApp).when(rmApps).get(Matchers.any());
+
+    RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
+    SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class);
+    AllocationTagsManager tagsManager = new AllocationTagsManager();
+    RMContext rmContext = mock(RMContext.class);
+    when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
+    when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
+    when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
+    when(rmContext.getRMApps()).thenReturn(rmApps);
+    when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher);
+    when(rmContext.getAllocationTagsManager()).thenReturn(tagsManager);
+    YarnConfiguration conf = new YarnConfiguration();
+    conf.setBoolean(
+        YarnConfiguration.APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO,
+        true);
+    when(rmContext.getYarnConfiguration()).thenReturn(conf);
+
+    /* First container: ALLOCATED -> KILLED */
+    RMContainer rmContainer = new RMContainerImpl(container,
+        SchedulerRequestKey.extractFrom(container), appAttemptId,
+        nodeId, "user", rmContext);
+
+    Assert.assertEquals(0,
+        tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
+
+    rmContainer.handle(new RMContainerEvent(containerId,
+        RMContainerEventType.START));
+
+    Assert.assertEquals(1,
+        tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
+
+    rmContainer.handle(new RMContainerFinishedEvent(containerId, ContainerStatus
+        .newInstance(containerId, ContainerState.COMPLETE, "", 0),
+        RMContainerEventType.KILL));
+
+    Assert.assertEquals(0,
+        tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
+
+    /* Second container: ACQUIRED -> FINISHED */
+    rmContainer = new RMContainerImpl(container,
+        SchedulerRequestKey.extractFrom(container), appAttemptId,
+        nodeId, "user", rmContext);
+
+    Assert.assertEquals(0,
+        tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
+
+    rmContainer.handle(new RMContainerEvent(containerId,
+        RMContainerEventType.START));
+
+    Assert.assertEquals(1,
+        tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
+
+    rmContainer.handle(
+        new RMContainerEvent(containerId, RMContainerEventType.ACQUIRED));
+
+    rmContainer.handle(new RMContainerFinishedEvent(containerId, ContainerStatus
+        .newInstance(containerId, ContainerState.COMPLETE, "", 0),
+        RMContainerEventType.FINISHED));
+
+    Assert.assertEquals(0,
+        tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
+
+    /* Third container: RUNNING -> FINISHED */
+    rmContainer = new RMContainerImpl(container,
+        SchedulerRequestKey.extractFrom(container), appAttemptId,
+        nodeId, "user", rmContext);
+
+    Assert.assertEquals(0,
+        tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
+
+    rmContainer.handle(new RMContainerEvent(containerId,
+        RMContainerEventType.START));
+
+    Assert.assertEquals(1,
+        tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
+
+    rmContainer.handle(
+        new RMContainerEvent(containerId, RMContainerEventType.ACQUIRED));
+
+    rmContainer.handle(
+        new RMContainerEvent(containerId, RMContainerEventType.LAUNCHED));
+
+    rmContainer.handle(new RMContainerFinishedEvent(containerId, ContainerStatus
+        .newInstance(containerId, ContainerState.COMPLETE, "", 0),
+        RMContainerEventType.FINISHED));
+
+    Assert.assertEquals(0,
+        tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/801c0988/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
index e3326c7..61a5555 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
@@ -135,6 +136,9 @@ public class TestUtils {
         new DefaultResourceCalculator());
     rmContext.setScheduler(mockScheduler);
 
+    AllocationTagsManager ptm = mock(AllocationTagsManager.class);
+    rmContext.setAllocationTagsManager(ptm);
+
     return rmContext;
   }
   
@@ -234,6 +238,11 @@ public class TestUtils {
     doReturn(id).when(containerId).getContainerId();
     return containerId;
   }
+
+  public static ContainerId getMockContainerId(int appId, int containerId) {
+    ApplicationAttemptId attemptId = getMockApplicationAttemptId(appId, 1);
+    return ContainerId.newContainerId(attemptId, containerId);
+  }
   
   public static Container getMockContainer(
       ContainerId containerId, NodeId nodeId, 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/801c0988/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
index 3f97b59..4b902a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWri
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
@@ -234,6 +235,8 @@ public class TestFifoScheduler {
     FifoScheduler scheduler = new FifoScheduler();
     RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null,
         null, containerTokenSecretManager, nmTokenSecretManager, null, scheduler);
+    AllocationTagsManager ptm = mock(AllocationTagsManager.class);
+    rmContext.setAllocationTagsManager(ptm);
     rmContext.setSystemMetricsPublisher(mock(SystemMetricsPublisher.class));
     rmContext.setRMApplicationHistoryWriter(
         mock(RMApplicationHistoryWriter.class));
@@ -312,12 +315,14 @@ public class TestFifoScheduler {
     FifoScheduler scheduler = new FifoScheduler();
     RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null,
         null, containerTokenSecretManager, nmTokenSecretManager, null, scheduler);
+    AllocationTagsManager ptm = mock(AllocationTagsManager.class);
     rmContext.setSystemMetricsPublisher(mock(SystemMetricsPublisher.class));
     rmContext.setRMApplicationHistoryWriter(mock(RMApplicationHistoryWriter.class));
     ((RMContextImpl) rmContext).setYarnConfiguration(new YarnConfiguration());
     NullRMNodeLabelsManager nlm = new NullRMNodeLabelsManager();
     nlm.init(new Configuration());
     rmContext.setNodeLabelManager(nlm);
+    rmContext.setAllocationTagsManager(ptm);
 
     scheduler.setRMContext(rmContext);
     ((RMContextImpl) rmContext).setScheduler(scheduler);




[07/32] hadoop git commit: YARN-6595. [API] Add Placement Constraints at the application level. (Arun Suresh via kkaranasos)

Posted by as...@apache.org.
YARN-6595. [API] Add Placement Constraints at the application level. (Arun Suresh via kkaranasos)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db928556
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db928556
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db928556

Branch: refs/heads/trunk
Commit: db928556c81e5950b3fe374fa5b99ab26791ef3a
Parents: b57e8bc
Author: Konstantinos Karanasos <kk...@apache.org>
Authored: Mon Nov 13 15:25:24 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../RegisterApplicationMasterRequest.java       |  42 ++++-
 .../yarn/api/resource/PlacementConstraint.java  | 156 +++++++++++++++++++
 .../src/main/proto/yarn_protos.proto            |   6 +
 .../src/main/proto/yarn_service_protos.proto    |   1 +
 .../RegisterApplicationMasterRequestPBImpl.java | 106 ++++++++++++-
 .../hadoop/yarn/api/BasePBImplRecordsTest.java  |  11 ++
 6 files changed, 313 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db928556/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java
index 395e190..f2d537a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java
@@ -18,11 +18,16 @@
 
 package org.apache.hadoop.yarn.api.protocolrecords;
 
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.util.Records;
-
 /**
  * The request sent by the {@code ApplicationMaster} to {@code ResourceManager}
  * on registration.
@@ -132,4 +137,39 @@ public abstract class RegisterApplicationMasterRequest {
   @Public
   @Stable
   public abstract void setTrackingUrl(String trackingUrl);
+
+  /**
+   * Return all Placement Constraints specified at the Application level. The
+   * mapping is from a set of allocation tags to a
+   * <code>PlacementConstraint</code> associated with the tags, i.e., each
+   * {@link org.apache.hadoop.yarn.api.records.SchedulingRequest} that has those
+   * tags will be placed taking into account the corresponding constraint.
+   *
+   * @return A map of Placement Constraints.
+   */
+  @Public
+  @Unstable
+  public Map<Set<String>, PlacementConstraint> getPlacementConstraints() {
+    return new HashMap<>();
+  }
+
+  /**
+   * Set Placement Constraints applicable to the
+   * {@link org.apache.hadoop.yarn.api.records.SchedulingRequest}s
+   * of this application.
+   * The mapping is from a set of allocation tags to a
+   * <code>PlacementConstraint</code> associated with the tags.
+   * For example:
+   *  Map &lt;
+   *   &lt;hb_regionserver&gt; -&gt; node_anti_affinity,
+   *   &lt;hb_regionserver, hb_master&gt; -&gt; rack_affinity,
+   *   ...
+   *  &gt;
+   * @param placementConstraints Placement Constraint Mapping.
+   */
+  @Public
+  @Unstable
+  public void setPlacementConstraints(
+      Map<Set<String>, PlacementConstraint> placementConstraints) {
+  }
 }
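As a rough illustration (not part of this patch) of how an ApplicationMaster could exercise the new placement-constraint map at registration time, assuming the PlacementConstraints DSL from this branch (build, targetNotIn, NODE, PlacementTargets.allocationTag); the host, port and tracking URL values below are placeholders:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;

    import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraints;

    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

    public final class RegisterWithConstraintsExample {
      public static RegisterApplicationMasterRequest buildRequest() {
        // Node anti-affinity: no two allocations tagged "hb_regionserver" on the same node.
        PlacementConstraint regionServerAntiAffinity = PlacementConstraints.build(
            targetNotIn(NODE, allocationTag("hb_regionserver")));

        Map<Set<String>, PlacementConstraint> constraints = new HashMap<>();
        constraints.put(Collections.singleton("hb_regionserver"), regionServerAntiAffinity);

        // Host, port and tracking URL are placeholder values.
        RegisterApplicationMasterRequest request =
            RegisterApplicationMasterRequest.newInstance("am-host", -1, "");
        request.setPlacementConstraints(constraints);
        return request;
      }
    }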

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db928556/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
index f0e3982..b6e851a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
@@ -54,6 +54,26 @@ public class PlacementConstraint {
     return constraintExpr;
   }
 
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (!(o instanceof PlacementConstraint)) {
+      return false;
+    }
+
+    PlacementConstraint that = (PlacementConstraint) o;
+
+    return getConstraintExpr() != null ? getConstraintExpr().equals(that
+        .getConstraintExpr()) : that.getConstraintExpr() == null;
+  }
+
+  @Override
+  public int hashCode() {
+    return getConstraintExpr() != null ? getConstraintExpr().hashCode() : 0;
+  }
+
   /**
    * Interface used to enable the elements of the constraint tree to be visited.
    */
@@ -174,6 +194,38 @@ public class PlacementConstraint {
     }
 
     @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (!(o instanceof SingleConstraint)) {
+        return false;
+      }
+
+      SingleConstraint that = (SingleConstraint) o;
+
+      if (getMinCardinality() != that.getMinCardinality()) {
+        return false;
+      }
+      if (getMaxCardinality() != that.getMaxCardinality()) {
+        return false;
+      }
+      if (!getScope().equals(that.getScope())) {
+        return false;
+      }
+      return getTargetExpressions().equals(that.getTargetExpressions());
+    }
+
+    @Override
+    public int hashCode() {
+      int result = getScope().hashCode();
+      result = 31 * result + getMinCardinality();
+      result = 31 * result + getMaxCardinality();
+      result = 31 * result + getTargetExpressions().hashCode();
+      return result;
+    }
+
+    @Override
     public <T> T accept(Visitor<T> visitor) {
       return visitor.visit(this);
     }
@@ -332,6 +384,34 @@ public class PlacementConstraint {
     }
 
     @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (!(o instanceof TargetConstraint)) {
+        return false;
+      }
+
+      TargetConstraint that = (TargetConstraint) o;
+
+      if (getOp() != that.getOp()) {
+        return false;
+      }
+      if (!getScope().equals(that.getScope())) {
+        return false;
+      }
+      return getTargetExpressions().equals(that.getTargetExpressions());
+    }
+
+    @Override
+    public int hashCode() {
+      int result = getOp().hashCode();
+      result = 31 * result + getScope().hashCode();
+      result = 31 * result + getTargetExpressions().hashCode();
+      return result;
+    }
+
+    @Override
     public <T> T accept(Visitor<T> visitor) {
       return visitor.visit(this);
     }
@@ -388,6 +468,34 @@ public class PlacementConstraint {
     public <T> T accept(Visitor<T> visitor) {
       return visitor.visit(this);
     }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (o == null || getClass() != o.getClass()) {
+        return false;
+      }
+
+      CardinalityConstraint that = (CardinalityConstraint) o;
+
+      if (minCardinality != that.minCardinality) {
+        return false;
+      }
+      if (maxCardinality != that.maxCardinality) {
+        return false;
+      }
+      return scope != null ? scope.equals(that.scope) : that.scope == null;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = scope != null ? scope.hashCode() : 0;
+      result = 31 * result + minCardinality;
+      result = 31 * result + maxCardinality;
+      return result;
+    }
   }
 
   /**
@@ -406,6 +514,25 @@ public class PlacementConstraint {
      * @return the children of the composite constraint
      */
     public abstract List<R> getChildren();
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (o == null || getClass() != o.getClass()) {
+        return false;
+      }
+
+      return getChildren() != null ? getChildren().equals(
+          ((CompositeConstraint)o).getChildren()) :
+          ((CompositeConstraint)o).getChildren() == null;
+    }
+
+    @Override
+    public int hashCode() {
+      return getChildren() != null ? getChildren().hashCode() : 0;
+    }
   }
 
   /**
@@ -563,5 +690,34 @@ public class PlacementConstraint {
     public <T> T accept(Visitor<T> visitor) {
       return visitor.visit(this);
     }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (o == null || getClass() != o.getClass()) {
+        return false;
+      }
+
+      TimedPlacementConstraint that = (TimedPlacementConstraint) o;
+
+      if (schedulingDelay != that.schedulingDelay) {
+        return false;
+      }
+      if (constraint != null ? !constraint.equals(that.constraint) :
+          that.constraint != null) {
+        return false;
+      }
+      return delayUnit == that.delayUnit;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = constraint != null ? constraint.hashCode() : 0;
+      result = 31 * result + (int) (schedulingDelay ^ (schedulingDelay >>> 32));
+      result = 31 * result + (delayUnit != null ? delayUnit.hashCode() : 0);
+      return result;
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db928556/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index d24f863..fdc39a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -649,6 +649,12 @@ message CompositePlacementConstraintProto {
   repeated TimedPlacementConstraintProto timedChildConstraints = 3;
 }
 
+// This associates a set of allocation tags to a Placement Constraint.
+message PlacementConstraintMapEntryProto {
+  repeated string allocation_tags = 1;
+  optional PlacementConstraintProto placement_constraint = 2;
+}
+
 ////////////////////////////////////////////////////////////////////////
 ////// From reservation_protocol /////////////////////////////////////
 ////////////////////////////////////////////////////////////////////////

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db928556/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index 4e97c74..68e585d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -38,6 +38,7 @@ message RegisterApplicationMasterRequestProto {
   optional string host = 1;
   optional int32 rpc_port = 2;
   optional string tracking_url = 3;
+  repeated PlacementConstraintMapEntryProto placement_constraints = 4;
 }
 
 message RegisterApplicationMasterResponseProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db928556/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java
index 037dfd9..64bee85 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java
@@ -21,24 +21,41 @@ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.pb.PlacementConstraintFromProtoConverter;
+import org.apache.hadoop.yarn.api.pb.PlacementConstraintToProtoConverter;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.proto.YarnProtos;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterRequestProtoOrBuilder;
 
 import com.google.protobuf.TextFormat;
 
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 @Private
 @Unstable
-public class RegisterApplicationMasterRequestPBImpl extends RegisterApplicationMasterRequest {
-  RegisterApplicationMasterRequestProto proto = RegisterApplicationMasterRequestProto.getDefaultInstance();
-  RegisterApplicationMasterRequestProto.Builder builder = null;
+public class RegisterApplicationMasterRequestPBImpl
+    extends RegisterApplicationMasterRequest {
+  private RegisterApplicationMasterRequestProto proto =
+      RegisterApplicationMasterRequestProto.getDefaultInstance();
+  private RegisterApplicationMasterRequestProto.Builder builder = null;
+  private Map<Set<String>, PlacementConstraint> placementConstraints = null;
   boolean viaProto = false;
   
   public RegisterApplicationMasterRequestPBImpl() {
     builder = RegisterApplicationMasterRequestProto.newBuilder();
   }
 
-  public RegisterApplicationMasterRequestPBImpl(RegisterApplicationMasterRequestProto proto) {
+  public RegisterApplicationMasterRequestPBImpl(
+      RegisterApplicationMasterRequestProto proto) {
     this.proto = proto;
     viaProto = true;
   }
@@ -71,6 +88,30 @@ public class RegisterApplicationMasterRequestPBImpl extends RegisterApplicationM
   }
 
   private void mergeLocalToBuilder() {
+    if (this.placementConstraints != null) {
+      addPlacementConstraintMap();
+    }
+  }
+
+  private void addPlacementConstraintMap() {
+    maybeInitBuilder();
+    builder.clearPlacementConstraints();
+    if (this.placementConstraints == null) {
+      return;
+    }
+    List<YarnProtos.PlacementConstraintMapEntryProto> protoList =
+        new ArrayList<>();
+    for (Map.Entry<Set<String>, PlacementConstraint> entry :
+        this.placementConstraints.entrySet()) {
+      protoList.add(
+          YarnProtos.PlacementConstraintMapEntryProto.newBuilder()
+              .addAllAllocationTags(entry.getKey())
+              .setPlacementConstraint(
+                  new PlacementConstraintToProtoConverter(
+                      entry.getValue()).convert())
+              .build());
+    }
+    builder.addAllPlacementConstraints(protoList);
   }
 
   private void mergeLocalToProto() {
@@ -90,7 +131,8 @@ public class RegisterApplicationMasterRequestPBImpl extends RegisterApplicationM
 
   @Override
   public String getHost() {
-    RegisterApplicationMasterRequestProtoOrBuilder p = viaProto ? proto : builder;
+    RegisterApplicationMasterRequestProtoOrBuilder p =
+        viaProto ? proto : builder;
     return p.getHost();
   }
 
@@ -106,7 +148,8 @@ public class RegisterApplicationMasterRequestPBImpl extends RegisterApplicationM
 
   @Override
   public int getRpcPort() {
-    RegisterApplicationMasterRequestProtoOrBuilder p = viaProto ? proto : builder;
+    RegisterApplicationMasterRequestProtoOrBuilder p =
+        viaProto ? proto : builder;
     return p.getRpcPort();
   }
 
@@ -118,7 +161,8 @@ public class RegisterApplicationMasterRequestPBImpl extends RegisterApplicationM
 
   @Override
   public String getTrackingUrl() {
-    RegisterApplicationMasterRequestProtoOrBuilder p = viaProto ? proto : builder;
+    RegisterApplicationMasterRequestProtoOrBuilder p =
+        viaProto ? proto : builder;
     return p.getTrackingUrl();
   }
 
@@ -131,4 +175,50 @@ public class RegisterApplicationMasterRequestPBImpl extends RegisterApplicationM
     }
     builder.setTrackingUrl(url);
   }
-}  
+
+  private void initPlacementConstraintMap() {
+    if (this.placementConstraints != null) {
+      return;
+    }
+    RegisterApplicationMasterRequestProtoOrBuilder p =
+        viaProto ? proto : builder;
+    List<YarnProtos.PlacementConstraintMapEntryProto> pcmList =
+        p.getPlacementConstraintsList();
+    this.placementConstraints = new HashMap<>();
+    for (YarnProtos.PlacementConstraintMapEntryProto e : pcmList) {
+      this.placementConstraints.put(
+          new HashSet<>(e.getAllocationTagsList()),
+          new PlacementConstraintFromProtoConverter(
+              e.getPlacementConstraint()).convert());
+    }
+  }
+
+  @Override
+  public Map<Set<String>, PlacementConstraint> getPlacementConstraints() {
+    initPlacementConstraintMap();
+    return this.placementConstraints;
+  }
+
+  @Override
+  public void setPlacementConstraints(
+      Map<Set<String>, PlacementConstraint> constraints) {
+    maybeInitBuilder();
+    if (constraints == null) {
+      builder.clearPlacementConstraints();
+    } else {
+      removeEmptyKeys(constraints);
+    }
+    this.placementConstraints = constraints;
+  }
+
+  private void removeEmptyKeys(
+      Map<Set<String>, PlacementConstraint> constraintMap) {
+    Iterator<Set<String>> iter = constraintMap.keySet().iterator();
+    while (iter.hasNext()) {
+      Set<String> aTags = iter.next();
+      if (aTags.size() == 0) {
+        iter.remove();
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db928556/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java
index 8694651..ebd66af 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java
@@ -22,12 +22,19 @@ import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
 import org.junit.Assert;
 
 import java.lang.reflect.*;
 import java.nio.ByteBuffer;
 import java.util.*;
 
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints
+    .PlacementTargets.allocationTag;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
+
 /**
  * Generic helper class to validate protocol records.
  */
@@ -85,6 +92,10 @@ public class BasePBImplRecordsTest {
         ByteBuffer buff = ByteBuffer.allocate(4);
         rand.nextBytes(buff.array());
         return buff;
+      } else if (type.equals(PlacementConstraint.class)) {
+        PlacementConstraint.AbstractConstraint sConstraintExpr =
+            targetIn(NODE, allocationTag("foo"));
+        ret = PlacementConstraints.build(sConstraintExpr);
       }
     } else if (type instanceof ParameterizedType) {
       ParameterizedType pt = (ParameterizedType)type;




[10/32] hadoop git commit: YARN-7670. Modifications to the ResourceScheduler API to support SchedulingRequests. (asuresh)

Posted by as...@apache.org.
YARN-7670. Modifications to the ResourceScheduler API to support SchedulingRequests. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88d8d3f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88d8d3f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88d8d3f4

Branch: refs/heads/trunk
Commit: 88d8d3f40b2923fab23a933bce1cd2e9c320ae84
Parents: 801c098
Author: Arun Suresh <as...@apache.org>
Authored: Tue Dec 19 08:59:23 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../scheduler/AbstractYarnScheduler.java        | 18 +++++
 .../scheduler/ResourceScheduler.java            | 13 ++++
 .../scheduler/capacity/CapacityScheduler.java   | 78 ++++++++++++++++++--
 .../common/ResourceAllocationCommitter.java     | 12 ++-
 .../scheduler/common/fica/FiCaSchedulerApp.java | 30 +++++---
 .../TestCapacitySchedulerAsyncScheduling.java   | 10 +--
 6 files changed, 138 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88d8d3f4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index 4b76327..213d784 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerError;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -295,6 +296,10 @@ public abstract class AbstractYarnScheduler
     return nodeTracker.getNodes(nodeFilter);
   }
 
+  public List<N> getNodes(final NodeFilter filter) {
+    return nodeTracker.getNodes(filter);
+  }
+
   public boolean shouldContainersBeAutoUpdated() {
     return this.autoUpdateContainers;
   }
@@ -1443,4 +1448,17 @@ public abstract class AbstractYarnScheduler
       throw new IOException(e);
     }
   }
+
+  /**
+   * Default implementation. Always returns false.
+   * @param appAttempt ApplicationAttempt.
+   * @param schedulingRequest SchedulingRequest.
+   * @param schedulerNode SchedulerNode.
+   * @return Success or not.
+   */
+  @Override
+  public boolean attemptAllocationOnNode(SchedulerApplicationAttempt appAttempt,
+      SchedulingRequest schedulingRequest, SchedulerNode schedulerNode) {
+    return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88d8d3f4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
index d96d625..5a56ac7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable;
 
@@ -58,4 +59,16 @@ public interface ResourceScheduler extends YarnScheduler, Recoverable {
    * @return the number of available {@link NodeId} by resource name.
    */
   List<NodeId> getNodeIds(String resourceName);
+
+  /**
+   * Attempts to allocate a SchedulerRequest on a Node.
+   * NOTE: This ignores the numAllocations in the resource sizing and tries
+   *       to allocate a SINGLE container only.
+   * @param appAttempt ApplicationAttempt.
+   * @param schedulingRequest SchedulingRequest.
+   * @param schedulerNode SchedulerNode.
+   * @return true if proposal was accepted.
+   */
+  boolean attemptAllocationOnNode(SchedulerApplicationAttempt appAttempt,
+      SchedulingRequest schedulingRequest, SchedulerNode schedulerNode);
 }
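To make the intent of this hook concrete, a caller such as a placement-constraint processor could drive it roughly as follows. This is an illustrative, hypothetical helper rather than code from the patch; it uses only the attemptAllocationOnNode signature added above:

    import java.util.List;

    import org.apache.hadoop.yarn.api.records.SchedulingRequest;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;

    public final class PlacementAttemptHelper {
      /**
       * Hypothetical helper: try each candidate node in order until the scheduler
       * accepts a single-container proposal for the scheduling request.
       */
      public static boolean tryPlace(ResourceScheduler scheduler,
          SchedulerApplicationAttempt attempt, SchedulingRequest request,
          List<SchedulerNode> candidateNodes) {
        for (SchedulerNode node : candidateNodes) {
          // attemptAllocationOnNode ignores numAllocations and commits at most one container.
          if (scheduler.attemptAllocationOnNode(attempt, request, node)) {
            return true;
          }
        }
        return false;
      }
    }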

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88d8d3f4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 03ca507..676c0fe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueACL;
@@ -59,6 +60,7 @@ import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -82,6 +84,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptE
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
@@ -99,7 +102,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication;
 
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerDynamicEditException;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesLogger;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
@@ -141,6 +146,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.Candida
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SimpleCandidateNodeSet;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AppPriorityACLsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
+import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.server.utils.Lock;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
@@ -596,7 +603,7 @@ public class CapacityScheduler extends
 
           try {
             cs.writeLock.lock();
-            cs.tryCommit(cs.getClusterResource(), request);
+            cs.tryCommit(cs.getClusterResource(), request, true);
           } finally {
             cs.writeLock.unlock();
           }
@@ -2551,10 +2558,67 @@ public class CapacityScheduler extends
       resourceCommitterService.addNewCommitRequest(request);
     } else{
       // Otherwise do it sync-ly.
-      tryCommit(cluster, request);
+      tryCommit(cluster, request, true);
     }
   }
 
+  @Override
+  public boolean attemptAllocationOnNode(SchedulerApplicationAttempt appAttempt,
+      SchedulingRequest schedulingRequest, SchedulerNode schedulerNode) {
+    if (schedulingRequest.getResourceSizing() != null) {
+      if (schedulingRequest.getResourceSizing().getNumAllocations() > 1) {
+        LOG.warn("The SchedulingRequest has requested more than 1 allocation," +
+            " but only 1 will be attempted !!");
+      }
+      if (!appAttempt.isStopped()) {
+        ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode>
+            resourceCommitRequest = createResourceCommitRequest(
+            appAttempt, schedulingRequest, schedulerNode);
+        return tryCommit(getClusterResource(), resourceCommitRequest, false);
+      }
+    }
+    return false;
+  }
+
+  // This assumes numContainers = 1 for the request.
+  private ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode>
+      createResourceCommitRequest(SchedulerApplicationAttempt appAttempt,
+      SchedulingRequest schedulingRequest, SchedulerNode schedulerNode) {
+    ContainerAllocationProposal<FiCaSchedulerApp, FiCaSchedulerNode> allocated =
+        null;
+    Resource resource = schedulingRequest.getResourceSizing().getResources();
+    if (Resources.greaterThan(calculator, getClusterResource(),
+        resource, Resources.none())) {
+      ContainerId cId =
+          ContainerId.newContainerId(appAttempt.getApplicationAttemptId(),
+              appAttempt.getAppSchedulingInfo().getNewContainerId());
+      Container container = BuilderUtils.newContainer(
+          cId, schedulerNode.getNodeID(), schedulerNode.getHttpAddress(),
+          resource, schedulingRequest.getPriority(), null,
+          ExecutionType.GUARANTEED,
+          schedulingRequest.getAllocationRequestId());
+      RMContainer rmContainer = new RMContainerImpl(container,
+          SchedulerRequestKey.extractFrom(container),
+          appAttempt.getApplicationAttemptId(), container.getNodeId(),
+          appAttempt.getUser(), rmContext, false);
+
+      allocated = new ContainerAllocationProposal<>(
+          getSchedulerContainer(rmContainer, true),
+          null, null, NodeType.NODE_LOCAL, NodeType.NODE_LOCAL,
+          SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY,
+          resource);
+    }
+
+    if (null != allocated) {
+      List<ContainerAllocationProposal<FiCaSchedulerApp, FiCaSchedulerNode>>
+          allocationsList = new ArrayList<>();
+      allocationsList.add(allocated);
+
+      return new ResourceCommitRequest<>(allocationsList, null, null);
+    }
+    return null;
+  }
+
   @VisibleForTesting
   public ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode>
       createResourceCommitRequest(CSAssignment csAssignment) {
@@ -2632,7 +2696,8 @@ public class CapacityScheduler extends
   }
 
   @Override
-  public void tryCommit(Resource cluster, ResourceCommitRequest r) {
+  public boolean tryCommit(Resource cluster, ResourceCommitRequest r,
+      boolean updatePending) {
     ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode> request =
         (ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode>) r;
 
@@ -2662,15 +2727,17 @@ public class CapacityScheduler extends
       LOG.debug("Try to commit allocation proposal=" + request);
     }
 
+    boolean isSuccess = false;
     if (attemptId != null) {
       FiCaSchedulerApp app = getApplicationAttempt(attemptId);
       // Required sanity check for attemptId - when async-scheduling enabled,
       // proposal might be outdated if AM failover just finished
       // and proposal queue was not be consumed in time
       if (app != null && attemptId.equals(app.getApplicationAttemptId())) {
-        if (app.accept(cluster, request)) {
-          app.apply(cluster, request);
+        if (app.accept(cluster, request, updatePending)) {
+          app.apply(cluster, request, updatePending);
           LOG.info("Allocation proposal accepted");
+          isSuccess = true;
         } else{
           LOG.info("Failed to accept allocation proposal");
         }
@@ -2681,6 +2748,7 @@ public class CapacityScheduler extends
         }
       }
     }
+    return isSuccess;
   }
 
   public int getAsyncSchedulingPendingBacklogs() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88d8d3f4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/ResourceAllocationCommitter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/ResourceAllocationCommitter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/ResourceAllocationCommitter.java
index bdea97d..2e36b2e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/ResourceAllocationCommitter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/ResourceAllocationCommitter.java
@@ -25,5 +25,15 @@ import org.apache.hadoop.yarn.api.records.Resource;
  * plus global scheduling functionality
  */
 public interface ResourceAllocationCommitter {
-  void tryCommit(Resource cluster, ResourceCommitRequest proposal);
+
+  /**
+   * Try to commit the allocation Proposal. This also gives the option of
+   * not updating a pending queued request.
+   * @param cluster Cluster Resource.
+   * @param proposal Proposal.
+   * @param updatePending Decrement pending if successful.
+   * @return Is successful or not.
+   */
+  boolean tryCommit(Resource cluster, ResourceCommitRequest proposal,
+      boolean updatePending);
 }
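For context, the two calling patterns enabled by the new updatePending flag (as used in the CapacityScheduler changes above) look roughly like this; the scheduler and proposal variables are placeholders:

    // Regular scheduling path: the proposal originates from pending ResourceRequests,
    // so a successful commit also decrements the pending ask (updatePending = true).
    scheduler.tryCommit(clusterResource, proposalFromResourceRequests, true);

    // SchedulingRequest path (attemptAllocationOnNode): pending bookkeeping is handled
    // outside the commit, so the update is skipped and only the outcome is returned.
    boolean accepted =
        scheduler.tryCommit(clusterResource, proposalFromSchedulingRequest, false);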

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88d8d3f4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index d6ad292..4ea0347 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -375,7 +375,8 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
   }
 
   public boolean accept(Resource cluster,
-      ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode> request) {
+      ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode> request,
+      boolean checkPending) {
     ContainerRequest containerRequest = null;
     boolean reReservation = false;
 
@@ -408,9 +409,11 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
               schedulerContainer.getRmContainer().getContainerRequest();
 
           // Check pending resource request
-          if (!appSchedulingInfo.checkAllocation(allocation.getAllocationLocalityType(),
-              schedulerContainer.getSchedulerNode(),
-              schedulerContainer.getSchedulerRequestKey())) {
+          if (checkPending &&
+              !appSchedulingInfo.checkAllocation(
+                  allocation.getAllocationLocalityType(),
+                  schedulerContainer.getSchedulerNode(),
+                  schedulerContainer.getSchedulerRequestKey())) {
             if (LOG.isDebugEnabled()) {
               LOG.debug("No pending resource for: nodeType=" + allocation
                   .getAllocationLocalityType() + ", node=" + schedulerContainer
@@ -485,8 +488,8 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
     return accepted;
   }
 
-  public void apply(Resource cluster,
-      ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode> request) {
+  public void apply(Resource cluster, ResourceCommitRequest<FiCaSchedulerApp,
+      FiCaSchedulerNode> request, boolean updatePending) {
     boolean reReservation = false;
 
     try {
@@ -531,12 +534,15 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
           liveContainers.put(containerId, rmContainer);
 
           // Deduct pending resource requests
-          ContainerRequest containerRequest = appSchedulingInfo.allocate(
-              allocation.getAllocationLocalityType(),
-              schedulerContainer.getSchedulerNode(),
-              schedulerContainer.getSchedulerRequestKey(),
-              schedulerContainer.getRmContainer().getContainer());
-          ((RMContainerImpl) rmContainer).setContainerRequest(containerRequest);
+          if (updatePending) {
+            ContainerRequest containerRequest = appSchedulingInfo.allocate(
+                allocation.getAllocationLocalityType(),
+                schedulerContainer.getSchedulerNode(),
+                schedulerContainer.getSchedulerRequestKey(),
+                schedulerContainer.getRmContainer().getContainer());
+            ((RMContainerImpl) rmContainer).setContainerRequest(
+                containerRequest);
+          }
 
           attemptResourceUsage.incUsed(schedulerContainer.getNodePartition(),
               allocation.getAllocatedOrReservedResource());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88d8d3f4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
index 548b909..eddf8c8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
@@ -264,7 +264,7 @@ public class TestCapacitySchedulerAsyncScheduling {
     reservedProposals.add(reservedForAttempt1Proposal);
     ResourceCommitRequest request =
         new ResourceCommitRequest(null, reservedProposals, null);
-    scheduler.tryCommit(scheduler.getClusterResource(), request);
+    scheduler.tryCommit(scheduler.getClusterResource(), request, true);
     Assert.assertNull("Outdated proposal should not be accepted!",
         sn2.getReservedContainer());
 
@@ -385,7 +385,7 @@ public class TestCapacitySchedulerAsyncScheduling {
           // call real apply
           try {
             cs.tryCommit((Resource) invocation.getArguments()[0],
-                (ResourceCommitRequest) invocation.getArguments()[1]);
+                (ResourceCommitRequest) invocation.getArguments()[1], true);
           } catch (Exception e) {
             e.printStackTrace();
             Assert.fail();
@@ -393,12 +393,12 @@ public class TestCapacitySchedulerAsyncScheduling {
           isChecked.set(true);
         } else {
           cs.tryCommit((Resource) invocation.getArguments()[0],
-              (ResourceCommitRequest) invocation.getArguments()[1]);
+              (ResourceCommitRequest) invocation.getArguments()[1], true);
         }
         return null;
       }
     }).when(spyCs).tryCommit(Mockito.any(Resource.class),
-        Mockito.any(ResourceCommitRequest.class));
+        Mockito.any(ResourceCommitRequest.class), Mockito.anyBoolean());
 
     spyCs.handle(new NodeUpdateSchedulerEvent(sn1.getRMNode()));
 
@@ -473,7 +473,7 @@ public class TestCapacitySchedulerAsyncScheduling {
       newProposals.add(newContainerProposal);
       ResourceCommitRequest request =
           new ResourceCommitRequest(newProposals, null, null);
-      scheduler.tryCommit(scheduler.getClusterResource(), request);
+      scheduler.tryCommit(scheduler.getClusterResource(), request, true);
     }
     // make sure node resource can't be over-allocated!
     Assert.assertTrue("Node resource is Over-allocated!",




[31/32] hadoop git commit: YARN-7795. Fix jenkins issues of YARN-6592 branch. (Sunil G via asuresh)

Posted by as...@apache.org.
YARN-7795. Fix jenkins issues of YARN-6592 branch. (Sunil G via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c23980c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c23980c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c23980c4

Branch: refs/heads/trunk
Commit: c23980c4f2cf4c751a99fd310e60149cb32ea7c7
Parents: 644afe5
Author: Arun Suresh <as...@apache.org>
Authored: Wed Jan 24 14:18:32 2018 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../hadoop/yarn/api/protocolrecords/AllocateRequest.java     | 4 ++--
 .../apache/hadoop/yarn/api/records/SchedulingRequest.java    | 8 ++++----
 .../apache/hadoop/yarn/api/resource/PlacementConstraint.java | 3 +++
 .../hadoop/yarn/api/resource/PlacementConstraints.java       | 3 ---
 .../yarn/api/records/impl/pb/ResourceSizingPBImpl.java       | 3 +++
 .../yarn/api/records/impl/pb/SchedulingRequestPBImpl.java    | 3 +++
 .../resourcemanager/scheduler/AbstractYarnScheduler.java     | 1 -
 .../server/resourcemanager/scheduler/AppSchedulingInfo.java  | 3 +--
 .../yarn/server/resourcemanager/scheduler/SchedulerNode.java | 8 ++++++--
 .../scheduler/capacity/CapacityScheduler.java                | 3 +--
 .../scheduler/constraint/AllocationTagsManager.java          | 8 +++++---
 .../scheduler/constraint/PlacementConstraintsUtil.java       | 4 ++--
 .../scheduler/placement/AppPlacementAllocator.java           | 4 ++--
 .../placement/SingleConstraintAppPlacementAllocator.java     | 1 -
 .../scheduler/capacity/TestCapacityScheduler.java            | 1 -
 .../TestCapacitySchedulerSchedulingRequestUpdate.java        | 4 +++-
 .../capacity/TestSchedulingRequestContainerAllocation.java   | 8 --------
 .../TestSchedulingRequestContainerAllocationAsync.java       | 1 -
 18 files changed, 35 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c23980c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
index d8d2347..876957e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
@@ -229,8 +229,8 @@ public abstract class AllocateRequest {
   /**
    * Set the list of Scheduling requests to inform the
    * <code>ResourceManager</code> about the application's resource requirements
-   * (potentially including allocation tags & placement constraints).
-   * @param schedulingRequests list of <code>SchedulingRequest</code> to update
+   * (potentially including allocation tags and placement constraints).
+   * @param schedulingRequests list of {@link SchedulingRequest} to update
    *          the <code>ResourceManager</code> about the application's resource
    *          requirements.
    */
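As a rough sketch (not from this patch) of how an ApplicationMaster might hand such requests to the ResourceManager through the allocate call; the id, size and tag values are illustrative, and the builder methods assumed here come from the SchedulingRequest API on this branch:

    import java.util.Collections;

    import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
    import org.apache.hadoop.yarn.api.records.ExecutionType;
    import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.api.records.ResourceSizing;
    import org.apache.hadoop.yarn.api.records.SchedulingRequest;

    public final class SchedulingRequestExample {
      public static AllocateRequest buildAllocate(int responseId, float progress) {
        // Ask for 3 GUARANTEED containers of 1 GB / 1 vcore, tagged "zk".
        SchedulingRequest zkRequest = SchedulingRequest.newBuilder()
            .allocationRequestId(1L)
            .priority(Priority.newInstance(1))
            .executionType(ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
            .allocationTags(Collections.singleton("zk"))
            .resourceSizing(ResourceSizing.newInstance(3, Resource.newInstance(1024, 1)))
            .build();

        AllocateRequest request = AllocateRequest.newInstance(
            responseId, progress, null, null, null);
        request.setSchedulingRequests(Collections.singletonList(zkRequest));
        return request;
      }
    }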

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c23980c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
index e32dd24..4bb2b84 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
@@ -77,7 +77,7 @@ public abstract class SchedulingRequest {
 
     /**
      * Set the <code>allocationRequestId</code> of the request.
-     * 
+     *
      * @see SchedulingRequest#setAllocationRequestId(long)
      * @param allocationRequestId <code>allocationRequestId</code> of the
      *          request
@@ -107,7 +107,7 @@ public abstract class SchedulingRequest {
 
     /**
      * Set the <code>executionType</code> of the request.
-     * 
+     *
      * @see SchedulingRequest#setExecutionType(ExecutionTypeRequest)
      * @param executionType <code>executionType</code> of the request
      * @return {@link SchedulingRequest.SchedulingRequestBuilder}
@@ -119,7 +119,7 @@ public abstract class SchedulingRequest {
       schedulingRequest.setExecutionType(executionType);
       return this;
     }
-    
+
     /**
      * Set the <code>allocationTags</code> of the request.
      *
@@ -169,7 +169,7 @@ public abstract class SchedulingRequest {
 
     /**
      * Return generated {@link SchedulingRequest} object.
-     * 
+     *
      * @return {@link SchedulingRequest}
      */
     @Public

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c23980c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
index 4d998ac..c054cbc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
@@ -341,6 +341,9 @@ public class PlacementConstraint {
    * {@link TargetOperator} used.
    */
   public static class TargetConstraint extends AbstractConstraint {
+    /**
+     * TargetOperator enum helps to specify type.
+     */
     enum TargetOperator {
       IN, NOT_IN
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c23980c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
index ba1beae..70a8080 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
@@ -20,12 +20,9 @@ package org.apache.hadoop.yarn.api.resource;
 
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint.DelayedOr;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c23980c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
index 4054837..1363942 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
@@ -26,6 +26,9 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceSizingProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceSizingProtoOrBuilder;
 
+/**
+ * Proto implementation for {@link ResourceSizing}.
+ */
 @Private
 @Unstable
 public class ResourceSizingPBImpl extends ResourceSizing {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c23980c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
index 1f86043..11f75bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
@@ -37,6 +37,9 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ResourceSizingProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.SchedulingRequestProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.SchedulingRequestProtoOrBuilder;
 
+/**
+ * Proto implementation for {@link SchedulingRequest}.
+ */
 @Private
 @Unstable
 public class SchedulingRequestPBImpl extends SchedulingRequest {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c23980c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index 7f81f00..e76287d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -53,7 +53,6 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.api.records.ResourceSizing;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerError;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c23980c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
index 7d6f233..0389895 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.Applicatio
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.AppPlacementAllocator;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.LocalityAppPlacementAllocator;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PendingAskUpdateResult;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SingleConstraintAppPlacementAllocator;
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
@@ -739,7 +738,7 @@ public class AppSchedulingInfo {
 
   /**
    * Pre-check node to see if it satisfy the given schedulerKey and
-   * scheduler mode
+   * scheduler mode.
    *
    * @param schedulerKey schedulerKey
    * @param schedulerNode schedulerNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c23980c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
index 96a8e34..d5bfc57 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
@@ -471,8 +471,12 @@ public abstract class SchedulerNode {
 
   @Override
   public boolean equals(Object o) {
-    if (this == o) return true;
-    if (!(o instanceof SchedulerNode)) return false;
+    if (this == o) {
+      return true;
+    }
+    if (!(o instanceof SchedulerNode)) {
+      return false;
+    }
 
     SchedulerNode that = (SchedulerNode) o;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c23980c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index a096e2f..cb01351 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -63,7 +63,6 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.ResourceSizing;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResoureRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -1062,7 +1061,7 @@ public class CapacityScheduler extends
   }
 
   /**
-   * Normalize a list of SchedulingRequest
+   * Normalize a list of SchedulingRequest.
    *
    * @param asks scheduling request
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c23980c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
index 42a78c9..8ef9999 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
@@ -66,7 +66,7 @@ public class AllocationTagsManager {
   private TypeToCountedTags<String> globalRackMapping = new TypeToCountedTags();
 
   /**
-   * Generic store mapping type <T> to counted tags.
+   * Generic store mapping type T to counted tags.
    * Currently used both for NodeId to Tag, Count and Rack to Tag, Count
    */
   @VisibleForTesting
@@ -467,7 +467,8 @@ public class AllocationTagsManager {
    *                      specified, all tags (of the node/app) will be
    *                      considered.
    * @param op            operator. Such as Long::max, Long::sum, etc. Required.
-   *                      This parameter only take effect when #values >= 2.
+   *                      This parameter only takes effect when #values is no
+   *                      less than 2.
    * @return cardinality of specified query on the node.
    * @throws InvalidAllocationTagsQueryException when illegal query
    *                                            parameter specified
@@ -515,7 +516,8 @@ public class AllocationTagsManager {
    *                      specified, all tags (of the rack/app) will be
    *                      considered.
    * @param op            operator. Such as Long::max, Long::sum, etc. Required.
-   *                      This parameter only take effect when #values >= 2.
+   *                      This parameter only takes effect when #values is
+   *                      no less than 2.
    * @return cardinality of specified query on the rack.
    * @throws InvalidAllocationTagsQueryException when illegal query
    *                                            parameter specified

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c23980c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
index c07c16f..199dd62 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
@@ -74,7 +74,7 @@ public final class PlacementConstraintsUtil {
       throws InvalidAllocationTagsQueryException {
     long minScopeCardinality = 0;
     long maxScopeCardinality = 0;
-    
+
     // Optimizations to only check cardinality if necessary.
     int desiredMinCardinality = sc.getMinCardinality();
     int desiredMaxCardinality = sc.getMaxCardinality();
@@ -179,7 +179,7 @@ public final class PlacementConstraintsUtil {
    * first validates the constraint specified in the request; if not specified,
    * then it validates application level constraint if exists; otherwise, it
    * validates the global constraint if exists.
-   * <p/>
+   *
    * This method only checks whether a scheduling request can be placed
    * on a node with respect to the certain placement constraint. It gives no
    * guarantee that asked allocations can be eventually allocated because

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c23980c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/AppPlacementAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/AppPlacementAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/AppPlacementAllocator.java
index 72a6c4c..df58157 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/AppPlacementAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/AppPlacementAllocator.java
@@ -57,7 +57,7 @@ public abstract class AppPlacementAllocator<N extends SchedulerNode> {
   protected RMContext rmContext;
 
   /**
-   * Get iterator of preferred node depends on requirement and/or availability
+   * Get iterator of preferred node depends on requirement and/or availability.
    * @param candidateNodeSet input CandidateNodeSet
    * @return iterator of preferred node
    */
@@ -180,7 +180,7 @@ public abstract class AppPlacementAllocator<N extends SchedulerNode> {
   public abstract void showRequests();
 
   /**
-   * Initialize this allocator, this will be called by Factory automatically
+   * Initialize this allocator, this will be called by Factory automatically.
    *
    * @param appSchedulingInfo appSchedulingInfo
    * @param schedulerRequestKey schedulerRequestKey

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c23980c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
index b02cb00..a04816b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
@@ -23,7 +23,6 @@ import org.apache.commons.collections.IteratorUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.ResourceSizing;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c23980c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 79898bb..7764ac8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -37,7 +37,6 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 import java.util.concurrent.BrokenBarrierException;
 import java.util.concurrent.CyclicBarrier;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c23980c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSchedulingRequestUpdate.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSchedulingRequestUpdate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSchedulingRequestUpdate.java
index b6ac4b6..484d780 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSchedulingRequestUpdate.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSchedulingRequestUpdate.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.ResourceSizing;
-import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
@@ -41,6 +40,9 @@ import org.junit.Test;
 
 import java.util.Arrays;
 
+/**
+ * Test class for verifying scheduling request updates in the CapacityScheduler.
+ */
 public class TestCapacitySchedulerSchedulingRequestUpdate
     extends CapacitySchedulerTestBase {
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c23980c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java
index 0a44a1e..b297f79 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java
@@ -20,27 +20,19 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
 import com.google.common.collect.ImmutableSet;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceSizing;
-import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
-import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
 import org.junit.Assert;
 import org.junit.Before;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c23980c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocationAsync.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocationAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocationAsync.java
index c7f13cd..fc1cb0d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocationAsync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocationAsync.java
@@ -23,7 +23,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceSizing;
-import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;




[27/32] hadoop git commit: YARN-7788. Factor out management of temp tags from AllocationTagsManager. (Arun Suresh via kkaranasos)

Posted by as...@apache.org.
YARN-7788. Factor out management of temp tags from AllocationTagsManager. (Arun Suresh via kkaranasos)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/adbe87ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/adbe87ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/adbe87ab

Branch: refs/heads/trunk
Commit: adbe87abf8b2814e0e2988d09ef8a8569190c80e
Parents: 8bf7c44
Author: Konstantinos Karanasos <kk...@apache.org>
Authored: Mon Jan 22 23:51:02 2018 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../constraint/AllocationTagsManager.java       | 110 +++---------
 .../algorithm/DefaultPlacementAlgorithm.java    |   8 +-
 .../algorithm/LocalAllocationTagsManager.java   | 167 +++++++++++++++++++
 .../constraint/TestAllocationTagsManager.java   |  82 ---------
 .../TestLocalAllocationTagsManager.java         | 139 +++++++++++++++
 5 files changed, 336 insertions(+), 170 deletions(-)
----------------------------------------------------------------------
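
For orientation, here is a minimal sketch of how the refactored pieces are meant to interact during a single constrained-placement cycle. It assumes only the class and method signatures shown in the diffs below; the wrapper class name TempTagCycleSketch, the node "host1:123" and the "zk" tag are illustrative, and the sketch is placed in the same package as LocalAllocationTagsManager because its helper methods are package-private.

package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm;

import java.util.Set;

import com.google.common.collect.ImmutableSet;

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.InvalidAllocationTagsQueryException;

/** Illustrative sketch only: one placement cycle using temporary tags. */
public class TempTagCycleSketch {

  static void placementCycle(RMContext rmContext, ApplicationId appId)
      throws InvalidAllocationTagsQueryException {
    // Wrap the cluster-wide tags manager so tentative placements can be
    // tracked without registering real containers.
    LocalAllocationTagsManager localAtm =
        new LocalAllocationTagsManager(rmContext.getAllocationTagsManager());

    // Tentatively place a container with tag "zk" on host1 for this app.
    NodeId node = NodeId.fromString("host1:123");
    Set<String> tags = ImmutableSet.of("zk");
    localAtm.addTempTags(node, appId, tags);

    // Cardinality queries during the cycle now see the tentative placement,
    // so later requests in the same cycle cannot silently violate constraints.
    long zkOnNode = localAtm.getNodeCardinalityByOp(node, appId, tags, Long::max);
    System.out.println("zk cardinality on " + node + " = " + zkOnNode);

    // At the end of the cycle, drop every temporary tag of the application.
    localAtm.cleanTempContainers(appId);
  }
}

The point of the local wrapper is that temporary tags influence cardinality checks within the placement cycle but can be dropped wholesale once the algorithm finishes, leaving only tags backed by real containers.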


http://git-wip-us.apache.org/repos/asf/hadoop/blob/adbe87ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
index 962e548..7ad5e8c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
@@ -24,17 +24,14 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
-import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.log4j.Logger;
 
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -61,9 +58,6 @@ public class AllocationTagsManager {
   // Application's tags to Rack
   private Map<ApplicationId, TypeToCountedTags> perAppRackMappings =
       new HashMap<>();
-  // Application's Temporary containers mapping
-  private Map<ApplicationId, Map<NodeId, Map<ContainerId, Set<String>>>>
-      appTempMappings = new HashMap<>();
 
   // Global tags to node mapping (used to fast return aggregated tags
   // cardinality across apps)
@@ -76,7 +70,7 @@ public class AllocationTagsManager {
    * Currently used both for NodeId to Tag, Count and Rack to Tag, Count
    */
   @VisibleForTesting
-  static class TypeToCountedTags<T> {
+  public static class TypeToCountedTags<T> {
     // Map<Type, Map<Tag, Count>>
     private Map<T, Map<String, Long>> typeToTagsWithCount = new HashMap<>();
 
@@ -214,7 +208,7 @@ public class AllocationTagsManager {
   }
 
   @VisibleForTesting
-  Map<ApplicationId, TypeToCountedTags> getPerAppNodeMappings() {
+  public Map<ApplicationId, TypeToCountedTags> getPerAppNodeMappings() {
     return perAppNodeMappings;
   }
 
@@ -233,12 +227,6 @@ public class AllocationTagsManager {
     return globalRackMapping;
   }
 
-  @VisibleForTesting
-  public Map<NodeId, Map<ContainerId, Set<String>>> getAppTempMappings(
-      ApplicationId applicationId) {
-    return appTempMappings.get(applicationId);
-  }
-
   public AllocationTagsManager(RMContext context) {
     ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
     readLock = lock.readLock();
@@ -246,39 +234,6 @@ public class AllocationTagsManager {
     rmContext = context;
   }
 
-  //
-
-  /**
-   * Method adds a temporary fake-container tag to Node mapping.
-   * Used by the constrained placement algorithm to keep track of containers
-   * that are currently placed on nodes but are not yet allocated.
-   * @param nodeId
-   * @param applicationId
-   * @param allocationTags
-   */
-  public void addTempContainer(NodeId nodeId, ApplicationId applicationId,
-      Set<String> allocationTags) {
-    ContainerId tmpContainer = ContainerId.newContainerId(
-        ApplicationAttemptId.newInstance(applicationId, 1), System.nanoTime());
-
-    writeLock.lock();
-    try {
-      Map<NodeId, Map<ContainerId, Set<String>>> appTempMapping =
-          appTempMappings.computeIfAbsent(applicationId, k -> new HashMap<>());
-      Map<ContainerId, Set<String>> containerTempMapping =
-          appTempMapping.computeIfAbsent(nodeId, k -> new HashMap<>());
-      containerTempMapping.put(tmpContainer, allocationTags);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Added TEMP container=" + tmpContainer + " with tags=["
-            + StringUtils.join(allocationTags, ",") + "]");
-      }
-    } finally {
-      writeLock.unlock();
-    }
-
-    addContainer(nodeId, tmpContainer, allocationTags);
-  }
-
   /**
    * Notify container allocated on a node.
    *
@@ -297,6 +252,15 @@ public class AllocationTagsManager {
     }
     ApplicationId applicationId =
         containerId.getApplicationAttemptId().getApplicationId();
+    addTags(nodeId, applicationId, allocationTags);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Added container=" + containerId + " with tags=["
+          + StringUtils.join(allocationTags, ",") + "]");
+    }
+  }
+
+  public void addTags(NodeId nodeId, ApplicationId applicationId,
+      Set<String> allocationTags) {
     writeLock.lock();
     try {
       TypeToCountedTags perAppTagsMapping = perAppNodeMappings
@@ -312,11 +276,6 @@ public class AllocationTagsManager {
       perAppRackTagsMapping.addTags(nodeRack, allocationTags);
       globalNodeMapping.addTags(nodeId, allocationTags);
       globalRackMapping.addTags(nodeRack, allocationTags);
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Added container=" + containerId + " with tags=["
-            + StringUtils.join(allocationTags, ",") + "]");
-      }
     } finally {
       writeLock.unlock();
     }
@@ -339,6 +298,21 @@ public class AllocationTagsManager {
     ApplicationId applicationId =
         containerId.getApplicationAttemptId().getApplicationId();
 
+    removeTags(nodeId, applicationId, allocationTags);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Removed container=" + containerId + " with tags=["
+          + StringUtils.join(allocationTags, ",") + "]");
+    }
+  }
+
+  /**
+   * Helper method to just remove the tags associated with a container.
+   * @param nodeId node from which the allocation tags are removed
+   * @param applicationId application that owns the allocation tags
+   * @param allocationTags allocation tags to remove
+   */
+  public void removeTags(NodeId nodeId, ApplicationId applicationId,
+      Set<String> allocationTags) {
     writeLock.lock();
     try {
       TypeToCountedTags perAppTagsMapping =
@@ -364,43 +338,11 @@ public class AllocationTagsManager {
       if (perAppRackTagsMapping.isEmpty()) {
         perAppRackMappings.remove(applicationId);
       }
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Removed container=" + containerId + " with tags=["
-            + StringUtils.join(allocationTags, ",") + "]");
-      }
     } finally {
       writeLock.unlock();
     }
   }
 
-  /**
-   * Method removes temporary containers associated with an application
-   * Used by the placement algorithm to clean temporary tags at the end of
-   * a placement cycle.
-   * @param applicationId Application Id.
-   */
-  public void cleanTempContainers(ApplicationId applicationId) {
-
-    if (!appTempMappings.get(applicationId).isEmpty()) {
-      appTempMappings.get(applicationId).entrySet().stream().forEach(nodeE -> {
-        nodeE.getValue().entrySet().stream().forEach(containerE -> {
-          removeContainer(nodeE.getKey(), containerE.getKey(),
-              containerE.getValue());
-        });
-      });
-      writeLock.lock();
-      try {
-        appTempMappings.remove(applicationId);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Removed TEMP containers of app=" + applicationId);
-        }
-      } finally {
-        writeLock.unlock();
-      }
-    }
-  }
-
 
   /**
    * Get Node cardinality for a specific tag.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/adbe87ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
index cf2ed15..9887749 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.InvalidAllocationTagsQueryException;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintsUtil;
@@ -53,13 +52,14 @@ public class DefaultPlacementAlgorithm implements ConstraintPlacementAlgorithm {
   // Number of times to re-attempt placing a single scheduling request.
   private static final int RE_ATTEMPT_COUNT = 2;
 
-  private AllocationTagsManager tagsManager;
+  private LocalAllocationTagsManager tagsManager;
   private PlacementConstraintManager constraintManager;
   private NodeCandidateSelector nodeSelector;
 
   @Override
   public void init(RMContext rmContext) {
-    this.tagsManager = rmContext.getAllocationTagsManager();
+    this.tagsManager = new LocalAllocationTagsManager(
+        rmContext.getAllocationTagsManager());
     this.constraintManager = rmContext.getPlacementConstraintManager();
     this.nodeSelector =
         filter -> ((AbstractYarnScheduler) (rmContext).getScheduler())
@@ -143,7 +143,7 @@ public class DefaultPlacementAlgorithm implements ConstraintPlacementAlgorithm {
             numAllocs =
                 schedulingRequest.getResourceSizing().getNumAllocations();
             // Add temp-container tags for current placement cycle
-            this.tagsManager.addTempContainer(node.getNodeID(),
+            this.tagsManager.addTempTags(node.getNodeID(),
                 requests.getApplicationId(),
                 schedulingRequest.getAllocationTags());
             lastSatisfiedNode = node;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/adbe87ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/LocalAllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/LocalAllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/LocalAllocationTagsManager.java
new file mode 100644
index 0000000..9472719
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/LocalAllocationTagsManager.java
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.InvalidAllocationTagsQueryException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.LongBinaryOperator;
+
+class LocalAllocationTagsManager extends AllocationTagsManager {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(LocalAllocationTagsManager.class);
+
+  private final AllocationTagsManager tagsManager;
+
+  // Application's Temporary containers mapping
+  private Map<ApplicationId, Map<NodeId, Map<String, AtomicInteger>>>
+      appTempMappings = new HashMap<>();
+
+  LocalAllocationTagsManager(
+      AllocationTagsManager allocationTagsManager) {
+    super(null);
+    this.tagsManager = allocationTagsManager;
+  }
+
+  void addTempTags(NodeId nodeId,
+      ApplicationId applicationId, Set<String> allocationTags) {
+    Map<NodeId, Map<String, AtomicInteger>> appTempMapping =
+        appTempMappings.computeIfAbsent(applicationId, k -> new HashMap<>());
+    Map<String, AtomicInteger> containerTempMapping =
+        appTempMapping.computeIfAbsent(nodeId, k -> new HashMap<>());
+    for (String tag : allocationTags) {
+      containerTempMapping.computeIfAbsent(tag,
+          k -> new AtomicInteger(0)).incrementAndGet();
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Added TEMP container with tags=["
+          + StringUtils.join(allocationTags, ",") + "]");
+    }
+    tagsManager.addTags(nodeId, applicationId, allocationTags);
+  }
+
+  void removeTempTags(NodeId nodeId, ApplicationId applicationId,
+      Set<String> allocationTags) {
+    Map<NodeId, Map<String, AtomicInteger>> appTempMapping =
+        appTempMappings.get(applicationId);
+    if (appTempMapping != null) {
+      Map<String, AtomicInteger> containerTempMap =
+          appTempMapping.get(nodeId);
+      if (containerTempMap != null) {
+        for (String tag : allocationTags) {
+          AtomicInteger count = containerTempMap.get(tag);
+          if (count != null) {
+            if (count.decrementAndGet() <= 0) {
+              containerTempMap.remove(tag);
+            }
+          }
+        }
+      }
+    }
+    if (allocationTags != null) {
+      removeTags(nodeId, applicationId, allocationTags);
+    }
+  }
+
+  /**
+   * Removes the temporary tags associated with an application.
+   * Used by the placement algorithm to clean temporary tags at the end of
+   * a placement cycle.
+   * @param applicationId Application Id.
+   */
+  public void cleanTempContainers(ApplicationId applicationId) {
+
+    if (!appTempMappings.get(applicationId).isEmpty()) {
+      appTempMappings.get(applicationId).entrySet().stream().forEach(nodeE -> {
+        nodeE.getValue().entrySet().stream().forEach(tagE -> {
+          for (int i = 0; i < tagE.getValue().get(); i++) {
+            removeTags(nodeE.getKey(), applicationId,
+                Collections.singleton(tagE.getKey()));
+          }
+        });
+      });
+      appTempMappings.remove(applicationId);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Removed TEMP containers of app=" + applicationId);
+      }
+    }
+  }
+
+  @Override
+  public void addContainer(NodeId nodeId, ContainerId containerId,
+      Set<String> allocationTags) {
+    tagsManager.addContainer(nodeId, containerId, allocationTags);
+  }
+
+  @Override
+  public void removeContainer(NodeId nodeId, ContainerId containerId,
+      Set<String> allocationTags) {
+    tagsManager.removeContainer(nodeId, containerId, allocationTags);
+  }
+
+  @Override
+  public void removeTags(NodeId nodeId, ApplicationId applicationId,
+      Set<String> allocationTags) {
+    tagsManager.removeTags(nodeId, applicationId, allocationTags);
+  }
+
+  @Override
+  public long getNodeCardinality(NodeId nodeId, ApplicationId applicationId,
+      String tag) throws InvalidAllocationTagsQueryException {
+    return tagsManager.getNodeCardinality(nodeId, applicationId, tag);
+  }
+
+  @Override
+  public long getRackCardinality(String rack, ApplicationId applicationId,
+      String tag) throws InvalidAllocationTagsQueryException {
+    return tagsManager.getRackCardinality(rack, applicationId, tag);
+  }
+
+  @Override
+  public boolean allocationTagExistsOnNode(NodeId nodeId,
+      ApplicationId applicationId, String tag)
+      throws InvalidAllocationTagsQueryException {
+    return tagsManager.allocationTagExistsOnNode(nodeId, applicationId, tag);
+  }
+
+  @Override
+  public long getNodeCardinalityByOp(NodeId nodeId,
+      ApplicationId applicationId, Set<String> tags, LongBinaryOperator op)
+      throws InvalidAllocationTagsQueryException {
+    return tagsManager.getNodeCardinalityByOp(nodeId, applicationId, tags, op);
+  }
+
+  @Override
+  public long getRackCardinalityByOp(String rack, ApplicationId applicationId,
+      Set<String> tags, LongBinaryOperator op)
+      throws InvalidAllocationTagsQueryException {
+    return tagsManager.getRackCardinalityByOp(rack, applicationId, tags, op);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/adbe87ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
index 7afe4ef..76f451e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
@@ -23,7 +23,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
 import com.google.common.collect.ImmutableSet;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
@@ -363,87 +362,6 @@ public class TestAllocationTagsManager {
   }
 
   @Test
-  public void testTempContainerAllocations()
-      throws InvalidAllocationTagsQueryException {
-    /**
-     * Construct both TEMP and normal containers: Node1: TEMP container_1_1
-     * (mapper/reducer/app_1) container_1_2 (service/app_1)
-     *
-     * Node2: container_1_3 (reducer/app_1) TEMP container_2_1 (service/app_2)
-     */
-
-    AllocationTagsManager atm = new AllocationTagsManager(rmContext);
-
-    // 3 Containers from app1
-    atm.addTempContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.addContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockContainerId(1, 2), ImmutableSet.of("service"));
-
-    atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockContainerId(1, 3), ImmutableSet.of("reducer"));
-
-    // 1 Container from app2
-    atm.addTempContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(2), ImmutableSet.of("service"));
-
-    // Expect tag mappings to be present including temp Tags
-    Assert.assertEquals(1,
-        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
-            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
-            Long::sum));
-
-    Assert.assertEquals(1,
-        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
-            TestUtils.getMockApplicationId(1), ImmutableSet.of("service"),
-            Long::sum));
-
-    Assert.assertEquals(1,
-        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
-            TestUtils.getMockApplicationId(2), ImmutableSet.of("service"),
-            Long::sum));
-
-    // Do a temp Tag cleanup on app2
-    atm.cleanTempContainers(TestUtils.getMockApplicationId(2));
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
-            TestUtils.getMockApplicationId(2), ImmutableSet.of("service"),
-            Long::sum));
-    // Expect app1 to be unaffected
-    Assert.assertEquals(1,
-        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
-            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
-            Long::sum));
-    // Do a cleanup on app1 as well
-    atm.cleanTempContainers(TestUtils.getMockApplicationId(1));
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
-            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
-            Long::sum));
-
-    // Non temp-tags should be unaffected
-    Assert.assertEquals(1,
-        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
-            TestUtils.getMockApplicationId(1), ImmutableSet.of("service"),
-            Long::sum));
-
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
-            TestUtils.getMockApplicationId(2), ImmutableSet.of("service"),
-            Long::sum));
-
-    // Expect app2 with no containers, and app1 with 2 containers across 2 nodes
-    Assert.assertEquals(2,
-        atm.getPerAppNodeMappings().get(TestUtils.getMockApplicationId(1))
-            .getTypeToTagsWithCount().size());
-
-    Assert.assertNull(
-        atm.getPerAppNodeMappings().get(TestUtils.getMockApplicationId(2)));
-  }
-
-  @Test
   public void testQueryCardinalityWithIllegalParameters()
       throws InvalidAllocationTagsQueryException {
     /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/adbe87ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/TestLocalAllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/TestLocalAllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/TestLocalAllocationTagsManager.java
new file mode 100644
index 0000000..0b9657f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/TestLocalAllocationTagsManager.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.InvalidAllocationTagsQueryException;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.List;
+
+/**
+ * Tests the LocalAllocationTagsManager.
+ */
+public class TestLocalAllocationTagsManager {
+
+  private RMContext rmContext;
+
+  @Before
+  public void setup() {
+    MockRM rm = new MockRM();
+    rm.start();
+    MockNodes.resetHostIds();
+    List<RMNode> rmNodes =
+        MockNodes.newNodes(2, 4, Resource.newInstance(4096, 4));
+    for (RMNode rmNode : rmNodes) {
+      rm.getRMContext().getRMNodes().putIfAbsent(rmNode.getNodeID(), rmNode);
+    }
+    rmContext = rm.getRMContext();
+  }
+
+  @Test
+  public void testTempContainerAllocations()
+      throws InvalidAllocationTagsQueryException {
+    /**
+     * Construct both TEMP and normal containers: Node1: TEMP container_1_1
+     * (mapper/reducer/app_1) container_1_2 (service/app_1)
+     *
+     * Node2: container_1_3 (reducer/app_1) TEMP container_2_1 (service/app_2)
+     */
+
+    AllocationTagsManager atm = new AllocationTagsManager(rmContext);
+    LocalAllocationTagsManager ephAtm =
+        new LocalAllocationTagsManager(atm);
+
+    // 3 Containers from app1
+    ephAtm.addTempTags(NodeId.fromString("host1:123"),
+        TestUtils.getMockApplicationId(1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("host1:123"),
+        TestUtils.getMockContainerId(1, 2), ImmutableSet.of("service"));
+
+    atm.addContainer(NodeId.fromString("host2:123"),
+        TestUtils.getMockContainerId(1, 3), ImmutableSet.of("reducer"));
+
+    // 1 Container from app2
+    ephAtm.addTempTags(NodeId.fromString("host2:123"),
+        TestUtils.getMockApplicationId(2), ImmutableSet.of("service"));
+
+    // Expect tag mappings to be present including temp Tags
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
+            Long::sum));
+
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("service"),
+            Long::sum));
+
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
+            TestUtils.getMockApplicationId(2), ImmutableSet.of("service"),
+            Long::sum));
+
+    // Do a temp Tag cleanup on app2
+    ephAtm.cleanTempContainers(TestUtils.getMockApplicationId(2));
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
+            TestUtils.getMockApplicationId(2), ImmutableSet.of("service"),
+            Long::sum));
+    // Expect app1 to be unaffected
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
+            Long::sum));
+    // Do a cleanup on app1 as well
+    ephAtm.cleanTempContainers(TestUtils.getMockApplicationId(1));
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
+            Long::sum));
+
+    // Non temp-tags should be unaffected
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("service"),
+            Long::sum));
+
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
+            TestUtils.getMockApplicationId(2), ImmutableSet.of("service"),
+            Long::sum));
+
+    // Expect app2 with no containers, and app1 with 2 containers across 2 nodes
+    Assert.assertEquals(2,
+        atm.getPerAppNodeMappings().get(TestUtils.getMockApplicationId(1))
+            .getTypeToTagsWithCount().size());
+
+    Assert.assertNull(
+        atm.getPerAppNodeMappings().get(TestUtils.getMockApplicationId(2)));
+  }
+
+}




[03/32] hadoop git commit: YARN-7783. Add validation step to ensure constraints are not violated due to order in which a request is processed. (asuresh)

Posted by as...@apache.org.
YARN-7783. Add validation step to ensure constraints are not violated due to order in which a request is processed. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4c539fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4c539fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4c539fc

Branch: refs/heads/trunk
Commit: a4c539fcdba817e313b2375abf2c4c9a1d13a4fd
Parents: 9b81cb0
Author: Arun Suresh <as...@apache.org>
Authored: Tue Jan 23 08:15:58 2018 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../algorithm/DefaultPlacementAlgorithm.java    | 119 +++++++++++++++++--
 .../constraint/TestPlacementProcessor.java      |  49 ++++++++
 2 files changed, 155 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4c539fc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
index 9887749..4e6473f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
@@ -22,6 +22,7 @@ import java.util.Iterator;
 import java.util.List;
 
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
@@ -69,13 +70,9 @@ public class DefaultPlacementAlgorithm implements ConstraintPlacementAlgorithm {
   public boolean attemptPlacementOnNode(ApplicationId appId,
       SchedulingRequest schedulingRequest, SchedulerNode schedulerNode)
       throws InvalidAllocationTagsQueryException {
-    int numAllocs = schedulingRequest.getResourceSizing().getNumAllocations();
-    if (numAllocs > 0) {
-      if (PlacementConstraintsUtil.canSatisfyConstraints(appId,
-          schedulingRequest, schedulerNode,
-          constraintManager, tagsManager)) {
-        return true;
-      }
+    if (PlacementConstraintsUtil.canSatisfyConstraints(appId,
+        schedulingRequest, schedulerNode, constraintManager, tagsManager)) {
+      return true;
     }
     return false;
   }
@@ -93,6 +90,9 @@ public class DefaultPlacementAlgorithm implements ConstraintPlacementAlgorithm {
     int rePlacementCount = RE_ATTEMPT_COUNT;
     while (rePlacementCount > 0) {
       doPlacement(requests, resp, allNodes, rejectedRequests);
+      // Double check if placement constraints are really satisfied
+      validatePlacement(requests.getApplicationId(), resp,
+          rejectedRequests);
       if (rejectedRequests.size() == 0 || rePlacementCount == 1) {
         break;
       }
@@ -122,9 +122,14 @@ public class DefaultPlacementAlgorithm implements ConstraintPlacementAlgorithm {
         break;
       }
       SchedulingRequest schedulingRequest = requestIterator.next();
+      PlacedSchedulingRequest placedReq =
+          new PlacedSchedulingRequest(schedulingRequest);
+      placedReq.setPlacementAttempt(requests.getPlacementAttempt());
+      resp.getPlacedRequests().add(placedReq);
       CircularIterator<SchedulerNode> nodeIter =
           new CircularIterator(lastSatisfiedNode, nIter, allNodes);
-      int numAllocs = schedulingRequest.getResourceSizing().getNumAllocations();
+      int numAllocs =
+          schedulingRequest.getResourceSizing().getNumAllocations();
       while (nodeIter.hasNext() && numAllocs > 0) {
         SchedulerNode node = nodeIter.next();
         try {
@@ -135,11 +140,7 @@ public class DefaultPlacementAlgorithm implements ConstraintPlacementAlgorithm {
                   requests.getApplicationId(), schedulingRequest, node)) {
             schedulingRequest.getResourceSizing()
                 .setNumAllocations(--numAllocs);
-            PlacedSchedulingRequest placedReq =
-                new PlacedSchedulingRequest(schedulingRequest);
-            placedReq.setPlacementAttempt(requests.getPlacementAttempt());
             placedReq.getNodes().add(node);
-            resp.getPlacedRequests().add(placedReq);
             numAllocs =
                 schedulingRequest.getResourceSizing().getNumAllocations();
             // Add temp-container tags for current placement cycle
@@ -156,6 +157,98 @@ public class DefaultPlacementAlgorithm implements ConstraintPlacementAlgorithm {
     // Add all requests whose numAllocations still > 0 to rejected list.
     requests.getSchedulingRequests().stream()
         .filter(sReq -> sReq.getResourceSizing().getNumAllocations() > 0)
-        .forEach(rejReq -> rejectedRequests.add(rejReq));
+        .forEach(rejReq -> rejectedRequests.add(cloneReq(rejReq)));
   }
+
+  /**
+   * During the placement phase, allocation tags are added to the node if the
+   * constraint is satisfied. But depending on the order in which the
+   * algorithm sees the requests, it is possible that a constraint that
+   * happened to be valid during placement of an earlier-seen request might
+   * not be valid after all subsequent requests have been placed.
+   *
+   * For example:
+   *   Assume nodes n1, n2, n3, n4 and n5.
+   *
+   *   Consider the 2 constraints:
+   *   1) "foo", anti-affinity with "foo"
+   *   2) "bar", anti-affinity with "foo"
+   *
+   *   And 2 requests:
+   *   req1: NumAllocations = 4, allocTags = [foo]
+   *   req2: NumAllocations = 1, allocTags = [bar]
+   *
+   *   If "req1" is seen first, the algorithm can place the 4 containers on
+   *   n1, n2, n3 and n4. When it gets to "req2", it will see that 4 nodes
+   *   carry the "foo" tag and will place "bar" on n5.
+   *   But if "req2" is seen first, then "bar" can be placed on any node,
+   *   since no node currently has "foo", and when it gets to "req1", since
+   *   "foo" has no anti-affinity with "bar", the algorithm can end up placing
+   *   "foo" on the node holding "bar", violating the second constraint.
+   *
+   * To prevent the above, we need a validation step: after the placements for
+   * a batch of requests are made, for each request we remove its tags from
+   * the node and check whether the constraints would still be satisfied if
+   * the tags were added back to the node.
+   *
+   *   When applied to the example above, after "req2" and "req1" are placed,
+   *   we remove the "bar" tag from the node and try to add it back. This
+   *   time, constraint satisfaction fails, since the node now carries a "foo"
+   *   tag and "bar" cannot be added. The algorithm will then retry placing
+   *   "req2" on another node.
+   *
+   * @param applicationId the application whose placements are being validated
+   * @param resp placement algorithm output holding the tentative placements
+   * @param rejectedRequests list collecting requests that fail validation
+   */
+  private void validatePlacement(ApplicationId applicationId,
+      ConstraintPlacementAlgorithmOutput resp,
+      List<SchedulingRequest> rejectedRequests) {
+    Iterator<PlacedSchedulingRequest> pReqIter =
+        resp.getPlacedRequests().iterator();
+    while (pReqIter.hasNext()) {
+      PlacedSchedulingRequest pReq = pReqIter.next();
+      Iterator<SchedulerNode> nodeIter = pReq.getNodes().iterator();
+      // Assuming all reqs were satisfied.
+      int num = 0;
+      while (nodeIter.hasNext()) {
+        SchedulerNode node = nodeIter.next();
+        try {
+          // Remove just the tags for this placement.
+          this.tagsManager.removeTempTags(node.getNodeID(),
+              applicationId, pReq.getSchedulingRequest().getAllocationTags());
+          if (!attemptPlacementOnNode(
+              applicationId, pReq.getSchedulingRequest(), node)) {
+            nodeIter.remove();
+            num++;
+          } else {
+            // Add back the tags if everything is fine.
+            this.tagsManager.addTempTags(node.getNodeID(),
+                applicationId, pReq.getSchedulingRequest().getAllocationTags());
+          }
+        } catch (InvalidAllocationTagsQueryException e) {
+          LOG.warn("Got exception from TagManager !", e);
+        }
+      }
+      if (num > 0) {
+        SchedulingRequest sReq = cloneReq(pReq.getSchedulingRequest());
+        sReq.getResourceSizing().setNumAllocations(num);
+        rejectedRequests.add(sReq);
+      }
+      if (pReq.getNodes().isEmpty()) {
+        pReqIter.remove();
+      }
+    }
+  }
+
+  private static SchedulingRequest cloneReq(SchedulingRequest sReq) {
+    return SchedulingRequest.newInstance(
+        sReq.getAllocationRequestId(), sReq.getPriority(),
+        sReq.getExecutionType(), sReq.getAllocationTags(),
+        ResourceSizing.newInstance(
+            sReq.getResourceSizing().getNumAllocations(),
+            sReq.getResourceSizing().getResources()),
+        sReq.getPlacementConstraint());
+  }
+
 }
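
To make the ordering problem described in the validatePlacement() javadoc above concrete, here is a minimal, self-contained sketch. It is not part of the commit; the class, node and tag names are illustrative, and it only mimics the remove / re-check / re-add validation pass on plain Java collections rather than using the YARN classes:

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

/** Illustrative only: why tentative placements must be re-validated after a batch. */
public class PlacementValidationSketch {

  // node -> allocation tags tentatively placed on it
  private static final Map<String, Set<String>> NODE_TAGS = new HashMap<>();
  private static final List<String> NODES = Arrays.asList("n1", "n2", "n3", "n4", "n5");

  private static boolean satisfies(String tag, String node) {
    Set<String> present = NODE_TAGS.getOrDefault(node, Collections.emptySet());
    if (tag.equals("foo") || tag.equals("bar")) {
      return !present.contains("foo");   // both "foo" and "bar" are anti-affine to "foo"
    }
    return true;
  }

  /** Greedy first-fit placement that records the tag on the chosen node. */
  private static String place(String tag) {
    for (String node : NODES) {
      if (satisfies(tag, node)) {
        NODE_TAGS.computeIfAbsent(node, k -> new HashSet<>()).add(tag);
        return node;
      }
    }
    return null;
  }

  public static void main(String[] args) {
    // Adversarial order: the single "bar" request is seen before the four "foo" requests.
    Map<String, String> placements = new LinkedHashMap<>();
    placements.put("bar", place("bar"));            // lands on n1 (no "foo" anywhere yet)
    for (int i = 1; i <= 4; i++) {
      placements.put("foo-" + i, place("foo"));     // lands on n1..n4 ("foo" ignores "bar")
    }

    // Validation pass (mirrors validatePlacement): remove the placement's tag,
    // re-check the constraint, and only add the tag back if it still holds.
    for (Map.Entry<String, String> entry : placements.entrySet()) {
      String tag = entry.getKey().startsWith("foo") ? "foo" : "bar";
      String node = entry.getValue();
      NODE_TAGS.get(node).remove(tag);
      if (satisfies(tag, node)) {
        NODE_TAGS.get(node).add(tag);               // still valid, keep the placement
      } else {
        System.out.println(entry.getKey() + " on " + node
            + " violates its constraint after the batch; it must be retried");
      }
    }
    // Prints: bar on n1 violates its constraint after the batch; it must be retried
  }
}

In the commit itself the same effect is achieved with removeTempTags()/addTempTags() on the tags manager, and a clone of the request carrying the failed allocation count is appended to the rejected list so that the next doPlacement() round can retry it.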

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4c539fc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
index 65daeb8..8426b20 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
@@ -151,6 +151,55 @@ public class TestPlacementProcessor {
   }
 
   @Test(timeout = 300000)
+  public void testMutualAntiAffinityPlacement() throws Exception {
+    HashMap<NodeId, MockNM> nodes = new HashMap<>();
+    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm1.getNodeId(), nm1);
+    MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm2.getNodeId(), nm2);
+    MockNM nm3 = new MockNM("h3:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm3.getNodeId(), nm3);
+    MockNM nm4 = new MockNM("h4:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm4.getNodeId(), nm4);
+    MockNM nm5 = new MockNM("h5:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm5.getNodeId(), nm5);
+    nm1.registerNode();
+    nm2.registerNode();
+    nm3.registerNode();
+    nm4.registerNode();
+    nm5.registerNode();
+
+    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    // Containers with allocationTag 'foo' are restricted to 1 per NODE
+    Map<Set<String>, PlacementConstraint> pcMap = new HashMap<>();
+    pcMap.put(Collections.singleton("foo"),
+        PlacementConstraints.build(
+            PlacementConstraints.targetNotIn(NODE, allocationTag("foo"))));
+    pcMap.put(Collections.singleton("bar"),
+        PlacementConstraints.build(
+            PlacementConstraints.targetNotIn(NODE, allocationTag("foo"))));
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2, pcMap);
+    am1.addSchedulingRequest(
+        Arrays.asList(schedulingRequest(1, 1, 1, 512, "bar"),
+            schedulingRequest(1, 2, 1, 512, "foo"),
+            schedulingRequest(1, 3, 1, 512, "foo"),
+            schedulingRequest(1, 4, 1, 512, "foo"),
+            schedulingRequest(1, 5, 1, 512, "foo")));
+    AllocateResponse allocResponse = am1.schedule(); // send the request
+    List<Container> allocatedContainers = new ArrayList<>();
+    allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+
+    // kick the scheduler
+    waitForContainerAllocation(nodes.values(), am1, allocatedContainers, 5);
+
+    Assert.assertEquals(5, allocatedContainers.size());
+    Set<NodeId> nodeIds = allocatedContainers.stream().map(x -> x.getNodeId())
+        .collect(Collectors.toSet());
+    // Ensure unique nodes (antiaffinity)
+    Assert.assertEquals(5, nodeIds.size());
+  }
+
+  @Test(timeout = 300000)
   public void testCardinalityPlacement() throws Exception {
     HashMap<NodeId, MockNM> nodes = new HashMap<>();
     MockNM nm1 = new MockNM("h1:1234", 8192, rm.getResourceTrackerService());




[21/32] hadoop git commit: YARN-7745. Allow DistributedShell to take a placement specification for containers it wants to launch. (Arun Suresh via wangda)

Posted by as...@apache.org.
YARN-7745. Allow DistributedShell to take a placement specification for containers it wants to launch. (Arun Suresh via wangda)

Change-Id: Ided146d662e944a8a4692e5d6885f23fd9bbcad5


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e60f5129
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e60f5129
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e60f5129

Branch: refs/heads/trunk
Commit: e60f51299dba360d13aa39f9ab714fdfc666b532
Parents: 38af237
Author: Wangda Tan <wa...@apache.org>
Authored: Thu Jan 18 14:22:45 2018 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../distributedshell/ApplicationMaster.java     | 124 +++++++++++++++--
 .../applications/distributedshell/Client.java   |  14 ++
 .../distributedshell/PlacementSpec.java         | 137 +++++++++++++++++++
 3 files changed, 263 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e60f5129/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index 270ef1b..9ba2138 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -42,6 +42,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.Arrays;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.GnuParser;
@@ -87,8 +88,11 @@ import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.ProfileCapability;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
@@ -99,6 +103,7 @@ import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
 import org.apache.hadoop.yarn.client.api.TimelineV2Client;
@@ -274,6 +279,10 @@ public class ApplicationMaster {
   @VisibleForTesting
   protected AtomicInteger numRequestedContainers = new AtomicInteger();
 
+  protected AtomicInteger numIgnore = new AtomicInteger();
+
+  protected AtomicInteger totalRetries = new AtomicInteger(10);
+
   // Shell command to be executed
   private String shellCommand = "";
   // Args to be passed to the shell command
@@ -289,6 +298,9 @@ public class ApplicationMaster {
   // File length needed for local resource
   private long shellScriptPathLen = 0;
 
+  // Placement Specifications
+  private Map<String, PlacementSpec> placementSpecs = null;
+
   // Container retry options
   private ContainerRetryPolicy containerRetryPolicy =
       ContainerRetryPolicy.NEVER_RETRY;
@@ -334,6 +346,7 @@ public class ApplicationMaster {
   private final String windows_command = "cmd /c";
 
   private int yarnShellIdCounter = 1;
+  private final AtomicLong allocIdCounter = new AtomicLong(1);
 
   @VisibleForTesting
   protected final Set<ContainerId> launchedContainers =
@@ -457,6 +470,7 @@ public class ApplicationMaster {
         "If container could retry, it specifies max retires");
     opts.addOption("container_retry_interval", true,
         "Interval between each retry, unit is milliseconds");
+    opts.addOption("placement_spec", true, "Placement specification");
     opts.addOption("debug", false, "Dump out debug information");
 
     opts.addOption("help", false, "Print usage");
@@ -487,6 +501,17 @@ public class ApplicationMaster {
       dumpOutDebugInfo();
     }
 
+    if (cliParser.hasOption("placement_spec")) {
+      String placementSpec = cliParser.getOptionValue("placement_spec");
+      LOG.info("Placement Spec received [{}]", placementSpec);
+      parsePlacementSpecs(placementSpec);
+      LOG.info("Total num containers requested [{}]", numTotalContainers);
+      if (numTotalContainers == 0) {
+        throw new IllegalArgumentException(
+            "Cannot run distributed shell with no containers");
+      }
+    }
+
     Map<String, String> envs = System.getenv();
 
     if (!envs.containsKey(Environment.CONTAINER_ID.name())) {
@@ -609,8 +634,11 @@ public class ApplicationMaster {
     }
     containerResourceProfile =
         cliParser.getOptionValue("container_resource_profile", "");
-    numTotalContainers = Integer.parseInt(cliParser.getOptionValue(
-        "num_containers", "1"));
+
+    if (this.placementSpecs == null) {
+      numTotalContainers = Integer.parseInt(cliParser.getOptionValue(
+          "num_containers", "1"));
+    }
     if (numTotalContainers == 0) {
       throw new IllegalArgumentException(
           "Cannot run distributed shell with no containers");
@@ -642,6 +670,17 @@ public class ApplicationMaster {
     return true;
   }
 
+  private void parsePlacementSpecs(String placementSpecifications) {
+    Map<String, PlacementSpec> pSpecs =
+        PlacementSpec.parse(placementSpecifications);
+    this.placementSpecs = new HashMap<>();
+    this.numTotalContainers = 0;
+    for (PlacementSpec pSpec : pSpecs.values()) {
+      this.numTotalContainers += pSpec.numContainers;
+      this.placementSpecs.put(pSpec.sourceTag, pSpec);
+    }
+  }
+
   /**
    * Helper function to print usage
    *
@@ -719,9 +758,19 @@ public class ApplicationMaster {
     // Register self with ResourceManager
     // This will start heartbeating to the RM
     appMasterHostname = NetUtils.getHostname();
+    Map<Set<String>, PlacementConstraint> placementConstraintMap = null;
+    if (this.placementSpecs != null) {
+      placementConstraintMap = new HashMap<>();
+      for (PlacementSpec spec : this.placementSpecs.values()) {
+        if (spec.constraint != null) {
+          placementConstraintMap.put(
+              Collections.singleton(spec.sourceTag), spec.constraint);
+        }
+      }
+    }
     RegisterApplicationMasterResponse response = amRMClient
         .registerApplicationMaster(appMasterHostname, appMasterRpcPort,
-            appMasterTrackingUrl);
+            appMasterTrackingUrl, placementConstraintMap);
     resourceProfiles = response.getResourceProfiles();
     ResourceUtils.reinitializeResources(response.getResourceTypes());
     // Dump out information about cluster capability as seen by the
@@ -765,9 +814,20 @@ public class ApplicationMaster {
     // containers
     // Keep looping until all the containers are launched and shell script
     // executed on them ( regardless of success/failure).
-    for (int i = 0; i < numTotalContainersToRequest; ++i) {
-      ContainerRequest containerAsk = setupContainerAskForRM();
-      amRMClient.addContainerRequest(containerAsk);
+    if (this.placementSpecs == null) {
+      for (int i = 0; i < numTotalContainersToRequest; ++i) {
+        ContainerRequest containerAsk = setupContainerAskForRM();
+        amRMClient.addContainerRequest(containerAsk);
+      }
+    } else {
+      List<SchedulingRequest> schedReqs = new ArrayList<>();
+      for (PlacementSpec pSpec : this.placementSpecs.values()) {
+        for (int i = 0; i < pSpec.numContainers; i++) {
+          SchedulingRequest sr = setupSchedulingRequest(pSpec);
+          schedReqs.add(sr);
+        }
+      }
+      amRMClient.addSchedulingRequests(schedReqs);
     }
     numRequestedContainers.set(numTotalContainers);
   }
@@ -933,6 +993,12 @@ public class ApplicationMaster {
             numRequestedContainers.decrementAndGet();
             // we do not need to release the container as it would be done
             // by the RM
+
+            // Ignore these containers if placementspec is enabled
+            // for the time being.
+            if (placementSpecs != null) {
+              numIgnore.incrementAndGet();
+            }
           }
         } else {
           // nothing to do
@@ -962,14 +1028,18 @@ public class ApplicationMaster {
       int askCount = numTotalContainers - numRequestedContainers.get();
       numRequestedContainers.addAndGet(askCount);
 
-      if (askCount > 0) {
-        for (int i = 0; i < askCount; ++i) {
-          ContainerRequest containerAsk = setupContainerAskForRM();
-          amRMClient.addContainerRequest(containerAsk);
+      // Don't bother re-asking if we are using placementSpecs
+      if (placementSpecs == null) {
+        if (askCount > 0) {
+          for (int i = 0; i < askCount; ++i) {
+            ContainerRequest containerAsk = setupContainerAskForRM();
+            amRMClient.addContainerRequest(containerAsk);
+          }
         }
       }
-      
-      if (numCompletedContainers.get() == numTotalContainers) {
+
+      if (numCompletedContainers.get() + numIgnore.get() >=
+          numTotalContainers) {
         done = true;
       }
     }
@@ -1029,6 +1099,23 @@ public class ApplicationMaster {
     }
 
     @Override
+    public void onRequestsRejected(List<RejectedSchedulingRequest> rejReqs) {
+      List<SchedulingRequest> reqsToRetry = new ArrayList<>();
+      for (RejectedSchedulingRequest rejReq : rejReqs) {
+        LOG.info("Scheduling Request {} has been rejected. Reason {}",
+            rejReq.getRequest(), rejReq.getReason());
+        reqsToRetry.add(rejReq.getRequest());
+      }
+      totalRetries.addAndGet(-1 * reqsToRetry.size());
+      if (totalRetries.get() <= 0) {
+        LOG.info("Exiting, since retries are exhausted !!");
+        done = true;
+      } else {
+        amRMClient.addSchedulingRequests(reqsToRetry);
+      }
+    }
+
+    @Override
     public void onShutdownRequest() {
       done = true;
     }
@@ -1335,6 +1422,19 @@ public class ApplicationMaster {
     return request;
   }
 
+  private SchedulingRequest setupSchedulingRequest(PlacementSpec spec) {
+    long allocId = allocIdCounter.incrementAndGet();
+    SchedulingRequest sReq = SchedulingRequest.newInstance(
+        allocId, Priority.newInstance(requestPriority),
+        ExecutionTypeRequest.newInstance(),
+        Collections.singleton(spec.sourceTag),
+        ResourceSizing.newInstance(
+            createProfileCapability().getProfileCapabilityOverride()), null);
+    sReq.setPlacementConstraint(spec.constraint);
+    LOG.info("Scheduling Request made: " + sReq.toString());
+    return sReq;
+  }
+
   private boolean fileExist(String filePath) {
     return new File(filePath).exists();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e60f5129/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
index ef635d3..2aafa94 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
@@ -188,6 +188,8 @@ public class Client {
   // Whether to auto promote opportunistic containers
   private boolean autoPromoteContainers = false;
 
+  // Placement specification
+  private String placementSpec = "";
   // log4j.properties file 
   // if available, add to local resources and set into classpath 
   private String log4jPropFile = "";	
@@ -366,6 +368,10 @@ public class Client {
         "If container could retry, it specifies max retires");
     opts.addOption("container_retry_interval", true,
         "Interval between each retry, unit is milliseconds");
+    opts.addOption("placement_spec", true,
+        "Placement specification. Please note, if this option is specified,"
+            + " The \"num_containers\" option will be ignored. All requested"
+            + " containers will be of type GUARANTEED" );
   }
 
   /**
@@ -419,6 +425,11 @@ public class Client {
       keepContainers = true;
     }
 
+    if (cliParser.hasOption("placement_spec")) {
+      placementSpec = cliParser.getOptionValue("placement_spec");
+      // Check if it is parsable
+      PlacementSpec.parse(this.placementSpec);
+    }
     appName = cliParser.getOptionValue("appname", "DistributedShell");
     amPriority = Integer.parseInt(cliParser.getOptionValue("priority", "0"));
     amQueue = cliParser.getOptionValue("queue", "default");
@@ -834,6 +845,9 @@ public class Client {
       vargs.add("--container_resource_profile " + containerResourceProfile);
     }
     vargs.add("--num_containers " + String.valueOf(numContainers));
+    if (placementSpec != null && placementSpec.length() > 0) {
+      vargs.add("--placement_spec " + placementSpec);
+    }
     if (null != nodeLabelExpression) {
       appContext.setNodeLabelExpression(nodeLabelExpression);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e60f5129/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/PlacementSpec.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/PlacementSpec.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/PlacementSpec.java
new file mode 100644
index 0000000..ed13ee0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/PlacementSpec.java
@@ -0,0 +1,137 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.applications.distributedshell;
+
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Scanner;
+
+/**
+ * Class encapsulating a SourceTag, a number of containers and a Placement
+ * Constraint.
+ */
+public class PlacementSpec {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(PlacementSpec.class);
+  private static final String SPEC_DELIM = ":";
+  private static final String KV_SPLIT_DELIM = "=";
+  private static final String SPEC_VAL_DELIM = ",";
+  private static final String IN = "in";
+  private static final String NOT_IN = "notin";
+  private static final String CARDINALITY = "cardinality";
+
+  public final String sourceTag;
+  public final int numContainers;
+  public final PlacementConstraint constraint;
+
+  public PlacementSpec(String sourceTag, int numContainers,
+      PlacementConstraint constraint) {
+    this.sourceTag = sourceTag;
+    this.numContainers = numContainers;
+    this.constraint = constraint;
+  }
+
+  // Placement specification should be of the form:
+  // PlacementSpec => ""|KeyVal:PlacementSpec
+  // KeyVal => SourceTag=Constraint
+  // SourceTag => String
+  // Constraint => NumContainers|
+  //               NumContainers,"in",Scope,TargetTag|
+  //               NumContainers,"notin",Scope,TargetTag|
+  //               NumContainers,"cardinality",Scope,TargetTag,MinCard,MaxCard
+  // NumContainers => int (number of containers)
+  // Scope => "NODE"|"RACK"
+  // TargetTag => String (Target Tag)
+  // MinCard => int (min cardinality - needed if ConstraintType == cardinality)
+  // MaxCard => int (max cardinality - needed if ConstraintType == cardinality)
+
+  /**
+   * Parser to convert a string representation of a placement spec to mapping
+   * from source tag to Placement Constraint.
+   *
+   * @param specs Placement spec.
+   * @return Mapping from source tag to placement constraint.
+   */
+  public static Map<String, PlacementSpec> parse(String specs) {
+    LOG.info("Parsing Placement Specs: [{}]", specs);
+    Scanner s = new Scanner(specs).useDelimiter(SPEC_DELIM);
+    Map<String, PlacementSpec> pSpecs = new HashMap<>();
+    while (s.hasNext()) {
+      String sp = s.next();
+      LOG.info("Parsing Spec: [{}]", sp);
+      String[] specSplit = sp.split(KV_SPLIT_DELIM);
+      String sourceTag = specSplit[0];
+      Scanner ps = new Scanner(specSplit[1]).useDelimiter(SPEC_VAL_DELIM);
+      int numContainers = ps.nextInt();
+      if (!ps.hasNext()) {
+        pSpecs.put(sourceTag,
+            new PlacementSpec(sourceTag, numContainers, null));
+        LOG.info("Creating Spec without constraint {}: num[{}]",
+            sourceTag, numContainers);
+        continue;
+      }
+      String cType = ps.next().toLowerCase();
+      String scope = ps.next().toLowerCase();
+
+      String targetTag = ps.next();
+      scope = scope.equals("rack") ? PlacementConstraints.RACK :
+          PlacementConstraints.NODE;
+
+      PlacementConstraint pc;
+      if (cType.equals(IN)) {
+        pc = PlacementConstraints.build(
+            PlacementConstraints.targetIn(scope,
+                PlacementConstraints.PlacementTargets.allocationTag(
+                    targetTag)));
+        LOG.info("Creating IN Constraint for source tag [{}], num[{}]: " +
+                "scope[{}], target[{}]",
+            sourceTag, numContainers, scope, targetTag);
+      } else if (cType.equals(NOT_IN)) {
+        pc = PlacementConstraints.build(
+            PlacementConstraints.targetNotIn(scope,
+                PlacementConstraints.PlacementTargets.allocationTag(
+                    targetTag)));
+        LOG.info("Creating NOT_IN Constraint for source tag [{}], num[{}]: " +
+                "scope[{}], target[{}]",
+            sourceTag, numContainers, scope, targetTag);
+      } else if (cType.equals(CARDINALITY)) {
+        int minCard = ps.nextInt();
+        int maxCard = ps.nextInt();
+        pc = PlacementConstraints.build(
+            PlacementConstraints.targetCardinality(scope, minCard, maxCard,
+                PlacementConstraints.PlacementTargets.allocationTag(
+                    targetTag)));
+        LOG.info("Creating CARDINALITY Constraint source tag [{}], num[{}]: " +
+                "scope[{}], min[{}], max[{}], target[{}]",
+            sourceTag, numContainers, scope, minCard, maxCard, targetTag);
+      } else {
+        throw new RuntimeException(
+            "Could not parse constraintType [" + cType + "]" +
+                " in [" + specSplit[1] + "]");
+      }
+      pSpecs.put(sourceTag, new PlacementSpec(sourceTag, numContainers, pc));
+    }
+    return pSpecs;
+  }
+}
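
A hedged usage sketch of the parser above (not part of the commit; the spec string, tag names and class name are illustrative, chosen to match the grammar in the comment): three "zk" containers that must avoid nodes already holding a "zk" container, plus two "hbase" containers that must land on nodes that do hold one. On the command line the same string would be handed to the DistributedShell Client via the new placement_spec option, which forwards it to the ApplicationMaster.

import java.util.Map;

import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.applications.distributedshell.PlacementSpec;

public class PlacementSpecParseExample {
  public static void main(String[] args) {
    // SourceTag=NumContainers,ConstraintType,Scope,TargetTag - specs separated by ':'
    String spec = "zk=3,notin,node,zk:hbase=2,in,node,zk";

    Map<String, PlacementSpec> parsed = PlacementSpec.parse(spec);
    for (PlacementSpec p : parsed.values()) {
      PlacementConstraint constraint = p.constraint;   // null when only a count was given
      System.out.println(p.sourceTag + ": " + p.numContainers
          + " container(s), constraint = " + constraint);
    }
  }
}

The ApplicationMaster then turns each PlacementSpec into that many SchedulingRequests tagged with the source tag (see setupSchedulingRequest above), so the RM placement processor can enforce the constraints.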




[13/32] hadoop git commit: YARN-7653. Node group support for AllocationTagsManager. (Panagiotis Garefalakis via asuresh)

Posted by as...@apache.org.
YARN-7653. Node group support for AllocationTagsManager. (Panagiotis Garefalakis via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/37f1a7b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/37f1a7b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/37f1a7b6

Branch: refs/heads/trunk
Commit: 37f1a7b64fcc93191367330cd59d4d71d7b29ac7
Parents: 06eb63e
Author: Arun Suresh <as...@apache.org>
Authored: Fri Dec 22 07:24:37 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../server/resourcemanager/ResourceManager.java |   2 +-
 .../constraint/AllocationTagsManager.java       | 282 ++++++++++++++-----
 .../rmcontainer/TestRMContainerImpl.java        |   2 +-
 .../constraint/TestAllocationTagsManager.java   | 269 ++++++++++++------
 4 files changed, 392 insertions(+), 163 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/37f1a7b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index a1d3dfc..1d838f0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -496,7 +496,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
   }
 
   protected AllocationTagsManager createAllocationTagsManager() {
-    return new AllocationTagsManager();
+    return new AllocationTagsManager(this.rmContext);
   }
   
   protected DelegationTokenRenewer createDelegationTokenRenewer() {
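
Before the AllocationTagsManager diff below, a short usage sketch of the node- versus rack-level queries this change introduces. It is not part of the commit: it assumes the fields and imports of TestLocalAllocationTagsManager shown earlier in this digest, the "hbase-rs" tag and method name are illustrative, and the rack-level expectation assumes both hosts resolve to the fallback "default-rack" (used when a node is not registered in the RMContext).

  @Test
  public void testRackCardinalitySketch()
      throws InvalidAllocationTagsQueryException {
    AllocationTagsManager atm = new AllocationTagsManager(rmContext);

    // One "hbase-rs" container from app_1 on each of two hosts.
    atm.addContainer(NodeId.fromString("host1:123"),
        TestUtils.getMockContainerId(1, 1), ImmutableSet.of("hbase-rs"));
    atm.addContainer(NodeId.fromString("host2:123"),
        TestUtils.getMockContainerId(1, 2), ImmutableSet.of("hbase-rs"));

    // Node-level view: each node carries exactly one "hbase-rs" tag.
    Assert.assertEquals(1,
        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
            TestUtils.getMockApplicationId(1), ImmutableSet.of("hbase-rs"),
            Long::sum));

    // Rack-level view (new in this change): tags are also counted per rack.
    // If both hosts map to "default-rack", the aggregated count is 2.
    long rackCount = atm.getRackCardinality("default-rack",
        TestUtils.getMockApplicationId(1), "hbase-rs");
    System.out.println("hbase-rs containers on default-rack: " + rackCount);
  }

The per-application and global rack mappings are maintained by the same add and remove paths as the node mappings, so RACK-scoped constraints can be evaluated without walking every node of the rack.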

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37f1a7b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
index c278606..7b0b959 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.log4j.Logger;
 
 import java.util.HashMap;
@@ -38,9 +39,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.function.LongBinaryOperator;
 
 /**
- * Support storing maps between container-tags/applications and
- * nodes. This will be required by affinity/anti-affinity implementation and
- * cardinality.
+ * In-memory mapping between applications/container-tags and nodes/racks.
+ * Required by constrained affinity/anti-affinity and cardinality placement.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -51,48 +51,54 @@ public class AllocationTagsManager {
 
   private ReentrantReadWriteLock.ReadLock readLock;
   private ReentrantReadWriteLock.WriteLock writeLock;
+  private final RMContext rmContext;
 
-  // Application's tags to node
-  private Map<ApplicationId, NodeToCountedTags> perAppMappings =
+  // Application's tags to Node
+  private Map<ApplicationId, NodeToCountedTags> perAppNodeMappings =
+      new HashMap<>();
+  // Application's tags to Rack
+  private Map<ApplicationId, NodeToCountedTags> perAppRackMappings =
       new HashMap<>();
 
   // Global tags to node mapping (used to fast return aggregated tags
   // cardinality across apps)
-  private NodeToCountedTags globalMapping = new NodeToCountedTags();
+  private NodeToCountedTags<NodeId> globalNodeMapping = new NodeToCountedTags();
+  // Global tags to Rack mapping
+  private NodeToCountedTags<String> globalRackMapping = new NodeToCountedTags();
 
   /**
-   * Store node to counted tags.
+   * Generic store mapping a key of type T to counted tags.
+   * Currently used both for NodeId to (Tag, Count) and Rack to (Tag, Count).
    */
   @VisibleForTesting
-  static class NodeToCountedTags {
-    // Map<NodeId, Map<Tag, Count>>
-    private Map<NodeId, Map<String, Long>> nodeToTagsWithCount =
-        new HashMap<>();
+  static class NodeToCountedTags<T> {
+    // Map<Type, Map<Tag, Count>>
+    private Map<T, Map<String, Long>> typeToTagsWithCount = new HashMap<>();
 
     // protected by external locks
-    private void addTagsToNode(NodeId nodeId, Set<String> tags) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.computeIfAbsent(nodeId,
-          k -> new HashMap<>());
+    private void addTags(T type, Set<String> tags) {
+      Map<String, Long> innerMap =
+          typeToTagsWithCount.computeIfAbsent(type, k -> new HashMap<>());
 
       for (String tag : tags) {
         Long count = innerMap.get(tag);
         if (count == null) {
           innerMap.put(tag, 1L);
-        } else{
+        } else {
           innerMap.put(tag, count + 1);
         }
       }
     }
 
     // protected by external locks
-    private void addTagToNode(NodeId nodeId, String tag) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.computeIfAbsent(nodeId,
-          k -> new HashMap<>());
+    private void addTag(T type, String tag) {
+      Map<String, Long> innerMap =
+          typeToTagsWithCount.computeIfAbsent(type, k -> new HashMap<>());
 
       Long count = innerMap.get(tag);
       if (count == null) {
         innerMap.put(tag, 1L);
-      } else{
+      } else {
         innerMap.put(tag, count + 1);
       }
     }
@@ -104,17 +110,17 @@ public class AllocationTagsManager {
       } else {
         if (count <= 0) {
           LOG.warn(
-              "Trying to remove tags from node, however the count already"
+              "Trying to remove tags from node/rack, however the count already"
                   + " becomes 0 or less, it could be a potential bug.");
         }
         innerMap.remove(tag);
       }
     }
 
-    private void removeTagsFromNode(NodeId nodeId, Set<String> tags) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+    private void removeTags(T type, Set<String> tags) {
+      Map<String, Long> innerMap = typeToTagsWithCount.get(type);
       if (innerMap == null) {
-        LOG.warn("Failed to find node=" + nodeId
+        LOG.warn("Failed to find node/rack=" + type
             + " while trying to remove tags, please double check.");
         return;
       }
@@ -124,14 +130,14 @@ public class AllocationTagsManager {
       }
 
       if (innerMap.isEmpty()) {
-        nodeToTagsWithCount.remove(nodeId);
+        typeToTagsWithCount.remove(type);
       }
     }
 
-    private void removeTagFromNode(NodeId nodeId, String tag) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+    private void removeTag(T type, String tag) {
+      Map<String, Long> innerMap = typeToTagsWithCount.get(type);
       if (innerMap == null) {
-        LOG.warn("Failed to find node=" + nodeId
+        LOG.warn("Failed to find node/rack=" + type
             + " while trying to remove tags, please double check.");
         return;
       }
@@ -139,12 +145,12 @@ public class AllocationTagsManager {
       removeTagFromInnerMap(innerMap, tag);
 
       if (innerMap.isEmpty()) {
-        nodeToTagsWithCount.remove(nodeId);
+        typeToTagsWithCount.remove(type);
       }
     }
 
-    private long getCardinality(NodeId nodeId, String tag) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+    private long getCardinality(T type, String tag) {
+      Map<String, Long> innerMap = typeToTagsWithCount.get(type);
       if (innerMap == null) {
         return 0;
       }
@@ -152,9 +158,9 @@ public class AllocationTagsManager {
       return value == null ? 0 : value;
     }
 
-    private long getCardinality(NodeId nodeId, Set<String> tags,
+    private long getCardinality(T type, Set<String> tags,
         LongBinaryOperator op) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+      Map<String, Long> innerMap = typeToTagsWithCount.get(type);
       if (innerMap == null) {
         return 0;
       }
@@ -193,29 +199,40 @@ public class AllocationTagsManager {
     }
 
     private boolean isEmpty() {
-      return nodeToTagsWithCount.isEmpty();
+      return typeToTagsWithCount.isEmpty();
     }
 
     @VisibleForTesting
-    public Map<NodeId, Map<String, Long>> getNodeToTagsWithCount() {
-      return nodeToTagsWithCount;
+    public Map<T, Map<String, Long>> getTypeToTagsWithCount() {
+      return typeToTagsWithCount;
     }
   }
 
   @VisibleForTesting
-  Map<ApplicationId, NodeToCountedTags> getPerAppMappings() {
-    return perAppMappings;
+  Map<ApplicationId, NodeToCountedTags> getPerAppNodeMappings() {
+    return perAppNodeMappings;
+  }
+
+  @VisibleForTesting
+  Map<ApplicationId, NodeToCountedTags> getPerAppRackMappings() {
+    return perAppRackMappings;
+  }
+
+  @VisibleForTesting
+  NodeToCountedTags getGlobalNodeMapping() {
+    return globalNodeMapping;
   }
 
   @VisibleForTesting
-  NodeToCountedTags getGlobalMapping() {
-    return globalMapping;
+  NodeToCountedTags getGlobalRackMapping() {
+    return globalRackMapping;
   }
 
-  public AllocationTagsManager() {
+  public AllocationTagsManager(RMContext context) {
     ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
     readLock = lock.readLock();
     writeLock = lock.writeLock();
+    rmContext = context;
   }
 
   /**
@@ -243,21 +260,30 @@ public class AllocationTagsManager {
 
     writeLock.lock();
     try {
-      NodeToCountedTags perAppTagsMapping = perAppMappings.computeIfAbsent(
-          applicationId, k -> new NodeToCountedTags());
-
+      NodeToCountedTags perAppTagsMapping = perAppNodeMappings
+          .computeIfAbsent(applicationId, k -> new NodeToCountedTags());
+      NodeToCountedTags perAppRackTagsMapping = perAppRackMappings
+          .computeIfAbsent(applicationId, k -> new NodeToCountedTags());
+      // Covering test-cases where context is mocked
+      String nodeRack = (rmContext.getRMNodes() != null
+          && rmContext.getRMNodes().get(nodeId) != null)
+              ? rmContext.getRMNodes().get(nodeId).getRackName()
+              : "default-rack";
       if (useSet) {
-        perAppTagsMapping.addTagsToNode(nodeId, allocationTags);
-        globalMapping.addTagsToNode(nodeId, allocationTags);
+        perAppTagsMapping.addTags(nodeId, allocationTags);
+        perAppRackTagsMapping.addTags(nodeRack, allocationTags);
+        globalNodeMapping.addTags(nodeId, allocationTags);
+        globalRackMapping.addTags(nodeRack, allocationTags);
       } else {
-        perAppTagsMapping.addTagToNode(nodeId, applicationIdTag);
-        globalMapping.addTagToNode(nodeId, applicationIdTag);
+        perAppTagsMapping.addTag(nodeId, applicationIdTag);
+        perAppRackTagsMapping.addTag(nodeRack, applicationIdTag);
+        globalNodeMapping.addTag(nodeId, applicationIdTag);
+        globalRackMapping.addTag(nodeRack, applicationIdTag);
       }
 
       if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Added container=" + containerId + " with tags=[" + StringUtils
-                .join(allocationTags, ",") + "]");
+        LOG.debug("Added container=" + containerId + " with tags=["
+            + StringUtils.join(allocationTags, ",") + "]");
       }
     } finally {
       writeLock.unlock();
@@ -287,27 +313,40 @@ public class AllocationTagsManager {
 
     writeLock.lock();
     try {
-      NodeToCountedTags perAppTagsMapping = perAppMappings.get(applicationId);
+      NodeToCountedTags perAppTagsMapping =
+          perAppNodeMappings.get(applicationId);
+      NodeToCountedTags perAppRackTagsMapping =
+          perAppRackMappings.get(applicationId);
       if (perAppTagsMapping == null) {
         return;
       }
-
+      // Covering test-cases where context is mocked
+      String nodeRack = (rmContext.getRMNodes() != null
+          && rmContext.getRMNodes().get(nodeId) != null)
+              ? rmContext.getRMNodes().get(nodeId).getRackName()
+              : "default-rack";
       if (useSet) {
-        perAppTagsMapping.removeTagsFromNode(nodeId, allocationTags);
-        globalMapping.removeTagsFromNode(nodeId, allocationTags);
+        perAppTagsMapping.removeTags(nodeId, allocationTags);
+        perAppRackTagsMapping.removeTags(nodeRack, allocationTags);
+        globalNodeMapping.removeTags(nodeId, allocationTags);
+        globalRackMapping.removeTags(nodeRack, allocationTags);
       } else {
-        perAppTagsMapping.removeTagFromNode(nodeId, applicationIdTag);
-        globalMapping.removeTagFromNode(nodeId, applicationIdTag);
+        perAppTagsMapping.removeTag(nodeId, applicationIdTag);
+        perAppRackTagsMapping.removeTag(nodeRack, applicationIdTag);
+        globalNodeMapping.removeTag(nodeId, applicationIdTag);
+        globalRackMapping.removeTag(nodeRack, applicationIdTag);
       }
 
       if (perAppTagsMapping.isEmpty()) {
-        perAppMappings.remove(applicationId);
+        perAppNodeMappings.remove(applicationId);
+      }
+      if (perAppRackTagsMapping.isEmpty()) {
+        perAppRackMappings.remove(applicationId);
       }
 
       if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Removed container=" + containerId + " with tags=[" + StringUtils
-                .join(allocationTags, ",") + "]");
+        LOG.debug("Removed container=" + containerId + " with tags=["
+            + StringUtils.join(allocationTags, ",") + "]");
       }
     } finally {
       writeLock.unlock();
@@ -315,18 +354,16 @@ public class AllocationTagsManager {
   }
 
   /**
-   * Get cardinality for following conditions. External can pass-in a binary op
-   * to implement customized logic.   *
+   * Get Node cardinality for a specific tag.
+   * When applicationId is null, the method returns the aggregated cardinality
+   *
    * @param nodeId        nodeId, required.
    * @param applicationId applicationId. When null is specified, return
    *                      aggregated cardinality among all nodes.
    * @param tag           allocation tag, see
    *                      {@link SchedulingRequest#getAllocationTags()},
-   *                      When multiple tags specified. Returns cardinality
-   *                      depends on op. If a specified tag doesn't exist,
-   *                      0 will be its cardinality.
-   *                      When null/empty tags specified, all tags
-   *                      (of the node/app) will be considered.
+   *                      If a specified tag doesn't exist,
+   *                      method returns 0.
    * @return cardinality of specified query on the node.
    * @throws InvalidAllocationTagsQueryException when illegal query
    *                                            parameter specified
@@ -338,14 +375,14 @@ public class AllocationTagsManager {
     try {
       if (nodeId == null) {
         throw new InvalidAllocationTagsQueryException(
-            "Must specify nodeId/tags/op to query cardinality");
+            "Must specify nodeId/tag to query cardinality");
       }
 
       NodeToCountedTags mapping;
       if (applicationId != null) {
-        mapping = perAppMappings.get(applicationId);
-      } else{
-        mapping = globalMapping;
+        mapping = perAppNodeMappings.get(applicationId);
+      } else {
+        mapping = globalNodeMapping;
       }
 
       if (mapping == null) {
@@ -359,11 +396,54 @@ public class AllocationTagsManager {
   }
 
   /**
+   * Get Rack cardinality for a specific tag.
+   *
+   * @param rack          rack, required.
+   * @param applicationId applicationId. When null is specified, return
+   *                      aggregated cardinality among all nodes.
+   * @param tag           allocation tag, see
+   *                      {@link SchedulingRequest#getAllocationTags()},
+   *                      If a specified tag doesn't exist,
+   *                      method returns 0.
+   * @return cardinality of specified query on the rack.
+   * @throws InvalidAllocationTagsQueryException when illegal query
+   *                                            parameter specified
+   */
+  public long getRackCardinality(String rack, ApplicationId applicationId,
+      String tag) throws InvalidAllocationTagsQueryException {
+    readLock.lock();
+
+    try {
+      if (rack == null) {
+        throw new InvalidAllocationTagsQueryException(
+            "Must specify rack/tag to query cardinality");
+      }
+
+      NodeToCountedTags mapping;
+      if (applicationId != null) {
+        mapping = perAppRackMappings.get(applicationId);
+      } else {
+        mapping = globalRackMapping;
+      }
+
+      if (mapping == null) {
+        return 0;
+      }
+
+      return mapping.getCardinality(rack, tag);
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+
+
+  /**
    * Check if given tag exists on node.
    *
    * @param nodeId        nodeId, required.
    * @param applicationId applicationId. When null is specified, return
-   *                      aggregated cardinality among all nodes.
+   *                      aggregation among all applications.
    * @param tag           allocation tag, see
    *                      {@link SchedulingRequest#getAllocationTags()},
    *                      When multiple tags specified. Returns cardinality
@@ -387,7 +467,7 @@ public class AllocationTagsManager {
    *
    * @param nodeId        nodeId, required.
    * @param applicationId applicationId. When null is specified, return
-   *                      aggregated cardinality among all nodes.
+   *                      aggregated cardinality among all applications.
    * @param tags          allocation tags, see
    *                      {@link SchedulingRequest#getAllocationTags()},
    *                      When multiple tags specified. Returns cardinality
@@ -396,7 +476,7 @@ public class AllocationTagsManager {
    *                      specified, all tags (of the node/app) will be
    *                      considered.
    * @param op            operator. Such as Long::max, Long::sum, etc. Required.
-   *                      This sparameter only take effect when #values >= 2.
+   *                      This parameter only takes effect when #values >= 2.
    * @return cardinality of specified query on the node.
    * @throws InvalidAllocationTagsQueryException when illegal query
    *                                            parameter specified
@@ -414,9 +494,9 @@ public class AllocationTagsManager {
 
       NodeToCountedTags mapping;
       if (applicationId != null) {
-        mapping = perAppMappings.get(applicationId);
-      } else{
-        mapping = globalMapping;
+        mapping = perAppNodeMappings.get(applicationId);
+      } else {
+        mapping = globalNodeMapping;
       }
 
       if (mapping == null) {
@@ -428,4 +508,52 @@ public class AllocationTagsManager {
       readLock.unlock();
     }
   }
+
+  /**
+   * Get rack cardinality for the given conditions. Callers can pass in a
+   * binary op to implement customized logic.
+   *
+   * @param rack          rack, required.
+   * @param applicationId applicationId. When null is specified, return
+   *                      aggregated cardinality among all applications.
+   * @param tags          allocation tags, see
+   *                      {@link SchedulingRequest#getAllocationTags()},
+   *                      When multiple tags are specified, the returned
+   *                      cardinality depends on op. If a specified tag doesn't
+   *                      exist, its cardinality is 0. When null/empty tags are
+   *                      specified, all tags (of the rack/app) will be
+   *                      considered.
+   * @param op            operator. Such as Long::max, Long::sum, etc. Required.
+   *                      This parameter only takes effect when #values >= 2.
+   * @return cardinality of specified query on the rack.
+   * @throws InvalidAllocationTagsQueryException when illegal query
+   *                                            parameter specified
+   */
+  public long getRackCardinalityByOp(String rack, ApplicationId applicationId,
+      Set<String> tags, LongBinaryOperator op)
+      throws InvalidAllocationTagsQueryException {
+    readLock.lock();
+
+    try {
+      if (rack == null || op == null) {
+        throw new InvalidAllocationTagsQueryException(
+            "Must specify rack/tags/op to query cardinality");
+      }
+
+      NodeToCountedTags mapping;
+      if (applicationId != null) {
+        mapping = perAppRackMappings.get(applicationId);
+      } else {
+        mapping = globalRackMapping;
+      }
+
+      if (mapping == null) {
+        return 0;
+      }
+
+      return mapping.getCardinality(rack, tags, op);
+    } finally {
+      readLock.unlock();
+    }
+  }
 }

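As a usage sketch (not part of the patch), the rack-level queries added above can be exercised as below. The AllocationTagsManager instance atm, the ApplicationId appId and the tag/rack names are assumed for illustration; com.google.common.collect.ImmutableSet is used as in the tests, and the checked InvalidAllocationTagsQueryException still has to be handled by the caller.

    // Per-application count of allocations tagged "hbase" on rack0 (illustrative names).
    long perApp = atm.getRackCardinality("rack0", appId, "hbase");

    // A null ApplicationId aggregates the tag count across all applications on the rack.
    long allApps = atm.getRackCardinality("rack0", null, "hbase");

    // With several tags, the per-tag counts are folded with the supplied operator.
    long maxOfTags = atm.getRackCardinalityByOp("rack0", appId,
        ImmutableSet.of("mapper", "reducer"), Long::max);
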
http://git-wip-us.apache.org/repos/asf/hadoop/blob/37f1a7b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
index 538d128..b927870 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
@@ -405,8 +405,8 @@ public class TestRMContainerImpl {
 
     RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
     SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class);
-    AllocationTagsManager tagsManager = new AllocationTagsManager();
     RMContext rmContext = mock(RMContext.class);
+    AllocationTagsManager tagsManager = new AllocationTagsManager(rmContext);
     when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
     when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
     when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37f1a7b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
index 4bb2a18..0ce1614 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
@@ -20,202 +20,300 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
 
-import com.google.common.collect.ImmutableSet;
+import java.util.List;
+
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.collect.ImmutableSet;
+
 /**
  * Test functionality of AllocationTagsManager.
  */
 public class TestAllocationTagsManager {
+  private RMContext rmContext;
+
+  @Before
+  public void setup() {
+    MockRM rm = new MockRM();
+    rm.start();
+    MockNodes.resetHostIds();
+    List<RMNode> rmNodes =
+        MockNodes.newNodes(2, 4, Resource.newInstance(4096, 4));
+    for (RMNode rmNode : rmNodes) {
+      rm.getRMContext().getRMNodes().putIfAbsent(rmNode.getNodeID(), rmNode);
+    }
+    rmContext = rm.getRMContext();
+  }
+
+
   @Test
   public void testAllocationTagsManagerSimpleCases()
       throws InvalidAllocationTagsQueryException {
-    AllocationTagsManager atm = new AllocationTagsManager();
+
+    AllocationTagsManager atm = new AllocationTagsManager(rmContext);
 
     /**
      * Construct test case:
-     * Node1:
+     * Node1 (rack0):
      *    container_1_1 (mapper/reducer/app_1)
      *    container_1_3 (service/app_1)
      *
-     * Node2:
+     * Node2 (rack0):
      *    container_1_2 (mapper/reducer/app_1)
      *    container_1_4 (reducer/app_1)
      *    container_2_1 (service/app_2)
      */
 
     // 3 Containers from app1
-    atm.addContainer(NodeId.fromString("node1:1234"),
+    atm.addContainer(NodeId.fromString("host1:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
-    atm.addContainer(NodeId.fromString("node2:1234"),
+    atm.addContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
         ImmutableSet.of("mapper", "reducer"));
 
-    atm.addContainer(NodeId.fromString("node1:1234"),
+    atm.addContainer(NodeId.fromString("host1:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
         ImmutableSet.of("service"));
 
-    atm.addContainer(NodeId.fromString("node2:1234"),
+    atm.addContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
         ImmutableSet.of("reducer"));
 
     // 1 Container from app2
-    atm.addContainer(NodeId.fromString("node2:1234"),
+    atm.addContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
         ImmutableSet.of("service"));
 
-    // Get Cardinality of app1 on node1, with tag "mapper"
+    // Get Node Cardinality of app1 on node1, with tag "mapper"
     Assert.assertEquals(1,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node1:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
             TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
             Long::max));
 
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=min
+    // Get Rack Cardinality of app1 on rack0, with tag "mapper"
+    Assert.assertEquals(2, atm.getRackCardinality("rack0",
+        TestUtils.getMockApplicationId(1), "mapper"));
+
+    // Get Node Cardinality of app1 on node2, with tag "mapper/reducer", op=min
     Assert.assertEquals(1,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1),
             ImmutableSet.of("mapper", "reducer"), Long::min));
 
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=max
+    // Get Node Cardinality of app1 on node2, with tag "mapper/reducer", op=max
     Assert.assertEquals(2,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1),
             ImmutableSet.of("mapper", "reducer"), Long::max));
 
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=sum
+    // Get Node Cardinality of app1 on node2, with tag "mapper/reducer", op=sum
     Assert.assertEquals(3,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1),
             ImmutableSet.of("mapper", "reducer"), Long::sum));
 
-    // Get Cardinality by passing single tag.
+    // Get Node Cardinality by passing single tag.
     Assert.assertEquals(1,
-        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinality(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1), "mapper"));
 
     Assert.assertEquals(2,
-        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinality(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1), "reducer"));
 
-    // Get Cardinality of app1 on node2, with tag "no_existed/reducer", op=min
+    // Get Node Cardinality of app1 on node2, with tag "no_existed/reducer",
+    // op=min
     Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1),
             ImmutableSet.of("no_existed", "reducer"), Long::min));
 
-    // Get Cardinality of app1 on node2, with tag "<applicationId>", op=max
+    // Get Node Cardinality of app1 on node2, with tag "<applicationId>", op=max
     // (Expect this returns #containers from app1 on node2)
+    Assert
+        .assertEquals(2,
+            atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
+                TestUtils.getMockApplicationId(1),
+                ImmutableSet.of(AllocationTagsNamespaces.APP_ID
+                    + TestUtils.getMockApplicationId(1).toString()),
+                Long::max));
+
+    // Get Node Cardinality of app1 on node2, with empty tag set, op=max
     Assert.assertEquals(2,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1), ImmutableSet
-                .of(AllocationTagsNamespaces.APP_ID + TestUtils
-                    .getMockApplicationId(1).toString()), Long::max));
-
-    // Get Cardinality of app1 on node2, with empty tag set, op=max
-    Assert.assertEquals(2,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::max));
 
-    // Get Cardinality of all apps on node2, with empty tag set, op=sum
-    Assert.assertEquals(7,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"), null,
-            ImmutableSet.of(), Long::sum));
+    // Get Node Cardinality of all apps on node2, with empty tag set, op=sum
+    Assert.assertEquals(7, atm.getNodeCardinalityByOp(
+        NodeId.fromString("host2:123"), null, ImmutableSet.of(), Long::sum));
 
-    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
+    // Get Node Cardinality of app_1 on node2, with empty tag set, op=sum
     Assert.assertEquals(5,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::sum));
 
-    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
+    // Get Node Cardinality of app_1 on node2, with empty tag set, op=sum
     Assert.assertEquals(2,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(2), ImmutableSet.of(), Long::sum));
 
     // Finish all containers:
-    atm.removeContainer(NodeId.fromString("node1:1234"),
+    atm.removeContainer(NodeId.fromString("host1:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
-    atm.removeContainer(NodeId.fromString("node2:1234"),
+    atm.removeContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
         ImmutableSet.of("mapper", "reducer"));
 
-    atm.removeContainer(NodeId.fromString("node1:1234"),
+    atm.removeContainer(NodeId.fromString("host1:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
         ImmutableSet.of("service"));
 
-    atm.removeContainer(NodeId.fromString("node2:1234"),
+    atm.removeContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
         ImmutableSet.of("reducer"));
 
-    atm.removeContainer(NodeId.fromString("node2:1234"),
+    atm.removeContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
         ImmutableSet.of("service"));
 
     // Expect all cardinality to be 0
     // Get Cardinality of app1 on node1, with tag "mapper"
     Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node1:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
             TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
             Long::max));
 
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=min
+    // Get Node Cardinality of app1 on node2, with tag "mapper/reducer", op=min
     Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1),
             ImmutableSet.of("mapper", "reducer"), Long::min));
 
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=max
+    // Get Node Cardinality of app1 on node2, with tag "mapper/reducer", op=max
     Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1),
             ImmutableSet.of("mapper", "reducer"), Long::max));
 
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=sum
+    // Get Node Cardinality of app1 on node2, with tag "mapper/reducer", op=sum
     Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1),
             ImmutableSet.of("mapper", "reducer"), Long::sum));
 
-    // Get Cardinality of app1 on node2, with tag "<applicationId>", op=max
+    // Get Node Cardinality of app1 on node2, with tag "<applicationId>", op=max
     // (Expect this returns #containers from app1 on node2)
     Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1),
             ImmutableSet.of(TestUtils.getMockApplicationId(1).toString()),
             Long::max));
 
     Assert.assertEquals(0,
-        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinality(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1),
             TestUtils.getMockApplicationId(1).toString()));
 
-    // Get Cardinality of app1 on node2, with empty tag set, op=max
+    // Get Node Cardinality of app1 on node2, with empty tag set, op=max
     Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::max));
 
-    // Get Cardinality of all apps on node2, with empty tag set, op=sum
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"), null,
-            ImmutableSet.of(), Long::sum));
+    // Get Node Cardinality of all apps on node2, with empty tag set, op=sum
+    Assert.assertEquals(0, atm.getNodeCardinalityByOp(
+        NodeId.fromString("host2:123"), null, ImmutableSet.of(), Long::sum));
 
-    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
+    // Get Node Cardinality of app_1 on node2, with empty tag set, op=sum
     Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::sum));
 
-    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
+    // Get Node Cardinality of app_2 on node2, with empty tag set, op=sum
     Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(2), ImmutableSet.of(), Long::sum));
   }
 
+
+  @Test
+  public void testAllocationTagsManagerRackMapping()
+      throws InvalidAllocationTagsQueryException {
+
+    AllocationTagsManager atm = new AllocationTagsManager(rmContext);
+
+    /**
+     * Construct Rack test case:
+     * Node1 (rack0):
+     *    container_1_1 (mapper/reducer/app_1)
+     *    container_2_4 (reducer/app_2)
+     *
+     * Node2 (rack0):
+     *    container_2_2 (mapper/reducer/app_2)
+     *    container_1_3 (service/app_1)
+     *    container_2_3 (service/app_2)
+     *
+     * (Both nodes are on rack0.)
+     */
+
+    // 2 Containers from app1, 2 Containers from app2
+    atm.addContainer(NodeId.fromString("host1:123"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("host2:123"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("host1:123"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 4),
+        ImmutableSet.of("reducer"));
+
+    atm.addContainer(NodeId.fromString("host2:123"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    // 1 Container from app2
+    atm.addContainer(NodeId.fromString("host2:123"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // Get Rack Cardinality of app1 on rack0, with tag "mapper"
+    Assert.assertEquals(1, atm.getRackCardinality("rack0",
+        TestUtils.getMockApplicationId(1), "mapper"));
+
+    // Get Rack Cardinality of app2 on rack0, with tag "reducer"
+    Assert.assertEquals(2, atm.getRackCardinality("rack0",
+        TestUtils.getMockApplicationId(2), "reducer"));
+
+    // Get Rack Cardinality of all apps on rack0, with tag "reducer"
+    Assert.assertEquals(3, atm.getRackCardinality("rack0", null, "reducer"));
+
+    // Get Rack Cardinality of app_1 on rack0, with empty tag set, op=max
+    Assert.assertEquals(2, atm.getRackCardinalityByOp("rack0",
+        TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::max));
+
+    // Get Rack Cardinality of app_1 on rack0, with empty tag set, op=min
+    Assert.assertEquals(1, atm.getRackCardinalityByOp("rack0",
+        TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::min));
+
+    // Get Rack Cardinality of all apps on rack0, with empty tag set, op=max
+    Assert.assertEquals(3, atm.getRackCardinalityByOp("rack0", null,
+        ImmutableSet.of(), Long::max));
+  }
+
   @Test
   public void testAllocationTagsManagerMemoryAfterCleanup()
       throws InvalidAllocationTagsQueryException {
@@ -223,54 +321,57 @@ public class TestAllocationTagsManager {
      * Make sure YARN cleans up all memory once container/app finishes.
      */
 
-    AllocationTagsManager atm = new AllocationTagsManager();
+    AllocationTagsManager atm = new AllocationTagsManager(rmContext);
 
     // Add a bunch of containers
-    atm.addContainer(NodeId.fromString("node1:1234"),
+    atm.addContainer(NodeId.fromString("host1:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
-    atm.addContainer(NodeId.fromString("node2:1234"),
+    atm.addContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
         ImmutableSet.of("mapper", "reducer"));
 
-    atm.addContainer(NodeId.fromString("node1:1234"),
+    atm.addContainer(NodeId.fromString("host1:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
         ImmutableSet.of("service"));
 
-    atm.addContainer(NodeId.fromString("node2:1234"),
+    atm.addContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
         ImmutableSet.of("reducer"));
 
-    atm.addContainer(NodeId.fromString("node2:1234"),
+    atm.addContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
         ImmutableSet.of("service"));
 
     // Remove all these containers
-    atm.removeContainer(NodeId.fromString("node1:1234"),
+    atm.removeContainer(NodeId.fromString("host1:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
-    atm.removeContainer(NodeId.fromString("node2:1234"),
+    atm.removeContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
         ImmutableSet.of("mapper", "reducer"));
 
-    atm.removeContainer(NodeId.fromString("node1:1234"),
+    atm.removeContainer(NodeId.fromString("host1:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
         ImmutableSet.of("service"));
 
-    atm.removeContainer(NodeId.fromString("node2:1234"),
+    atm.removeContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
         ImmutableSet.of("reducer"));
 
-    atm.removeContainer(NodeId.fromString("node2:1234"),
+    atm.removeContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
         ImmutableSet.of("service"));
 
     // Check internal data structure
     Assert.assertEquals(0,
-        atm.getGlobalMapping().getNodeToTagsWithCount().size());
-    Assert.assertEquals(0, atm.getPerAppMappings().size());
+        atm.getGlobalNodeMapping().getTypeToTagsWithCount().size());
+    Assert.assertEquals(0, atm.getPerAppNodeMappings().size());
+    Assert.assertEquals(0,
+        atm.getGlobalRackMapping().getTypeToTagsWithCount().size());
+    Assert.assertEquals(0, atm.getPerAppRackMappings().size());
   }
 
   @Test
@@ -280,26 +381,26 @@ public class TestAllocationTagsManager {
      * Make sure YARN cleans up all memory once container/app finishes.
      */
 
-    AllocationTagsManager atm = new AllocationTagsManager();
+    AllocationTagsManager atm = new AllocationTagsManager(rmContext);
 
     // Add a bunch of containers
-    atm.addContainer(NodeId.fromString("node1:1234"),
+    atm.addContainer(NodeId.fromString("host1:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
-    atm.addContainer(NodeId.fromString("node2:1234"),
+    atm.addContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
         ImmutableSet.of("mapper", "reducer"));
 
-    atm.addContainer(NodeId.fromString("node1:1234"),
+    atm.addContainer(NodeId.fromString("host1:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
         ImmutableSet.of("service"));
 
-    atm.addContainer(NodeId.fromString("node2:1234"),
+    atm.addContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
         ImmutableSet.of("reducer"));
 
-    atm.addContainer(NodeId.fromString("node2:1234"),
+    atm.addContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
         ImmutableSet.of("service"));
 
@@ -317,7 +418,7 @@ public class TestAllocationTagsManager {
     // No op
     caughtException = false;
     try {
-      atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+      atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
           TestUtils.getMockApplicationId(2), ImmutableSet.of("mapper"), null);
     } catch (InvalidAllocationTagsQueryException e) {
       caughtException = true;




[25/32] hadoop git commit: YARN-7774. Miscellaneous fixes to the PlacementProcessor. (asuresh)

Posted by as...@apache.org.
YARN-7774. Miscellaneous fixes to the PlacementProcessor. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28fe7f33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28fe7f33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28fe7f33

Branch: refs/heads/trunk
Commit: 28fe7f331837b36e78fa34ed990993677dddeaee
Parents: e60f512
Author: Arun Suresh <as...@apache.org>
Authored: Thu Jan 18 11:01:36 2018 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../scheduler/SchedulerNode.java                | 16 +++-
 .../scheduler/capacity/CapacityScheduler.java   |  4 +
 .../constraint/PlacementConstraintsUtil.java    |  5 +-
 .../constraint/algorithm/CircularIterator.java  | 86 ++++++++++++++++++++
 .../algorithm/DefaultPlacementAlgorithm.java    | 50 ++++++++++--
 .../constraint/processor/BatchedRequests.java   |  8 ++
 .../SingleConstraintAppPlacementAllocator.java  |  2 +-
 .../yarn/server/resourcemanager/MockAM.java     |  4 +-
 .../constraint/TestPlacementProcessor.java      | 24 +++---
 .../algorithm/TestCircularIterator.java         | 84 +++++++++++++++++++
 ...stSingleConstraintAppPlacementAllocator.java | 28 +++----
 11 files changed, 271 insertions(+), 40 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28fe7f33/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
index 89f748d..96a8e34 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
@@ -469,6 +469,20 @@ public abstract class SchedulerNode {
     this.lastHeartbeatMonotonicTime = Time.monotonicNow();
   }
 
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (!(o instanceof SchedulerNode)) return false;
+
+    SchedulerNode that = (SchedulerNode) o;
+
+    return getNodeID().equals(that.getNodeID());
+  }
+
+  @Override
+  public int hashCode() {
+    return getNodeID().hashCode();
+  }
 
   private static class ContainerInfo {
     private final RMContainer container;
@@ -479,4 +493,4 @@ public abstract class SchedulerNode {
       this.launchedOnNode = launchedOnNode;
     }
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28fe7f33/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index c713036..429f9f3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -2610,6 +2610,10 @@ public class CapacityScheduler extends
             " but only 1 will be attempted !!");
       }
       if (!appAttempt.isStopped()) {
+        Resource resource =
+            schedulingRequest.getResourceSizing().getResources();
+        schedulingRequest.getResourceSizing().setResources(
+            getNormalizedResource(resource));
         ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode>
             resourceCommitRequest = createResourceCommitRequest(
             appAttempt, schedulingRequest, schedulerNode);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28fe7f33/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
index 24c5a5e..ff5cb67 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
@@ -99,14 +99,11 @@ public final class PlacementConstraintsUtil {
             targetApplicationId, te.getTargetValues(), Long::min);
       }
     }
-    // Make sure Anti-affinity satisfies hard upper limit
-    maxScopeCardinality = desiredMaxCardinality == 0 ? maxScopeCardinality - 1
-        : maxScopeCardinality;
 
     return (desiredMinCardinality <= 0
         || minScopeCardinality >= desiredMinCardinality) && (
         desiredMaxCardinality == Integer.MAX_VALUE
-            || maxScopeCardinality < desiredMaxCardinality);
+            || maxScopeCardinality <= desiredMaxCardinality);
   }
 
   private static boolean canSatisfyNodePartitionConstraintExpresssion(

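To make the relaxed check concrete: anti-affinity is expressed in this patch as minCardinality=0, maxCardinality=0 (e.g. via targetNotIn in the MockAM change below), so it is now handled directly by the <= comparison instead of the removed "maxScopeCardinality - 1" special case. A sketch with those values plugged into the returned expression:

    // Sketch only: values as they would look for an anti-affinity constraint.
    int desiredMinCardinality = 0;
    int desiredMaxCardinality = 0;
    long minScopeCardinality = 0;  // occurrences of the target tag in the scope
    long maxScopeCardinality = 0;  // 0 on a node without the tag, >= 1 otherwise

    boolean satisfied =
        (desiredMinCardinality <= 0
            || minScopeCardinality >= desiredMinCardinality)
        && (desiredMaxCardinality == Integer.MAX_VALUE
            || maxScopeCardinality <= desiredMaxCardinality);
    // true while the tag is absent on the node (0 <= 0); false once it appears (1 > 0).
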
http://git-wip-us.apache.org/repos/asf/hadoop/blob/28fe7f33/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/CircularIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/CircularIterator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/CircularIterator.java
new file mode 100644
index 0000000..bf9503b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/CircularIterator.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm;
+
+import java.util.Iterator;
+
+/**
+ * Iterator that picks up from the current position of an existing iterator
+ * and circularly iterates until it wraps back around to that point.
+ */
+class CircularIterator<T> {
+  private Iterator<T> iterator = null;
+  private final Iterable<T> iterable;
+
+  private T startElem = null;
+  private T nextElem = null;
+
+  // If not null, this overrides the starting element.
+  private T firstElem = null;
+
+  // Can't handle empty or null lists.
+  CircularIterator(T first, Iterator<T> iter,
+      Iterable<T> iterable) {
+    this.firstElem = first;
+    this.iterable = iterable;
+    if (!iter.hasNext()) {
+      this.iterator = this.iterable.iterator();
+    } else {
+      this.iterator = iter;
+    }
+    this.startElem = this.iterator.next();
+    this.nextElem = this.startElem;
+  }
+
+  boolean hasNext() {
+    if (this.nextElem != null || this.firstElem != null) {
+      return true;
+    } else {
+      if (this.iterator.hasNext()) {
+        T next = this.iterator.next();
+        if (this.startElem.equals(next)) {
+          return false;
+        } else {
+          this.nextElem = next;
+          return true;
+        }
+      } else {
+        this.iterator = this.iterable.iterator();
+        this.nextElem = this.iterator.next();
+        if (this.startElem.equals(this.nextElem)) {
+          return false;
+        }
+        return true;
+      }
+    }
+  }
+
+  T next() {
+    T retVal;
+    if (this.firstElem != null) {
+      retVal = this.firstElem;
+      this.firstElem = null;
+    } else if (this.nextElem != null) {
+      retVal = this.nextElem;
+      this.nextElem = null;
+    } else {
+      retVal = this.iterator.next();
+    }
+    return retVal;
+  }
+}

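A usage sketch for the new iterator (CircularIterator is package-private, so this assumes code in the same algorithm package; the list contents are illustrative). Iteration resumes from wherever the wrapped iterator currently points, wraps around the backing iterable, and stops after one full cycle; a non-null first element, if given, is emitted before that.

    List<String> nodes = Arrays.asList("n1", "n2", "n3", "n4");
    Iterator<String> it = nodes.iterator();
    it.next();  // the wrapped iterator has already moved past "n1"

    CircularIterator<String> ci = new CircularIterator<>(null, it, nodes);
    StringBuilder order = new StringBuilder();
    while (ci.hasNext()) {
      order.append(ci.next()).append(' ');
    }
    // order is "n2 n3 n4 n1 " -- one full cycle starting from the iterator's position.
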
http://git-wip-us.apache.org/repos/asf/hadoop/blob/28fe7f33/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
index eb3fe88..a0749f5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm;
 
+import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
@@ -49,6 +50,9 @@ public class DefaultPlacementAlgorithm implements ConstraintPlacementAlgorithm {
   private static final Logger LOG =
       LoggerFactory.getLogger(DefaultPlacementAlgorithm.class);
 
+  // Maximum number of placement passes made over a batch of scheduling requests.
+  private static final int RE_ATTEMPT_COUNT = 2;
+
   private AllocationTagsManager tagsManager;
   private PlacementConstraintManager constraintManager;
   private NodeCandidateSelector nodeSelector;
@@ -85,16 +89,50 @@ public class DefaultPlacementAlgorithm implements ConstraintPlacementAlgorithm {
         new ConstraintPlacementAlgorithmOutput(requests.getApplicationId());
     List<SchedulerNode> allNodes = nodeSelector.selectNodes(null);
 
+    List<SchedulingRequest> rejectedRequests = new ArrayList<>();
+    int rePlacementCount = RE_ATTEMPT_COUNT;
+    while (rePlacementCount > 0) {
+      doPlacement(requests, resp, allNodes, rejectedRequests);
+      if (rejectedRequests.size() == 0 || rePlacementCount == 1) {
+        break;
+      }
+      requests = new BatchedRequests(requests.getIteratorType(),
+          requests.getApplicationId(), rejectedRequests,
+          requests.getPlacementAttempt());
+      rejectedRequests = new ArrayList<>();
+      rePlacementCount--;
+    }
+
+    resp.getRejectedRequests().addAll(rejectedRequests);
+    collector.collect(resp);
+    // Clean current temp-container tags
+    this.tagsManager.cleanTempContainers(requests.getApplicationId());
+  }
+
+  private void doPlacement(BatchedRequests requests,
+      ConstraintPlacementAlgorithmOutput resp,
+      List<SchedulerNode> allNodes,
+      List<SchedulingRequest> rejectedRequests) {
     Iterator<SchedulingRequest> requestIterator = requests.iterator();
+    Iterator<SchedulerNode> nIter = allNodes.iterator();
+    SchedulerNode lastSatisfiedNode = null;
     while (requestIterator.hasNext()) {
+      if (allNodes.isEmpty()) {
+        LOG.warn("No nodes available for placement at the moment !!");
+        break;
+      }
       SchedulingRequest schedulingRequest = requestIterator.next();
-      Iterator<SchedulerNode> nodeIter = allNodes.iterator();
+      CircularIterator<SchedulerNode> nodeIter =
+          new CircularIterator(lastSatisfiedNode, nIter, allNodes);
       int numAllocs = schedulingRequest.getResourceSizing().getNumAllocations();
       while (nodeIter.hasNext() && numAllocs > 0) {
         SchedulerNode node = nodeIter.next();
         try {
-          if (attemptPlacementOnNode(requests.getApplicationId(),
-              schedulingRequest, node)) {
+          String tag = schedulingRequest.getAllocationTags() == null ? "" :
+              schedulingRequest.getAllocationTags().iterator().next();
+          if (!requests.getBlacklist(tag).contains(node.getNodeID()) &&
+              attemptPlacementOnNode(
+                  requests.getApplicationId(), schedulingRequest, node)) {
             schedulingRequest.getResourceSizing()
                 .setNumAllocations(--numAllocs);
             PlacedSchedulingRequest placedReq =
@@ -108,6 +146,7 @@ public class DefaultPlacementAlgorithm implements ConstraintPlacementAlgorithm {
             this.tagsManager.addTempContainer(node.getNodeID(),
                 requests.getApplicationId(),
                 schedulingRequest.getAllocationTags());
+            lastSatisfiedNode = node;
           }
         } catch (InvalidAllocationTagsQueryException e) {
           LOG.warn("Got exception from TagManager !", e);
@@ -117,9 +156,6 @@ public class DefaultPlacementAlgorithm implements ConstraintPlacementAlgorithm {
     // Add all requests whose numAllocations still > 0 to rejected list.
     requests.getSchedulingRequests().stream()
         .filter(sReq -> sReq.getResourceSizing().getNumAllocations() > 0)
-        .forEach(rejReq -> resp.getRejectedRequests().add(rejReq));
-    collector.collect(resp);
-    // Clean current temp-container tags
-    this.tagsManager.cleanTempContainers(requests.getApplicationId());
+        .forEach(rejReq -> rejectedRequests.add(rejReq));
   }
 }

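Condensed, the placement loop added above makes at most RE_ATTEMPT_COUNT passes over a batch: requests rejected in the first pass are re-batched and tried once more, presumably because placements made later in the first pass add allocation tags that earlier-rejected requests may depend on. A sketch of the flow (names follow the patch; placement details elided):

    List<SchedulingRequest> rejected = new ArrayList<>();
    int attemptsLeft = RE_ATTEMPT_COUNT;  // 2 in the patch
    while (attemptsLeft > 0) {
      doPlacement(requests, resp, allNodes, rejected);
      if (rejected.isEmpty() || attemptsLeft == 1) {
        break;
      }
      // Re-batch only the rejected requests and try again.
      requests = new BatchedRequests(requests.getIteratorType(),
          requests.getApplicationId(), rejected, requests.getPlacementAttempt());
      rejected = new ArrayList<>();
      attemptsLeft--;
    }
    resp.getRejectedRequests().addAll(rejected);
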
http://git-wip-us.apache.org/repos/asf/hadoop/blob/28fe7f33/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
index 8b04860..8e39b63 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
@@ -133,4 +133,12 @@ public class BatchedRequests
   public Set<NodeId> getBlacklist(String tag) {
     return blacklist.getOrDefault(tag, Collections.EMPTY_SET);
   }
+
+  /**
+   * Get Iterator type.
+   * @return Iterator type.
+   */
+  public IteratorType getIteratorType() {
+    return iteratorType;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28fe7f33/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
index f8f758c..dd30b61 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
@@ -263,7 +263,7 @@ public class SingleConstraintAppPlacementAllocator<N extends SchedulerNode>
     }
 
     if (singleConstraint.getMinCardinality() != 0
-        || singleConstraint.getMaxCardinality() != 1) {
+        || singleConstraint.getMaxCardinality() != 0) {
       throwExceptionWithMetaInfo(
           "Only support anti-affinity, which is: minCardinality=0, "
               + "maxCardinality=1");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28fe7f33/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
index 9fa2c40..2ed201c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
@@ -311,7 +311,7 @@ public class MockAM {
             .allocationRequestId(allocationId).priority(priority)
             .allocationTags(allocationTags).placementConstraintExpression(
                 PlacementConstraints
-                    .targetCardinality(PlacementConstraints.NODE, 0, 1,
+                    .targetNotIn(PlacementConstraints.NODE,
                         PlacementConstraints.PlacementTargets
                             .allocationTagToIntraApp(targetTags)).build())
             .resourceSizing(resourceSizing).build()), null);
@@ -325,7 +325,7 @@ public class MockAM {
             ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
             .allocationRequestId(allocationId).priority(priority)
             .placementConstraintExpression(PlacementConstraints
-                .targetCardinality(PlacementConstraints.NODE, 0, 1,
+                .targetNotIn(PlacementConstraints.NODE,
                     PlacementConstraints.PlacementTargets
                         .allocationTagToIntraApp(tags),
                     PlacementConstraints.PlacementTargets

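For readers following the PlacementConstraints API, the change above means intra-application anti-affinity is now expressed with targetNotIn rather than a 0..1 target cardinality. A minimal sketch of building such a constraint (the tag name "hbase-rs" is illustrative; the classes come from org.apache.hadoop.yarn.api.resource):

    // "Do not place this allocation on a node that already holds an allocation
    // tagged 'hbase-rs' from the same application."
    PlacementConstraint antiAffinity = PlacementConstraints.build(
        PlacementConstraints.targetNotIn(PlacementConstraints.NODE,
            PlacementConstraints.PlacementTargets
                .allocationTagToIntraApp("hbase-rs")));
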
http://git-wip-us.apache.org/repos/asf/hadoop/blob/28fe7f33/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
index c260fe0..65daeb8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
@@ -153,13 +153,13 @@ public class TestPlacementProcessor {
   @Test(timeout = 300000)
   public void testCardinalityPlacement() throws Exception {
     HashMap<NodeId, MockNM> nodes = new HashMap<>();
-    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
+    MockNM nm1 = new MockNM("h1:1234", 8192, rm.getResourceTrackerService());
     nodes.put(nm1.getNodeId(), nm1);
-    MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
+    MockNM nm2 = new MockNM("h2:1234", 8192, rm.getResourceTrackerService());
     nodes.put(nm2.getNodeId(), nm2);
-    MockNM nm3 = new MockNM("h3:1234", 4096, rm.getResourceTrackerService());
+    MockNM nm3 = new MockNM("h3:1234", 8192, rm.getResourceTrackerService());
     nodes.put(nm3.getNodeId(), nm3);
-    MockNM nm4 = new MockNM("h4:1234", 4096, rm.getResourceTrackerService());
+    MockNM nm4 = new MockNM("h4:1234", 8192, rm.getResourceTrackerService());
     nodes.put(nm4.getNodeId(), nm4);
     nm1.registerNode();
     nm2.registerNode();
@@ -171,7 +171,7 @@ public class TestPlacementProcessor {
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2,
         Collections.singletonMap(Collections.singleton("foo"),
             PlacementConstraints.build(PlacementConstraints
-                .targetCardinality(NODE, 0, 4, allocationTag("foo")))));
+                .targetCardinality(NODE, 0, 3, allocationTag("foo")))));
     am1.addSchedulingRequest(
         Arrays.asList(schedulingRequest(1, 1, 1, 512, "foo"),
             schedulingRequest(1, 2, 1, 512, "foo"),
@@ -201,13 +201,13 @@ public class TestPlacementProcessor {
   @Test(timeout = 300000)
   public void testAffinityPlacement() throws Exception {
     HashMap<NodeId, MockNM> nodes = new HashMap<>();
-    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
+    MockNM nm1 = new MockNM("h1:1234", 8192, rm.getResourceTrackerService());
     nodes.put(nm1.getNodeId(), nm1);
-    MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
+    MockNM nm2 = new MockNM("h2:1234", 8192, rm.getResourceTrackerService());
     nodes.put(nm2.getNodeId(), nm2);
-    MockNM nm3 = new MockNM("h3:1234", 4096, rm.getResourceTrackerService());
+    MockNM nm3 = new MockNM("h3:1234", 8192, rm.getResourceTrackerService());
     nodes.put(nm3.getNodeId(), nm3);
-    MockNM nm4 = new MockNM("h4:1234", 4096, rm.getResourceTrackerService());
+    MockNM nm4 = new MockNM("h4:1234", 8192, rm.getResourceTrackerService());
     nodes.put(nm4.getNodeId(), nm4);
     nm1.registerNode();
     nm2.registerNode();
@@ -267,7 +267,7 @@ public class TestPlacementProcessor {
         PlacementConstraints.build(targetIn(NODE, allocationTag("bar"))));
     // Containers with allocationTag 'foo' should not exceed 2 per NODE
     constraintMap.put(Collections.singleton("foo"), PlacementConstraints
-        .build(targetCardinality(NODE, 0, 2, allocationTag("foo"))));
+        .build(targetCardinality(NODE, 0, 1, allocationTag("foo"))));
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2, constraintMap);
     am1.addSchedulingRequest(
         Arrays.asList(schedulingRequest(1, 1, 1, 512, "bar"),
@@ -513,7 +513,8 @@ public class TestPlacementProcessor {
   private static void waitForContainerAllocation(Collection<MockNM> nodes,
       MockAM am, List<Container> allocatedContainers, int containerNum)
       throws Exception {
-    while (allocatedContainers.size() < containerNum) {
+    int attemptCount = 10;
+    while (allocatedContainers.size() < containerNum && attemptCount > 0) {
       for (MockNM node : nodes) {
         node.nodeHeartbeat(true);
       }
@@ -522,6 +523,7 @@ public class TestPlacementProcessor {
       sleep(1000);
       AllocateResponse allocResponse = am.schedule();
       allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+      attemptCount--;
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28fe7f33/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/TestCircularIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/TestCircularIterator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/TestCircularIterator.java
new file mode 100644
index 0000000..5ce76b0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/TestCircularIterator.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Simple test case for the CircularIterator.
+ */
+public class TestCircularIterator {
+
+  @Test
+  public void testIteration() throws Exception {
+    List<String> list = Arrays.asList("a", "b", "c", "d");
+    CircularIterator<String> ci =
+        new CircularIterator<>(null, list.iterator(), list);
+    StringBuffer sb = new StringBuffer("");
+    while (ci.hasNext()) {
+      sb.append(ci.next());
+    }
+    Assert.assertEquals("abcd", sb.toString());
+
+    Iterator<String> lIter = list.iterator();
+    lIter.next();
+    lIter.next();
+    sb = new StringBuffer("");
+    ci = new CircularIterator<>(null, lIter, list);
+    while (ci.hasNext()) {
+      sb.append(ci.next());
+    }
+    Assert.assertEquals("cdab", sb.toString());
+
+    lIter = list.iterator();
+    lIter.next();
+    lIter.next();
+    lIter.next();
+    sb = new StringBuffer("");
+    ci = new CircularIterator<>("x", lIter, list);
+    while (ci.hasNext()) {
+      sb.append(ci.next());
+    }
+    Assert.assertEquals("xdabc", sb.toString());
+
+    list = Arrays.asList("a");
+    lIter = list.iterator();
+    lIter.next();
+    sb = new StringBuffer("");
+    ci = new CircularIterator<>("y", lIter, list);
+    while (ci.hasNext()) {
+      sb.append(ci.next());
+    }
+    Assert.assertEquals("ya", sb.toString());
+
+    try {
+      list = new ArrayList<>();
+      lIter = list.iterator();
+      new CircularIterator<>("y", lIter, list);
+      Assert.fail("Should fail..");
+    } catch (Exception e) {
+      // Expected: the constructor rejects an empty list.
+    }
+  }
+}
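
The CircularIterator class exercised above is not included in this excerpt. A minimal sketch that would satisfy these assertions (an assumed illustration, not the committed implementation) could look like:

    import java.util.Iterator;
    import java.util.List;
    import java.util.NoSuchElementException;

    /** Iterates a list starting mid-way, wrapping around so every element is seen once. */
    class CircularIterator<T> implements Iterator<T> {
      private T startElement;          // optional element to return first, may be null
      private Iterator<T> current;     // iterator already positioned somewhere in the list
      private final List<T> list;
      private int remaining;           // list elements still to be returned

      CircularIterator(T startElement, Iterator<T> current, List<T> list) {
        if (list == null || list.isEmpty()) {
          throw new IllegalArgumentException("List must not be empty");
        }
        this.startElement = startElement;
        this.current = current;
        this.list = list;
        this.remaining = list.size();
      }

      @Override
      public boolean hasNext() {
        return startElement != null || remaining > 0;
      }

      @Override
      public T next() {
        if (startElement != null) {
          T first = startElement;
          startElement = null;
          return first;
        }
        if (remaining == 0) {
          throw new NoSuchElementException();
        }
        if (!current.hasNext()) {
          current = list.iterator();   // wrap around to the head of the list
        }
        remaining--;
        return current.next();
      }
    }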

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28fe7f33/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/TestSingleConstraintAppPlacementAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/TestSingleConstraintAppPlacementAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/TestSingleConstraintAppPlacementAllocator.java
index 479d2c1..3485ea8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/TestSingleConstraintAppPlacementAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/TestSingleConstraintAppPlacementAllocator.java
@@ -118,7 +118,7 @@ public class TestSingleConstraintAppPlacementAllocator {
         ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
         .allocationRequestId(10L).priority(Priority.newInstance(1))
         .placementConstraintExpression(PlacementConstraints
-            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+            .targetNotIn(PlacementConstraints.NODE,
                 PlacementConstraints.PlacementTargets
                     .allocationTagToIntraApp("mapper", "reducer"),
                 PlacementConstraints.PlacementTargets.nodePartition(""))
@@ -134,7 +134,7 @@ public class TestSingleConstraintAppPlacementAllocator {
         ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
         .allocationRequestId(10L).priority(Priority.newInstance(1))
         .placementConstraintExpression(PlacementConstraints
-            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+            .targetNotIn(PlacementConstraints.NODE,
                 PlacementConstraints.PlacementTargets
                     .allocationTagToIntraApp("mapper", "reducer"),
                 PlacementConstraints.PlacementTargets.nodePartition("x"))
@@ -150,7 +150,7 @@ public class TestSingleConstraintAppPlacementAllocator {
         ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
         .allocationRequestId(10L).priority(Priority.newInstance(1))
         .placementConstraintExpression(PlacementConstraints
-            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+            .targetNotIn(PlacementConstraints.NODE,
                 PlacementConstraints.PlacementTargets
                     .allocationTagToIntraApp("mapper", "reducer")).build())
         .resourceSizing(
@@ -165,7 +165,7 @@ public class TestSingleConstraintAppPlacementAllocator {
         ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
         .allocationRequestId(10L).priority(Priority.newInstance(1))
         .placementConstraintExpression(PlacementConstraints
-            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+            .targetNotIn(PlacementConstraints.NODE,
                 PlacementConstraints.PlacementTargets
                     .allocationTagToIntraApp("mapper", "reducer")).build())
         .resourceSizing(
@@ -181,7 +181,7 @@ public class TestSingleConstraintAppPlacementAllocator {
         ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
         .allocationRequestId(10L).priority(Priority.newInstance(1))
         .placementConstraintExpression(PlacementConstraints
-            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+            .targetNotIn(PlacementConstraints.NODE,
                 PlacementConstraints.PlacementTargets
                     .allocationTagToIntraApp("mapper", "reducer")).build())
         .build(), true);
@@ -191,7 +191,7 @@ public class TestSingleConstraintAppPlacementAllocator {
         ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
         .allocationRequestId(10L).priority(Priority.newInstance(1))
         .placementConstraintExpression(PlacementConstraints
-            .targetCardinality(PlacementConstraints.NODE, 0, 1).build())
+            .targetNotIn(PlacementConstraints.NODE).build())
         .build(), true);
 
     // Invalid (with multiple allocation tags expression specified)
@@ -199,7 +199,7 @@ public class TestSingleConstraintAppPlacementAllocator {
         ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
         .allocationRequestId(10L).priority(Priority.newInstance(1))
         .placementConstraintExpression(PlacementConstraints
-            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+            .targetNotIn(PlacementConstraints.NODE,
                 PlacementConstraints.PlacementTargets
                     .allocationTagToIntraApp("mapper"),
                 PlacementConstraints.PlacementTargets
@@ -214,7 +214,7 @@ public class TestSingleConstraintAppPlacementAllocator {
         ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
         .allocationRequestId(10L).priority(Priority.newInstance(1))
         .placementConstraintExpression(PlacementConstraints
-            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+            .targetNotIn(PlacementConstraints.NODE,
                 PlacementConstraints.PlacementTargets
                     .allocationTagToIntraApp("mapper"),
                 PlacementConstraints.PlacementTargets
@@ -255,7 +255,7 @@ public class TestSingleConstraintAppPlacementAllocator {
         ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
         .allocationRequestId(10L).priority(Priority.newInstance(1))
         .placementConstraintExpression(PlacementConstraints
-            .targetCardinality(PlacementConstraints.RACK, 0, 1,
+            .targetNotIn(PlacementConstraints.RACK,
                 PlacementConstraints.PlacementTargets
                     .allocationTagToIntraApp("mapper", "reducer"),
                 PlacementConstraints.PlacementTargets.nodePartition(""))
@@ -268,7 +268,7 @@ public class TestSingleConstraintAppPlacementAllocator {
         ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC))
         .allocationRequestId(10L).priority(Priority.newInstance(1))
         .placementConstraintExpression(PlacementConstraints
-            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+            .targetNotIn(PlacementConstraints.NODE,
                 PlacementConstraints.PlacementTargets
                     .allocationTagToIntraApp("mapper", "reducer"),
                 PlacementConstraints.PlacementTargets.nodePartition(""))
@@ -284,7 +284,7 @@ public class TestSingleConstraintAppPlacementAllocator {
             ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
             .allocationRequestId(10L).priority(Priority.newInstance(1))
             .placementConstraintExpression(PlacementConstraints
-                .targetCardinality(PlacementConstraints.NODE, 0, 1,
+                .targetNotIn(PlacementConstraints.NODE,
                     PlacementConstraints.PlacementTargets
                         .allocationTagToIntraApp("mapper", "reducer"),
                     PlacementConstraints.PlacementTargets.nodePartition(""))
@@ -330,7 +330,7 @@ public class TestSingleConstraintAppPlacementAllocator {
         ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
         .allocationRequestId(10L).priority(Priority.newInstance(1))
         .placementConstraintExpression(PlacementConstraints
-            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+            .targetNotIn(PlacementConstraints.NODE,
                 PlacementConstraints.PlacementTargets
                     .allocationTagToIntraApp("mapper", "reducer"),
                 PlacementConstraints.PlacementTargets.nodePartition(""))
@@ -350,7 +350,7 @@ public class TestSingleConstraintAppPlacementAllocator {
             ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
             .allocationRequestId(10L).priority(Priority.newInstance(1))
             .placementConstraintExpression(PlacementConstraints
-                .targetCardinality(PlacementConstraints.NODE, 0, 1,
+                .targetNotIn(PlacementConstraints.NODE,
                     PlacementConstraints.PlacementTargets
                         .allocationTagToIntraApp("mapper", "reducer"),
                     PlacementConstraints.PlacementTargets.nodePartition(""))
@@ -372,7 +372,7 @@ public class TestSingleConstraintAppPlacementAllocator {
         ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
         .allocationRequestId(10L).priority(Priority.newInstance(1))
         .placementConstraintExpression(PlacementConstraints
-            .targetCardinality(PlacementConstraints.NODE, 0, 1,
+            .targetNotIn(PlacementConstraints.NODE,
                 PlacementConstraints.PlacementTargets
                     .allocationTagToIntraApp("mapper", "reducer"),
                 PlacementConstraints.PlacementTargets.nodePartition("x"))


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[15/32] hadoop git commit: YARN-7612. Add Processor Framework for Rich Placement Constraints. (asuresh)

Posted by as...@apache.org.
YARN-7612. Add Processor Framework for Rich Placement Constraints. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f9af15d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f9af15d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f9af15d6

Branch: refs/heads/trunk
Commit: f9af15d659f59fd0cf564fe1ecc8e06c6429ba68
Parents: 1efb2b6
Author: Arun Suresh <as...@apache.org>
Authored: Fri Dec 22 15:51:20 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     |  26 ++
 .../src/main/resources/yarn-default.xml         |  30 ++
 .../ApplicationMasterService.java               |  15 +
 .../rmcontainer/RMContainerImpl.java            |   7 +-
 .../scheduler/capacity/CapacityScheduler.java   |   2 +
 .../constraint/processor/BatchedRequests.java   | 105 +++++
 .../processor/NodeCandidateSelector.java        |  38 ++
 .../processor/PlacementDispatcher.java          | 145 +++++++
 .../processor/PlacementProcessor.java           | 343 ++++++++++++++++
 .../processor/SamplePlacementAlgorithm.java     | 144 +++++++
 .../constraint/processor/package-info.java      |  29 ++
 .../yarn/server/resourcemanager/MockAM.java     |  26 ++
 .../yarn/server/resourcemanager/MockRM.java     |  14 +
 .../constraint/TestPlacementProcessor.java      | 394 +++++++++++++++++++
 14 files changed, 1316 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9af15d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index bbbfc52..8fb3c2e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -531,6 +531,32 @@ public class YarnConfiguration extends Configuration {
   /** The class to use as the resource scheduler.*/
   public static final String RM_SCHEDULER = 
     RM_PREFIX + "scheduler.class";
+
+  /** Placement Algorithm. */
+  public static final String RM_PLACEMENT_CONSTRAINTS_ALGORITHM_CLASS =
+      RM_PREFIX + "placement-constraints.algorithm.class";
+
+  public static final String RM_PLACEMENT_CONSTRAINTS_ENABLED =
+      RM_PREFIX + "placement-constraints.enabled";
+
+  public static final boolean DEFAULT_RM_PLACEMENT_CONSTRAINTS_ENABLED = true;
+
+  public static final String RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS =
+      RM_PREFIX + "placement-constraints.retry-attempts";
+
+  public static final int DEFAULT_RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS = 3;
+
+  public static final String RM_PLACEMENT_CONSTRAINTS_ALGORITHM_POOL_SIZE =
+      RM_PREFIX + "placement-constraints.algorithm.pool-size";
+
+  public static final int DEFAULT_RM_PLACEMENT_CONSTRAINTS_ALGORITHM_POOL_SIZE =
+      1;
+
+  public static final String RM_PLACEMENT_CONSTRAINTS_SCHEDULER_POOL_SIZE =
+      RM_PREFIX + "placement-constraints.scheduler.pool-size";
+
+  public static final int DEFAULT_RM_PLACEMENT_CONSTRAINTS_SCHEDULER_POOL_SIZE =
+      1;
  
   public static final String DEFAULT_RM_SCHEDULER = 
       "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9af15d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 0bb4fca..6d52ace 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -131,6 +131,36 @@
   </property>
 
   <property>
+    <description>Enable Constraint Placement.</description>
+    <name>yarn.resourcemanager.placement-constraints.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>Number of times to retry placement of rejected SchedulingRequests.</description>
+    <name>yarn.resourcemanager.placement-constraints.retry-attempts</name>
+    <value>3</value>
+  </property>
+
+  <property>
+    <description>Constraint Placement Algorithm to be used.</description>
+    <name>yarn.resourcemanager.placement-constraints.algorithm.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.SamplePlacementAlgorithm</value>
+  </property>
+
+  <property>
+    <description>Threadpool size for the Algorithm used for placement constraint processing.</description>
+    <name>yarn.resourcemanager.placement-constraints.algorithm.pool-size</name>
+    <value>1</value>
+  </property>
+
+  <property>
+    <description>Threadpool size for the Scheduler invocation phase of placement constraint processing.</description>
+    <name>yarn.resourcemanager.placement-constraints.scheduler.pool-size</name>
+    <value>1</value>
+  </property>
+
+  <property>
     <description>
       Comma separated class names of ApplicationMasterServiceProcessor
       implementations. The processors will be applied in the order

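The properties above map to the new YarnConfiguration constants introduced earlier in this commit. A hedged sketch of enabling and tuning the processor programmatically (the values shown are illustrative, not recommendations):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class EnablePlacementConstraints {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        // Turn the placement constraint processor on (yarn-default.xml ships it as false).
        conf.setBoolean(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ENABLED, true);
        // Optional tuning; defaults are 3 retry attempts and single-threaded pools.
        conf.setInt(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS, 3);
        conf.setInt(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ALGORITHM_POOL_SIZE, 1);
        conf.setInt(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_SCHEDULER_POOL_SIZE, 1);
        // The equivalent yarn-site.xml property names are listed in the hunk above.
      }
    }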
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9af15d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
index 90c42be..aa1177d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.PlacementProcessor;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
@@ -114,11 +115,25 @@ public class ApplicationMasterService extends AbstractService implements
         YarnConfiguration.RM_SCHEDULER_ADDRESS,
         YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS,
         YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);
+    initializeProcessingChain(conf);
+  }
+
+  private void initializeProcessingChain(Configuration conf) {
     amsProcessingChain.init(rmContext, null);
+    boolean enablePlacementConstraints = conf.getBoolean(
+        YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ENABLED,
+        YarnConfiguration.DEFAULT_RM_PLACEMENT_CONSTRAINTS_ENABLED);
+    if (enablePlacementConstraints) {
+      amsProcessingChain.addProcessor(new PlacementProcessor());
+    }
     List<ApplicationMasterServiceProcessor> processors = getProcessorList(conf);
     if (processors != null) {
       Collections.reverse(processors);
       for (ApplicationMasterServiceProcessor p : processors) {
+        // Ensure only single instance of PlacementProcessor is included
+        if (enablePlacementConstraints && p instanceof PlacementProcessor) {
+          continue;
+        }
         this.amsProcessingChain.addProcessor(p);
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9af15d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index 184cdfc..c873509 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -190,8 +190,7 @@ public class RMContainerImpl implements RMContainer {
   private boolean isExternallyAllocated;
   private SchedulerRequestKey allocatedSchedulerKey;
 
-  // TODO, set it when container allocated by scheduler (From SchedulingRequest)
-  private Set<String> allocationTags = null;
+  private volatile Set<String> allocationTags = null;
 
   public RMContainerImpl(Container container, SchedulerRequestKey schedulerKey,
       ApplicationAttemptId appAttemptId, NodeId nodeId, String user,
@@ -510,6 +509,10 @@ public class RMContainerImpl implements RMContainer {
     return allocationTags;
   }
 
+  public void setAllocationTags(Set<String> tags) {
+    this.allocationTags = tags;
+  }
+
   private static class BaseTransition implements
       SingleArcTransition<RMContainerImpl, RMContainerEvent> {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9af15d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 676c0fe..e682d0f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -2601,6 +2601,8 @@ public class CapacityScheduler extends
           SchedulerRequestKey.extractFrom(container),
           appAttempt.getApplicationAttemptId(), container.getNodeId(),
           appAttempt.getUser(), rmContext, false);
+      ((RMContainerImpl)rmContainer).setAllocationTags(
+          new HashSet<>(schedulingRequest.getAllocationTags()));
 
       allocated = new ContainerAllocationProposal<>(
           getSchedulerContainer(rmContainer, true),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9af15d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
new file mode 100644
index 0000000..fe92d2f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmInput;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * A grouping of SchedulingRequests that is sent to the PlacementAlgorithm
+ * to be placed as a batch. The placement algorithm tends to produce better
+ * placements when more requests are batched together.
+ */
+class BatchedRequests implements ConstraintPlacementAlgorithmInput {
+
+  // Placement attempt - the number of times the requests in this
+  // batch have been placed by the algorithm but rejected by the scheduler.
+  private final int placementAttempt;
+
+  private final ApplicationId applicationId;
+  private final Collection<SchedulingRequest> requests;
+  private final Map<String, Set<NodeId>> blacklist = new HashMap<>();
+
+  BatchedRequests(ApplicationId applicationId,
+      Collection<SchedulingRequest> requests, int attempt) {
+    this.applicationId = applicationId;
+    this.requests = requests;
+    this.placementAttempt = attempt;
+  }
+
+  /**
+   * Get Application Id.
+   * @return Application Id.
+   */
+  ApplicationId getApplicationId() {
+    return applicationId;
+  }
+
+  /**
+   * Get Collection of SchedulingRequests in this batch.
+   * @return Collection of Scheduling Requests.
+   */
+  @Override
+  public Collection<SchedulingRequest> getSchedulingRequests() {
+    return requests;
+  }
+
+  /**
+   * Add a Scheduling request to the batch.
+   * @param req Scheduling Request.
+   */
+  void addToBatch(SchedulingRequest req) {
+    requests.add(req);
+  }
+
+  void addToBlacklist(Set<String> tags, SchedulerNode node) {
+    if (tags != null && !tags.isEmpty()) {
+      // We currently assume a single allocation tag
+      // per scheduling request.
+      blacklist.computeIfAbsent(tags.iterator().next(),
+          k -> new HashSet<>()).add(node.getNodeID());
+    }
+  }
+
+  /**
+   * Get placement attempt.
+   * @return the placement attempt number.
+   */
+  int getPlacementAttempt() {
+    return placementAttempt;
+  }
+
+  /**
+   * Get any blacklisted nodes associated with tag.
+   * @param tag Tag.
+   * @return Set of blacklisted Nodes.
+   */
+  Set<NodeId> getBlacklist(String tag) {
+    return blacklist.getOrDefault(tag, Collections.EMPTY_SET);
+  }
+}
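
As a rough illustration of how the processor (further below in this commit) reuses this class when the scheduler rejects a placement, a retry batch might be assembled as follows. This is a sketch only; BatchedRequests is package-private, and appId, rejectedRequest, failedNode and previousAttempt are placeholder identifiers:

    // Sketch: build a retry batch for the next placement attempt.
    BatchedRequests retryBatch =
        new BatchedRequests(appId, new ArrayList<>(), previousAttempt + 1);
    retryBatch.addToBatch(rejectedRequest);
    // Remember the node the request just failed on so the algorithm can avoid it.
    retryBatch.addToBlacklist(rejectedRequest.getAllocationTags(), failedNode);
    // The algorithm can later consult getBlacklist(tag) for nodes to skip.
    Set<NodeId> avoid = retryBatch.getBlacklist(
        rejectedRequest.getAllocationTags().iterator().next());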

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9af15d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/NodeCandidateSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/NodeCandidateSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/NodeCandidateSelector.java
new file mode 100644
index 0000000..4299050
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/NodeCandidateSelector.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor;
+
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeFilter;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+
+import java.util.List;
+
+/**
+ * A read-only view over the ClusterNodeTracker that exposes a method
+ * to return a filtered list of nodes.
+ */
+public interface NodeCandidateSelector {
+
+  /**
+   * Select a list of nodes given a filter.
+   * @param filter a NodeFilter.
+   * @return List of SchedulerNodes.
+   */
+  List<SchedulerNode> selectNodes(NodeFilter filter);
+
+}
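
Since the interface has a single method, it can be bound with a lambda over the scheduler's node tracker; this mirrors how SamplePlacementAlgorithm wires it up later in this patch (rmContext here is assumed to be an initialized RMContext):

    // Sketch: expose the scheduler's node list through the selector interface.
    NodeCandidateSelector selector =
        filter -> ((AbstractYarnScheduler) rmContext.getScheduler()).getNodes(filter);
    // Passing a null filter returns all nodes known to the scheduler.
    List<SchedulerNode> allNodes = selector.selectNodes(null);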

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9af15d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementDispatcher.java
new file mode 100644
index 0000000..6a00ba8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementDispatcher.java
@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithm;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmOutput;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmOutputCollector;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.PlacedSchedulingRequest;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+/**
+ * This class initializes the Constraint Placement Algorithm. It dispatches
+ * input to the algorithm and collects output from it.
+ */
+class PlacementDispatcher implements
+    ConstraintPlacementAlgorithmOutputCollector {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(PlacementDispatcher.class);
+  private ConstraintPlacementAlgorithm algorithm;
+  private ExecutorService algorithmThreadPool;
+
+  private Map<ApplicationId, List<PlacedSchedulingRequest>>
+      placedRequests = new ConcurrentHashMap<>();
+  private Map<ApplicationId, List<SchedulingRequest>>
+      rejectedRequests = new ConcurrentHashMap<>();
+
+  public void init(RMContext rmContext,
+      ConstraintPlacementAlgorithm placementAlgorithm, int poolSize) {
+    LOG.info("Initializing Constraint Placement Planner:");
+    this.algorithm = placementAlgorithm;
+    this.algorithm.init(rmContext);
+    this.algorithmThreadPool = Executors.newFixedThreadPool(poolSize);
+  }
+
+  void dispatch(final BatchedRequests batchedRequests) {
+    final ConstraintPlacementAlgorithmOutputCollector collector = this;
+    Runnable placingTask = () -> {
+      LOG.debug("Got [{}] requests to place from application [{}].. " +
+              "Attempt count [{}]",
+          batchedRequests.getSchedulingRequests().size(),
+          batchedRequests.getApplicationId(),
+          batchedRequests.getPlacementAttempt());
+      algorithm.place(batchedRequests, collector);
+    };
+    this.algorithmThreadPool.submit(placingTask);
+  }
+
+  public List<PlacedSchedulingRequest> pullPlacedRequests(
+      ApplicationId applicationId) {
+    List<PlacedSchedulingRequest> placedReqs =
+        this.placedRequests.get(applicationId);
+    if (placedReqs != null && !placedReqs.isEmpty()) {
+      List<PlacedSchedulingRequest> retList = new ArrayList<>();
+      synchronized (placedReqs) {
+        if (placedReqs.size() > 0) {
+          retList.addAll(placedReqs);
+          placedReqs.clear();
+        }
+      }
+      return retList;
+    }
+    return Collections.EMPTY_LIST;
+  }
+
+  public List<SchedulingRequest> pullRejectedRequests(
+      ApplicationId applicationId) {
+    List<SchedulingRequest> rejectedReqs =
+        this.rejectedRequests.get(applicationId);
+    if (rejectedReqs != null && !rejectedReqs.isEmpty()) {
+      List<SchedulingRequest> retList = new ArrayList<>();
+      synchronized (rejectedReqs) {
+        if (rejectedReqs.size() > 0) {
+          retList.addAll(rejectedReqs);
+          rejectedReqs.clear();
+        }
+      }
+      return retList;
+    }
+    return Collections.EMPTY_LIST;
+  }
+
+  void clearApplicationState(ApplicationId applicationId) {
+    placedRequests.remove(applicationId);
+    rejectedRequests.remove(applicationId);
+  }
+
+  @Override
+  public void collect(ConstraintPlacementAlgorithmOutput placement) {
+    if (!placement.getPlacedRequests().isEmpty()) {
+      List<PlacedSchedulingRequest> processed =
+          placedRequests.computeIfAbsent(
+              placement.getApplicationId(), k -> new ArrayList<>());
+      synchronized (processed) {
+        LOG.debug(
+            "Planning Algorithm has placed for application [{}]" +
+                " the following [{}]", placement.getApplicationId(),
+            placement.getPlacedRequests());
+        for (PlacedSchedulingRequest esr :
+            placement.getPlacedRequests()) {
+          processed.add(esr);
+        }
+      }
+    }
+    if (!placement.getRejectedRequests().isEmpty()) {
+      List<SchedulingRequest> rejected =
+          rejectedRequests.computeIfAbsent(
+              placement.getApplicationId(), k -> new ArrayList());
+      LOG.warn(
+          "Planning Algorithm has rejected for application [{}]" +
+              " the following [{}]", placement.getApplicationId(),
+          placement.getRejectedRequests());
+      synchronized (rejected) {
+        rejected.addAll(placement.getRejectedRequests());
+      }
+    }
+  }
+}
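
The dispatcher's lifecycle, as driven by the PlacementProcessor below, boils down to init / dispatch / pull / clear. A condensed sketch (rmContext, appId and schedulingRequests are placeholder identifiers):

    // Sketch of the dispatch/collect cycle used by the processor.
    PlacementDispatcher dispatcher = new PlacementDispatcher();
    dispatcher.init(rmContext, new SamplePlacementAlgorithm(), 1 /* algorithm pool size */);

    // Hand a batch of SchedulingRequests to the algorithm; placement runs asynchronously.
    dispatcher.dispatch(new BatchedRequests(appId, schedulingRequests, 1));

    // On a later allocate() call, drain whatever the algorithm has produced so far.
    List<PlacedSchedulingRequest> placed = dispatcher.pullPlacedRequests(appId);
    List<SchedulingRequest> rejected = dispatcher.pullRejectedRequests(appId);

    // When the application finishes, drop its bookkeeping.
    dispatcher.clearApplicationState(appId);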

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9af15d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java
new file mode 100644
index 0000000..d613d4e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java
@@ -0,0 +1,343 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor;
+
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceContext;
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceProcessor;
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceUtils;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
+import org.apache.hadoop.yarn.api.records.RejectionReason;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithm;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.PlacedSchedulingRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.SchedulingResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.stream.Collectors;
+
+/**
+ * An ApplicationMasterService Processor that performs constrained placement of
+ * SchedulingRequests. It does the following:
+ * 1. Initializes the placement algorithm, the dispatcher and the thread pools.
+ * 2. Intercepts placement constraints from the register call and adds them to
+ *    the placement constraint manager.
+ * 3. Dispatches SchedulingRequests to the Planner.
+ */
+public class PlacementProcessor implements ApplicationMasterServiceProcessor {
+
+  /**
+   * Wrapper over the SchedulingResponse that wires in the placement attempt
+   * and last attempted Node.
+   */
+  static final class Response extends SchedulingResponse {
+
+    private final int placementAttempt;
+    private final SchedulerNode attemptedNode;
+
+    private Response(boolean isSuccess, ApplicationId applicationId,
+        SchedulingRequest schedulingRequest, int placementAttempt,
+        SchedulerNode attemptedNode) {
+      super(isSuccess, applicationId, schedulingRequest);
+      this.placementAttempt = placementAttempt;
+      this.attemptedNode = attemptedNode;
+    }
+  }
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(PlacementProcessor.class);
+  private PlacementConstraintManager constraintManager;
+  private ApplicationMasterServiceProcessor nextAMSProcessor;
+
+  private AbstractYarnScheduler scheduler;
+  private ExecutorService schedulingThreadPool;
+  private int retryAttempts;
+  private Map<ApplicationId, List<BatchedRequests>> requestsToRetry =
+      new ConcurrentHashMap<>();
+  private Map<ApplicationId, List<SchedulingRequest>> requestsToReject =
+      new ConcurrentHashMap<>();
+
+  private PlacementDispatcher placementDispatcher;
+
+
+  @Override
+  public void init(ApplicationMasterServiceContext amsContext,
+      ApplicationMasterServiceProcessor nextProcessor) {
+    LOG.info("Initializing Constraint Placement Processor:");
+    this.nextAMSProcessor = nextProcessor;
+    this.constraintManager =
+        ((RMContextImpl)amsContext).getPlacementConstraintManager();
+
+    this.scheduler =
+        (AbstractYarnScheduler)((RMContextImpl)amsContext).getScheduler();
+    // Only the first class is considered, even if a comma-separated
+    // list is provided. (getInstances is used for simplicity, since it
+    // handles class loading and instantiation correctly.)
+    List<ConstraintPlacementAlgorithm> instances =
+        ((RMContextImpl) amsContext).getYarnConfiguration().getInstances(
+            YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ALGORITHM_CLASS,
+            ConstraintPlacementAlgorithm.class);
+    ConstraintPlacementAlgorithm algorithm = null;
+    if (instances != null && !instances.isEmpty()) {
+      algorithm = instances.get(0);
+    } else {
+      algorithm = new SamplePlacementAlgorithm();
+    }
+    LOG.info("Planning Algorithm [{}]", algorithm.getClass().getName());
+
+    int algoPSize = ((RMContextImpl) amsContext).getYarnConfiguration().getInt(
+        YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ALGORITHM_POOL_SIZE,
+        YarnConfiguration.DEFAULT_RM_PLACEMENT_CONSTRAINTS_ALGORITHM_POOL_SIZE);
+    this.placementDispatcher = new PlacementDispatcher();
+    this.placementDispatcher.init(
+        ((RMContextImpl)amsContext), algorithm, algoPSize);
+    LOG.info("Planning Algorithm pool size [{}]", algoPSize);
+
+    int schedPSize = ((RMContextImpl) amsContext).getYarnConfiguration().getInt(
+        YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_SCHEDULER_POOL_SIZE,
+        YarnConfiguration.DEFAULT_RM_PLACEMENT_CONSTRAINTS_SCHEDULER_POOL_SIZE);
+    this.schedulingThreadPool = Executors.newFixedThreadPool(schedPSize);
+    LOG.info("Scheduler pool size [{}]", schedPSize);
+
+    // Number of times a request that is not satisfied by the scheduler
+    // can be retried.
+    this.retryAttempts =
+        ((RMContextImpl) amsContext).getYarnConfiguration().getInt(
+            YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS,
+            YarnConfiguration.DEFAULT_RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS);
+    LOG.info("Num retry attempts [{}]", this.retryAttempts);
+  }
+
+  @Override
+  public void registerApplicationMaster(ApplicationAttemptId appAttemptId,
+      RegisterApplicationMasterRequest request,
+      RegisterApplicationMasterResponse response)
+      throws IOException, YarnException {
+    Map<Set<String>, PlacementConstraint> appPlacementConstraints =
+        request.getPlacementConstraints();
+    processPlacementConstraints(
+        appAttemptId.getApplicationId(), appPlacementConstraints);
+    nextAMSProcessor.registerApplicationMaster(appAttemptId, request, response);
+  }
+
+  private void processPlacementConstraints(ApplicationId applicationId,
+      Map<Set<String>, PlacementConstraint> appPlacementConstraints) {
+    if (appPlacementConstraints != null && !appPlacementConstraints.isEmpty()) {
+      LOG.info("Constraints added for application [{}] against tags [{}]",
+          applicationId, appPlacementConstraints);
+      constraintManager.registerApplication(
+          applicationId, appPlacementConstraints);
+    }
+  }
+
+  @Override
+  public void allocate(ApplicationAttemptId appAttemptId,
+      AllocateRequest request, AllocateResponse response) throws YarnException {
+    List<SchedulingRequest> schedulingRequests =
+        request.getSchedulingRequests();
+    dispatchRequestsForPlacement(appAttemptId, schedulingRequests);
+    reDispatchRetryableRequests(appAttemptId);
+    schedulePlacedRequests(appAttemptId);
+
+    nextAMSProcessor.allocate(appAttemptId, request, response);
+
+    handleRejectedRequests(appAttemptId, response);
+  }
+
+  private void dispatchRequestsForPlacement(ApplicationAttemptId appAttemptId,
+      List<SchedulingRequest> schedulingRequests) {
+    if (schedulingRequests != null && !schedulingRequests.isEmpty()) {
+      this.placementDispatcher.dispatch(
+          new BatchedRequests(appAttemptId.getApplicationId(),
+              schedulingRequests, 1));
+    }
+  }
+
+  private void reDispatchRetryableRequests(ApplicationAttemptId appAttId) {
+    List<BatchedRequests> reqsToRetry =
+        this.requestsToRetry.get(appAttId.getApplicationId());
+    if (reqsToRetry != null && !reqsToRetry.isEmpty()) {
+      synchronized (reqsToRetry) {
+        for (BatchedRequests bReq: reqsToRetry) {
+          this.placementDispatcher.dispatch(bReq);
+        }
+        reqsToRetry.clear();
+      }
+    }
+  }
+
+  private void schedulePlacedRequests(ApplicationAttemptId appAttemptId) {
+    ApplicationId applicationId = appAttemptId.getApplicationId();
+    List<PlacedSchedulingRequest> placedSchedulingRequests =
+        this.placementDispatcher.pullPlacedRequests(applicationId);
+    for (PlacedSchedulingRequest placedReq : placedSchedulingRequests) {
+      SchedulingRequest sReq = placedReq.getSchedulingRequest();
+      for (SchedulerNode node : placedReq.getNodes()) {
+        final SchedulingRequest sReqClone =
+            SchedulingRequest.newInstance(sReq.getAllocationRequestId(),
+                sReq.getPriority(), sReq.getExecutionType(),
+                sReq.getAllocationTags(),
+                ResourceSizing.newInstance(
+                    sReq.getResourceSizing().getResources()),
+                sReq.getPlacementConstraint());
+        SchedulerApplicationAttempt applicationAttempt =
+            this.scheduler.getApplicationAttempt(appAttemptId);
+        Runnable task = () -> {
+          boolean success =
+              scheduler.attemptAllocationOnNode(
+                  applicationAttempt, sReqClone, node);
+          if (!success) {
+            LOG.warn("Unsuccessful allocation attempt [{}] for [{}]",
+                placedReq.getPlacementAttempt(), sReqClone);
+          }
+          handleSchedulingResponse(
+              new Response(success, applicationId, sReqClone,
+              placedReq.getPlacementAttempt(), node));
+        };
+        this.schedulingThreadPool.submit(task);
+      }
+    }
+  }
+
+  private void handleRejectedRequests(ApplicationAttemptId appAttemptId,
+      AllocateResponse response) {
+    List<SchedulingRequest> rejectedRequests =
+        this.placementDispatcher.pullRejectedRequests(
+            appAttemptId.getApplicationId());
+    if (rejectedRequests != null && !rejectedRequests.isEmpty()) {
+      LOG.warn("The following requests of [{}] were rejected by" +
+              " the Placement Algorithm: {}",
+          appAttemptId.getApplicationId(), rejectedRequests);
+      ApplicationMasterServiceUtils.addToRejectedSchedulingRequests(response,
+          rejectedRequests.stream()
+              .map(sr -> RejectedSchedulingRequest.newInstance(
+                  RejectionReason.COULD_NOT_PLACE_ON_NODE, sr))
+              .collect(Collectors.toList()));
+    }
+    rejectedRequests =
+        this.requestsToReject.get(appAttemptId.getApplicationId());
+    if (rejectedRequests != null && !rejectedRequests.isEmpty()) {
+      synchronized (rejectedRequests) {
+        LOG.warn("Following requests of [{}] exhausted all retry attempts " +
+                "trying to schedule on placed node: {}",
+            appAttemptId.getApplicationId(), rejectedRequests);
+        ApplicationMasterServiceUtils.addToRejectedSchedulingRequests(response,
+            rejectedRequests.stream()
+                .map(sr -> RejectedSchedulingRequest.newInstance(
+                    RejectionReason.COULD_NOT_SCHEDULE_ON_NODE, sr))
+                .collect(Collectors.toList()));
+        rejectedRequests.clear();
+      }
+    }
+  }
+
+  @Override
+  public void finishApplicationMaster(ApplicationAttemptId appAttemptId,
+      FinishApplicationMasterRequest request,
+      FinishApplicationMasterResponse response) {
+    constraintManager.unregisterApplication(appAttemptId.getApplicationId());
+    placementDispatcher.clearApplicationState(appAttemptId.getApplicationId());
+    requestsToReject.remove(appAttemptId.getApplicationId());
+    requestsToRetry.remove(appAttemptId.getApplicationId());
+    nextAMSProcessor.finishApplicationMaster(appAttemptId, request, response);
+  }
+
+  private void handleSchedulingResponse(SchedulingResponse schedulerResponse) {
+    int placementAttempt = ((Response)schedulerResponse).placementAttempt;
+    // Retry this placement if it was not successful and we are still
+    // under the max retry count. The request is batched with other
+    // unsuccessful requests from the same app.
+    if (!schedulerResponse.isSuccess() && placementAttempt < retryAttempts) {
+      List<BatchedRequests> reqsToRetry =
+          requestsToRetry.computeIfAbsent(
+              schedulerResponse.getApplicationId(),
+              k -> new ArrayList<>());
+      synchronized (reqsToRetry) {
+        addToRetryList(schedulerResponse, placementAttempt, reqsToRetry);
+      }
+      LOG.warn("Going to retry request for application [{}] after [{}]" +
+              " attempts: [{}]", schedulerResponse.getApplicationId(),
+          placementAttempt, schedulerResponse.getSchedulingRequest());
+    } else {
+      if (!schedulerResponse.isSuccess()) {
+        LOG.warn("Not retrying request for application [{}] after [{}]" +
+                " attempts: [{}]", schedulerResponse.getApplicationId(),
+            placementAttempt, schedulerResponse.getSchedulingRequest());
+        List<SchedulingRequest> reqsToReject =
+            requestsToReject.computeIfAbsent(
+                schedulerResponse.getApplicationId(),
+                k -> new ArrayList<>());
+        synchronized (reqsToReject) {
+          reqsToReject.add(schedulerResponse.getSchedulingRequest());
+        }
+      }
+    }
+  }
+
+  private void addToRetryList(SchedulingResponse schedulerResponse,
+      int placementAttempt, List<BatchedRequests> reqsToRetry) {
+    boolean isAdded = false;
+    for (BatchedRequests br : reqsToRetry) {
+      if (br.getPlacementAttempt() == placementAttempt + 1) {
+        br.addToBatch(schedulerResponse.getSchedulingRequest());
+        br.addToBlacklist(
+            schedulerResponse.getSchedulingRequest().getAllocationTags(),
+            ((Response) schedulerResponse).attemptedNode);
+        isAdded = true;
+        break;
+      }
+    }
+    if (!isAdded) {
+      BatchedRequests br =
+          new BatchedRequests(schedulerResponse.getApplicationId(),
+              Collections.singleton(
+                  schedulerResponse.getSchedulingRequest()),
+              placementAttempt + 1);
+      reqsToRetry.add(br);
+      br.addToBlacklist(
+          schedulerResponse.getSchedulingRequest().getAllocationTags(),
+          ((Response) schedulerResponse).attemptedNode);
+    }
+  }
+}
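
On the application side, the constraints this processor intercepts in registerApplicationMaster() are a map keyed by allocation tags, as exercised by TestPlacementProcessor elsewhere in this series. A hedged sketch of building such a map (the tag name is illustrative, and Map, Set, HashMap and Collections come from java.util):

    // Sketch: one anti-affinity constraint, keyed by the "hbase-rs" allocation tag.
    Map<Set<String>, PlacementConstraint> constraints = new HashMap<>();
    constraints.put(Collections.singleton("hbase-rs"),
        PlacementConstraints.build(
            PlacementConstraints.targetNotIn(PlacementConstraints.NODE,
                PlacementConstraints.PlacementTargets.allocationTag("hbase-rs"))));
    // The map travels with the RegisterApplicationMasterRequest; the matching
    // SchedulingRequests then carry the "hbase-rs" tag in their allocation tags.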

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9af15d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/SamplePlacementAlgorithm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/SamplePlacementAlgorithm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/SamplePlacementAlgorithm.java
new file mode 100644
index 0000000..8d49801
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/SamplePlacementAlgorithm.java
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor;
+
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations.SpecializedConstraintTransformer;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.InvalidAllocationTagsQueryException;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithm;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmInput;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmOutput;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmOutputCollector;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.PlacedSchedulingRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Sample Test algorithm. Assumes anti-affinity always.
+ * It also assumes that the numAllocations in the resource sizing is always = 1.
+ *
+ * NOTE: This is just a sample implementation. Not to be actually used.
+ */
+public class SamplePlacementAlgorithm implements ConstraintPlacementAlgorithm {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SamplePlacementAlgorithm.class);
+
+  private AllocationTagsManager tagsManager;
+  private PlacementConstraintManager constraintManager;
+  private NodeCandidateSelector nodeSelector;
+
+  @Override
+  public void init(RMContext rmContext) {
+    this.tagsManager = rmContext.getAllocationTagsManager();
+    this.constraintManager = rmContext.getPlacementConstraintManager();
+    this.nodeSelector =
+        filter -> ((AbstractYarnScheduler)(rmContext)
+            .getScheduler()).getNodes(filter);
+  }
+
+  @Override
+  public void place(ConstraintPlacementAlgorithmInput input,
+      ConstraintPlacementAlgorithmOutputCollector collector) {
+    BatchedRequests requests = (BatchedRequests)input;
+    ConstraintPlacementAlgorithmOutput resp =
+        new ConstraintPlacementAlgorithmOutput(requests.getApplicationId());
+    List<SchedulerNode> allNodes = nodeSelector.selectNodes(null);
+    Map<String, List<SchedulingRequest>> tagIndexedRequests = new HashMap<>();
+    requests.getSchedulingRequests()
+        .stream()
+        .filter(r -> r.getAllocationTags() != null)
+        .forEach(
+            req -> req.getAllocationTags().forEach(
+                tag -> tagIndexedRequests.computeIfAbsent(tag,
+                    k -> new ArrayList<>()).add(req))
+        );
+    for (Map.Entry<String, List<SchedulingRequest>> entry :
+        tagIndexedRequests.entrySet()) {
+      String tag = entry.getKey();
+      PlacementConstraint constraint =
+          constraintManager.getConstraint(requests.getApplicationId(),
+              Collections.singleton(tag));
+      if (constraint != null) {
+        // Currently works only for simple anti-affinity
+        // NODE scope target expressions
+        SpecializedConstraintTransformer transformer =
+            new SpecializedConstraintTransformer(constraint);
+        PlacementConstraint transform = transformer.transform();
+        TargetConstraint targetConstraint =
+            (TargetConstraint) transform.getConstraintExpr();
+        // Assume a single target expression tag;
+        // The Sample Algorithm assumes a constraint will always be a simple
+        // Target Constraint with a single entry in the target set.
+        // As mentioned in the class javadoc - This algorithm should be
+        // used mostly for testing and validating end-2-end workflow.
+        String targetTag =
+            targetConstraint.getTargetExpressions().iterator().next()
+            .getTargetValues().iterator().next();
+        // iterate over all nodes
+        Iterator<SchedulerNode> nodeIter = allNodes.iterator();
+        List<SchedulingRequest> schedulingRequests = entry.getValue();
+        Iterator<SchedulingRequest> reqIter = schedulingRequests.iterator();
+        while (reqIter.hasNext()) {
+          SchedulingRequest sReq = reqIter.next();
+          int numAllocs = sReq.getResourceSizing().getNumAllocations();
+          while (numAllocs > 0 && nodeIter.hasNext()) {
+            SchedulerNode node = nodeIter.next();
+            long nodeCardinality = 0;
+            try {
+              nodeCardinality = tagsManager.getNodeCardinality(
+                  node.getNodeID(), requests.getApplicationId(),
+                  targetTag);
+              if (nodeCardinality == 0 &&
+                  !requests.getBlacklist(tag).contains(node.getNodeID())) {
+                numAllocs--;
+                sReq.getResourceSizing().setNumAllocations(numAllocs);
+                PlacedSchedulingRequest placedReq =
+                    new PlacedSchedulingRequest(sReq);
+                placedReq.setPlacementAttempt(requests.getPlacementAttempt());
+                placedReq.getNodes().add(node);
+                resp.getPlacedRequests().add(placedReq);
+              }
+            } catch (InvalidAllocationTagsQueryException e) {
+              LOG.warn("Got exception from TagManager !", e);
+            }
+          }
+        }
+      }
+    }
+    // Add all requests whose numAllocations still > 0 to rejected list.
+    requests.getSchedulingRequests().stream()
+        .filter(sReq -> sReq.getResourceSizing().getNumAllocations() > 0)
+        .forEach(rejReq -> resp.getRejectedRequests().add(rejReq));
+    collector.collect(resp);
+  }
+}
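
For context, the kind of request this sample algorithm handles could be built on the
application side roughly as below. This is only an illustrative sketch (not part of the
patch); it uses the PlacementConstraints and SchedulingRequest builder APIs exercised by
the tests later in this series, and the tag name, priority and sizing values are arbitrary
assumptions.

// Illustrative sketch only: an AM expressing node-scope anti-affinity among
// containers tagged "foo", which is the case SamplePlacementAlgorithm handles.
import java.util.Collections;
import org.apache.hadoop.yarn.api.records.ExecutionType;
import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceSizing;
import org.apache.hadoop.yarn.api.records.SchedulingRequest;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

public final class AntiAffinityRequestSketch {

  // Constraint: no two allocations tagged "foo" may land on the same node.
  static PlacementConstraint antiAffinity() {
    return PlacementConstraints.build(
        PlacementConstraints.targetNotIn(NODE, allocationTag("foo")));
  }

  // One GUARANTEED container of 512MB / 1 vcore carrying the "foo" tag.
  static SchedulingRequest request(long allocReqId) {
    return SchedulingRequest.newBuilder()
        .priority(Priority.newInstance(1))
        .allocationRequestId(allocReqId)
        .allocationTags(Collections.singleton("foo"))
        .executionType(ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED, true))
        .resourceSizing(ResourceSizing.newInstance(1, Resource.newInstance(512, 1)))
        .build();
  }
}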

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9af15d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/package-info.java
new file mode 100644
index 0000000..7090154
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/package-info.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package o.a.h.yarn.server.resourcemanager.scheduler.constraint.processor
+ * contains classes related to scheduling containers using the placement
+ * processor.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9af15d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
index 12dfe18..975abe6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
@@ -21,7 +21,10 @@ package org.apache.hadoop.yarn.server.resourcemanager;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -39,7 +42,9 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
@@ -57,6 +62,9 @@ public class MockAM {
   private ApplicationMasterProtocol amRMProtocol;
   private UserGroupInformation ugi;
   private volatile AllocateResponse lastResponse;
+  private Map<Set<String>, PlacementConstraint> placementConstraints =
+      new HashMap<>();
+  private List<SchedulingRequest> schedulingRequests = new ArrayList<>();
 
   private final List<ResourceRequest> requests = new ArrayList<ResourceRequest>();
   private final List<ContainerId> releases = new ArrayList<ContainerId>();
@@ -93,6 +101,16 @@ public class MockAM {
     return registerAppAttempt(true);
   }
 
+  public void addPlacementConstraint(Set<String> tags,
+      PlacementConstraint constraint) {
+    placementConstraints.put(tags, constraint);
+  }
+
+  public MockAM addSchedulingRequest(List<SchedulingRequest> reqs) {
+    schedulingRequests.addAll(reqs);
+    return this;
+  }
+
   public RegisterApplicationMasterResponse registerAppAttempt(boolean wait)
       throws Exception {
     if (wait) {
@@ -104,6 +122,9 @@ public class MockAM {
     req.setHost("");
     req.setRpcPort(1);
     req.setTrackingUrl("");
+    if (!placementConstraints.isEmpty()) {
+      req.setPlacementConstraints(this.placementConstraints);
+    }
     if (ugi == null) {
       ugi = UserGroupInformation.createRemoteUser(
           attemptId.toString());
@@ -247,12 +268,17 @@ public class MockAM {
 
   }
 
+
   public AllocateResponse allocate(
       List<ResourceRequest> resourceRequest, List<ContainerId> releases)
       throws Exception {
     final AllocateRequest req =
         AllocateRequest.newInstance(0, 0F, resourceRequest,
           releases, null);
+    if (!schedulingRequests.isEmpty()) {
+      req.setSchedulingRequests(schedulingRequests);
+      schedulingRequests.clear();
+    }
     return allocate(req);
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9af15d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index 2df3788..eb4c626 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -27,6 +27,7 @@ import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.DataOutputBuffer;
@@ -65,6 +66,7 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
@@ -1240,6 +1242,18 @@ public class MockRM extends ResourceManager {
     return am;
   }
 
+  public static MockAM launchAndRegisterAM(RMApp app, MockRM rm, MockNM nm,
+      Map<Set<String>, PlacementConstraint> constraints) throws Exception {
+    MockAM am = launchAM(app, rm, nm);
+    for (Map.Entry<Set<String>, PlacementConstraint> e :
+        constraints.entrySet()) {
+      am.addPlacementConstraint(e.getKey(), e.getValue());
+    }
+    am.registerAppAttempt();
+    rm.waitForState(app.getApplicationId(), RMAppState.RUNNING);
+    return am;
+  }
+
   public ApplicationReport getApplicationReport(ApplicationId appId)
       throws YarnException, IOException {
     ApplicationClientProtocol client = getClientRMService();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9af15d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
new file mode 100644
index 0000000..db8ae15
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
@@ -0,0 +1,394 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
+import org.apache.hadoop.yarn.api.records.RejectionReason;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
+import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static java.lang.Thread.sleep;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
+
+/**
+ * This tests end2end workflow of the constraint placement framework.
+ */
+public class TestPlacementProcessor {
+
+  private static final int GB = 1024;
+
+  private static final Log LOG =
+      LogFactory.getLog(TestPlacementProcessor.class);
+  private MockRM rm;
+  private DrainDispatcher dispatcher;
+
+  @Before
+  public void createAndStartRM() {
+    CapacitySchedulerConfiguration csConf =
+        new CapacitySchedulerConfiguration();
+    YarnConfiguration conf = new YarnConfiguration(csConf);
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+    conf.setBoolean(
+        YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ENABLED, true);
+    conf.setInt(
+        YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS, 1);
+    startRM(conf);
+  }
+
+  private void startRM(final YarnConfiguration conf) {
+    dispatcher = new DrainDispatcher();
+    rm = new MockRM(conf) {
+      @Override
+      protected Dispatcher createDispatcher() {
+        return dispatcher;
+      }
+    };
+    rm.start();
+  }
+
+  @After
+  public void stopRM() {
+    if (rm != null) {
+      rm.stop();
+    }
+  }
+
+  @Test(timeout = 300000)
+  public void testPlacement() throws Exception {
+    HashMap<NodeId, MockNM> nodes = new HashMap<>();
+    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm1.getNodeId(), nm1);
+    MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm2.getNodeId(), nm2);
+    MockNM nm3 = new MockNM("h3:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm3.getNodeId(), nm3);
+    MockNM nm4 = new MockNM("h4:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm4.getNodeId(), nm4);
+    nm1.registerNode();
+    nm2.registerNode();
+    nm3.registerNode();
+    nm4.registerNode();
+
+    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2,
+        Collections.singletonMap(
+            Collections.singleton("foo"),
+            PlacementConstraints.build(
+                PlacementConstraints.targetNotIn(NODE, allocationTag("foo")))
+        ));
+    am1.addSchedulingRequest(
+        Arrays.asList(
+            schedulingRequest(1, 1, 1, 512, "foo"),
+            schedulingRequest(1, 2, 1, 512, "foo"),
+            schedulingRequest(1, 3, 1, 512, "foo"),
+            schedulingRequest(1, 5, 1, 512, "foo"))
+    );
+    AllocateResponse allocResponse = am1.schedule(); // send the request
+    List<Container> allocatedContainers = new ArrayList<>();
+    allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+
+    // kick the scheduler
+
+    while (allocatedContainers.size() < 4) {
+      nm1.nodeHeartbeat(true);
+      nm2.nodeHeartbeat(true);
+      nm3.nodeHeartbeat(true);
+      nm4.nodeHeartbeat(true);
+      LOG.info("Waiting for containers to be created for app 1...");
+      sleep(1000);
+      allocResponse = am1.schedule();
+      allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+    }
+
+    Assert.assertEquals(4, allocatedContainers.size());
+    Set<NodeId> nodeIds = allocatedContainers.stream()
+        .map(x -> x.getNodeId()).collect(Collectors.toSet());
+    // Ensure unique nodes
+    Assert.assertEquals(4, nodeIds.size());
+  }
+
+  @Test(timeout = 300000)
+  public void testSchedulerRejection() throws Exception {
+    HashMap<NodeId, MockNM> nodes = new HashMap<>();
+    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm1.getNodeId(), nm1);
+    MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm2.getNodeId(), nm2);
+    MockNM nm3 = new MockNM("h3:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm3.getNodeId(), nm3);
+    MockNM nm4 = new MockNM("h4:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm4.getNodeId(), nm4);
+    nm1.registerNode();
+    nm2.registerNode();
+    nm3.registerNode();
+    nm4.registerNode();
+
+    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2,
+        Collections.singletonMap(
+            Collections.singleton("foo"),
+            PlacementConstraints.build(
+                PlacementConstraints.targetNotIn(NODE, allocationTag("foo")))
+        ));
+    am1.addSchedulingRequest(
+        Arrays.asList(
+            schedulingRequest(1, 1, 1, 512, "foo"),
+            schedulingRequest(1, 2, 1, 512, "foo"),
+            schedulingRequest(1, 3, 1, 512, "foo"),
+            // Ask for a container larger than the node
+            schedulingRequest(1, 4, 1, 5120, "foo"))
+    );
+    AllocateResponse allocResponse = am1.schedule(); // send the request
+    List<Container> allocatedContainers = new ArrayList<>();
+    List<RejectedSchedulingRequest> rejectedReqs = new ArrayList<>();
+    int allocCount = 1;
+    allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+    rejectedReqs.addAll(allocResponse.getRejectedSchedulingRequests());
+
+    // kick the scheduler
+
+    while (allocCount < 11) {
+      nm1.nodeHeartbeat(true);
+      nm2.nodeHeartbeat(true);
+      nm3.nodeHeartbeat(true);
+      nm4.nodeHeartbeat(true);
+      LOG.info("Waiting for containers to be created for app 1...");
+      sleep(1000);
+      allocResponse = am1.schedule();
+      allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+      rejectedReqs.addAll(allocResponse.getRejectedSchedulingRequests());
+      allocCount++;
+      if (rejectedReqs.size() > 0 && allocatedContainers.size() > 2) {
+        break;
+      }
+    }
+
+    Assert.assertEquals(3, allocatedContainers.size());
+    Set<NodeId> nodeIds = allocatedContainers.stream()
+        .map(x -> x.getNodeId()).collect(Collectors.toSet());
+    // Ensure unique nodes
+    Assert.assertEquals(3, nodeIds.size());
+    RejectedSchedulingRequest rej = rejectedReqs.get(0);
+    Assert.assertEquals(4, rej.getRequest().getAllocationRequestId());
+    Assert.assertEquals(RejectionReason.COULD_NOT_SCHEDULE_ON_NODE,
+        rej.getReason());
+  }
+
+  @Test(timeout = 300000)
+  public void testRePlacementAfterSchedulerRejection() throws Exception {
+    stopRM();
+    CapacitySchedulerConfiguration csConf =
+        new CapacitySchedulerConfiguration();
+    YarnConfiguration conf = new YarnConfiguration(csConf);
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+    conf.setBoolean(
+        YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ENABLED, true);
+    conf.setInt(
+        YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS, 2);
+    startRM(conf);
+
+    HashMap<NodeId, MockNM> nodes = new HashMap<>();
+    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm1.getNodeId(), nm1);
+    MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm2.getNodeId(), nm2);
+    MockNM nm3 = new MockNM("h3:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm3.getNodeId(), nm3);
+    MockNM nm4 = new MockNM("h4:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm4.getNodeId(), nm4);
+    MockNM nm5 = new MockNM("h5:1234", 8192, rm.getResourceTrackerService());
+    nodes.put(nm5.getNodeId(), nm5);
+    nm1.registerNode();
+    nm2.registerNode();
+    nm3.registerNode();
+    nm4.registerNode();
+    // Do not register nm5 yet..
+
+    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2,
+        Collections.singletonMap(
+            Collections.singleton("foo"),
+            PlacementConstraints.build(
+                PlacementConstraints.targetNotIn(NODE, allocationTag("foo")))
+        ));
+    am1.addSchedulingRequest(
+        Arrays.asList(
+            schedulingRequest(1, 1, 1, 512, "foo"),
+            schedulingRequest(1, 2, 1, 512, "foo"),
+            schedulingRequest(1, 3, 1, 512, "foo"),
+            // Ask for a container larger than the node
+            schedulingRequest(1, 4, 1, 5120, "foo"))
+    );
+    AllocateResponse allocResponse = am1.schedule(); // send the request
+    List<Container> allocatedContainers = new ArrayList<>();
+    List<RejectedSchedulingRequest> rejectedReqs = new ArrayList<>();
+    int allocCount = 1;
+    allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+    rejectedReqs.addAll(allocResponse.getRejectedSchedulingRequests());
+
+    // Register node5 only after first allocate - so the initial placement
+    // for the large schedReq goes to some other node..
+    nm5.registerNode();
+
+    // kick the scheduler
+    while (allocCount < 11) {
+      nm1.nodeHeartbeat(true);
+      nm2.nodeHeartbeat(true);
+      nm3.nodeHeartbeat(true);
+      nm4.nodeHeartbeat(true);
+      nm5.nodeHeartbeat(true);
+      LOG.info("Waiting for containers to be created for app 1...");
+      sleep(1000);
+      allocResponse = am1.schedule();
+      allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+      rejectedReqs.addAll(allocResponse.getRejectedSchedulingRequests());
+      allocCount++;
+      if (allocatedContainers.size() > 3) {
+        break;
+      }
+    }
+
+    Assert.assertEquals(4, allocatedContainers.size());
+    Set<NodeId> nodeIds = allocatedContainers.stream()
+        .map(x -> x.getNodeId()).collect(Collectors.toSet());
+    // Ensure unique nodes
+    Assert.assertEquals(4, nodeIds.size());
+  }
+
+  @Test(timeout = 300000)
+  public void testPlacementRejection() throws Exception {
+    HashMap<NodeId, MockNM> nodes = new HashMap<>();
+    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm1.getNodeId(), nm1);
+    MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm2.getNodeId(), nm2);
+    MockNM nm3 = new MockNM("h3:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm3.getNodeId(), nm3);
+    MockNM nm4 = new MockNM("h4:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm4.getNodeId(), nm4);
+    nm1.registerNode();
+    nm2.registerNode();
+    nm3.registerNode();
+    nm4.registerNode();
+
+    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2,
+        Collections.singletonMap(
+            Collections.singleton("foo"),
+            PlacementConstraints.build(
+                PlacementConstraints.targetNotIn(NODE, allocationTag("foo")))
+        ));
+    am1.addSchedulingRequest(
+        Arrays.asList(
+            schedulingRequest(1, 1, 1, 512, "foo"),
+            schedulingRequest(1, 2, 1, 512, "foo"),
+            schedulingRequest(1, 3, 1, 512, "foo"),
+            schedulingRequest(1, 4, 1, 512, "foo"),
+            // Ask for more containers than nodes
+            schedulingRequest(1, 5, 1, 512, "foo"))
+    );
+    AllocateResponse allocResponse = am1.schedule(); // send the request
+    List<Container> allocatedContainers = new ArrayList<>();
+    List<RejectedSchedulingRequest> rejectedReqs = new ArrayList<>();
+    int allocCount = 1;
+    allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+    rejectedReqs.addAll(allocResponse.getRejectedSchedulingRequests());
+
+    // kick the scheduler
+
+    while (allocCount < 11) {
+      nm1.nodeHeartbeat(true);
+      nm2.nodeHeartbeat(true);
+      nm3.nodeHeartbeat(true);
+      nm4.nodeHeartbeat(true);
+      LOG.info("Waiting for containers to be created for app 1...");
+      sleep(1000);
+      allocResponse = am1.schedule();
+      allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+      rejectedReqs.addAll(allocResponse.getRejectedSchedulingRequests());
+      allocCount++;
+      if (rejectedReqs.size() > 0 && allocatedContainers.size() > 3) {
+        break;
+      }
+    }
+
+    Assert.assertEquals(4, allocatedContainers.size());
+    Set<NodeId> nodeIds = allocatedContainers.stream()
+        .map(x -> x.getNodeId()).collect(Collectors.toSet());
+    // Ensure unique nodes
+    Assert.assertEquals(4, nodeIds.size());
+    RejectedSchedulingRequest rej = rejectedReqs.get(0);
+    Assert.assertEquals(RejectionReason.COULD_NOT_PLACE_ON_NODE,
+        rej.getReason());
+  }
+
+  private static SchedulingRequest schedulingRequest(
+      int priority, long allocReqId, int cores, int mem, String... tags) {
+    return schedulingRequest(priority, allocReqId, cores, mem,
+        ExecutionType.GUARANTEED, tags);
+  }
+
+  private static SchedulingRequest schedulingRequest(
+      int priority, long allocReqId, int cores, int mem,
+      ExecutionType execType, String... tags) {
+    return SchedulingRequest.newBuilder()
+        .priority(Priority.newInstance(priority))
+        .allocationRequestId(allocReqId)
+        .allocationTags(new HashSet<>(Arrays.asList(tags)))
+        .executionType(ExecutionTypeRequest.newInstance(execType, true))
+        .resourceSizing(
+            ResourceSizing.newInstance(1, Resource.newInstance(mem, cores)))
+        .build();
+  }
+}




[12/32] hadoop git commit: YARN-7669. API and interface modifications for placement constraint processor. (asuresh)

Posted by as...@apache.org.
YARN-7669. API and interface modifications for placement constraint processor. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06eb63e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06eb63e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06eb63e6

Branch: refs/heads/trunk
Commit: 06eb63e64b05e2e8bb8a76c15360ab0495f11317
Parents: 88d8d3f
Author: Arun Suresh <as...@apache.org>
Authored: Tue Dec 19 22:47:46 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../yarn/ams/ApplicationMasterServiceUtils.java |  16 +
 .../api/protocolrecords/AllocateResponse.java   |  23 +
 .../api/records/RejectedSchedulingRequest.java  |  70 +++
 .../yarn/api/records/RejectionReason.java       |  44 ++
 .../src/main/proto/yarn_protos.proto            |  10 +
 .../src/main/proto/yarn_service_protos.proto    |   1 +
 .../impl/pb/AllocateResponsePBImpl.java         |  85 ++++
 .../yarn/api/records/impl/pb/ProtoUtils.java    |  16 +
 .../pb/RejectedSchedulingRequestPBImpl.java     | 148 +++++++
 .../records/impl/pb/ResourceSizingPBImpl.java   |   8 +
 .../impl/pb/SchedulingRequestPBImpl.java        |  11 +
 .../hadoop/yarn/api/TestPBImplRecords.java      |   2 +
 .../resourcemanager/RMActiveServiceContext.java |   2 +-
 .../yarn/server/resourcemanager/RMContext.java  |   2 +-
 .../server/resourcemanager/RMContextImpl.java   |   2 +-
 .../server/resourcemanager/ResourceManager.java |   2 +-
 .../constraint/AllocationTagsManager.java       | 431 -------------------
 .../constraint/AllocationTagsNamespaces.java    |  31 --
 .../InvalidAllocationTagsQueryException.java    |  35 --
 .../constraint/AllocationTagsManager.java       | 431 +++++++++++++++++++
 .../constraint/AllocationTagsNamespaces.java    |  31 ++
 .../InvalidAllocationTagsQueryException.java    |  35 ++
 .../api/ConstraintPlacementAlgorithm.java       |  43 ++
 .../api/ConstraintPlacementAlgorithmInput.java  |  32 ++
 .../api/ConstraintPlacementAlgorithmOutput.java |  58 +++
 ...traintPlacementAlgorithmOutputCollector.java |  32 ++
 .../constraint/api/PlacedSchedulingRequest.java |  79 ++++
 .../constraint/api/SchedulingResponse.java      |  70 +++
 .../scheduler/constraint/api/package-info.java  |  28 ++
 .../constraint/TestAllocationTagsManager.java   | 328 --------------
 .../rmcontainer/TestRMContainerImpl.java        |   2 +-
 .../scheduler/capacity/TestUtils.java           |   2 +-
 .../constraint/TestAllocationTagsManager.java   | 328 ++++++++++++++
 .../scheduler/fifo/TestFifoScheduler.java       |   2 +-
 34 files changed, 1608 insertions(+), 832 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java
index 476da8b..8bdfaf3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.ams;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerError;
 import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 
@@ -86,4 +87,19 @@ public final class ApplicationMasterServiceUtils {
     }
     allocateResponse.setAllocatedContainers(allocatedContainers);
   }
+
+  /**
+   * Add rejected Scheduling Requests to {@link AllocateResponse}.
+   * @param allocateResponse Allocate Response.
+   * @param rejectedRequests Rejected SchedulingRequests.
+   */
+  public static void addToRejectedSchedulingRequests(
+      AllocateResponse allocateResponse,
+      List<RejectedSchedulingRequest> rejectedRequests) {
+    if (allocateResponse.getRejectedSchedulingRequests() != null
+        && !allocateResponse.getRejectedSchedulingRequests().isEmpty()) {
+      rejectedRequests.addAll(allocateResponse.getRejectedSchedulingRequests());
+    }
+    allocateResponse.setRejectedSchedulingRequests(rejectedRequests);
+  }
 }
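
A rough sketch of how RM-side code could combine this helper with the
RejectedSchedulingRequest record introduced later in this patch; the surrounding class and
method are hypothetical, only the two API calls are from the patch.

// Hypothetical RM-side sketch: record a rejection and merge it into the
// AllocateResponse being returned to the AM.
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.yarn.ams.ApplicationMasterServiceUtils;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
import org.apache.hadoop.yarn.api.records.RejectionReason;
import org.apache.hadoop.yarn.api.records.SchedulingRequest;

final class RejectionResponseSketch {
  static void attachRejection(AllocateResponse response, SchedulingRequest failed) {
    List<RejectedSchedulingRequest> rejections = new ArrayList<>();
    rejections.add(RejectedSchedulingRequest.newInstance(
        RejectionReason.COULD_NOT_PLACE_ON_NODE, failed));
    // Appends to any rejections already present on the response, then sets the list.
    ApplicationMasterServiceUtils.addToRejectedSchedulingRequests(response, rejections);
  }
}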

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
index 655c6dc..52c30e2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.api.protocolrecords;
 
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -35,6 +36,7 @@ import org.apache.hadoop.yarn.api.records.NMToken;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.PreemptionMessage;
 import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.UpdateContainerError;
@@ -410,6 +412,27 @@ public abstract class AllocateResponse {
   public abstract void setContainersFromPreviousAttempts(
       List<Container> containersFromPreviousAttempt);
 
+  /**
+   * Get a list of all SchedulingRequests that the RM has rejected between
+   * this allocate call and the previous one.
+   * @return List of RejectedSchedulingRequests.
+   */
+  @Public
+  @Unstable
+  public List<RejectedSchedulingRequest> getRejectedSchedulingRequests() {
+    return Collections.EMPTY_LIST;
+  }
+
+  /**
+   * Add a list of rejected SchedulingRequests to the AllocateResponse.
+   * @param rejectedRequests List of Rejected Scheduling Requests.
+   */
+  @Private
+  @Unstable
+  public void setRejectedSchedulingRequests(
+      List<RejectedSchedulingRequest> rejectedRequests) {
+  }
+
   @Private
   @Unstable
   public static AllocateResponseBuilder newBuilder() {
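
Since each response only reports the requests rejected since the previous allocate call,
an AM is expected to collect them as they arrive. A minimal illustrative sketch (the class
and method names are hypothetical):

// Illustrative AM-side sketch: accumulate rejections across allocate() cycles.
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;

final class RejectionCollectorSketch {
  private final List<RejectedSchedulingRequest> rejected = new ArrayList<>();

  void onAllocateResponse(AllocateResponse response) {
    // Each response carries only the requests rejected since the last call.
    rejected.addAll(response.getRejectedSchedulingRequests());
  }
}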

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/RejectedSchedulingRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/RejectedSchedulingRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/RejectedSchedulingRequest.java
new file mode 100644
index 0000000..6e2d95b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/RejectedSchedulingRequest.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * This encapsulates a Rejected SchedulingRequest. It contains the offending
+ * Scheduling Request along with the reason for rejection.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public abstract class RejectedSchedulingRequest {
+
+  /**
+   * Create new RejectedSchedulingRequest.
+   * @param reason Rejection Reason.
+   * @param request Rejected Scheduling Request.
+   * @return RejectedSchedulingRequest.
+   */
+  public static RejectedSchedulingRequest newInstance(RejectionReason reason,
+      SchedulingRequest request) {
+    RejectedSchedulingRequest instance =
+        Records.newRecord(RejectedSchedulingRequest.class);
+    instance.setReason(reason);
+    instance.setRequest(request);
+    return instance;
+  }
+
+  /**
+   * Get Rejection Reason.
+   * @return Rejection reason.
+   */
+  public abstract RejectionReason getReason();
+
+  /**
+   * Set Rejection Reason.
+   * @param reason Rejection Reason.
+   */
+  public abstract void setReason(RejectionReason reason);
+
+  /**
+   * Get the Rejected Scheduling Request.
+   * @return SchedulingRequest.
+   */
+  public abstract SchedulingRequest getRequest();
+
+  /**
+   * Set the SchedulingRequest.
+   * @param request SchedulingRequest.
+   */
+  public abstract void setRequest(SchedulingRequest request);
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/RejectionReason.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/RejectionReason.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/RejectionReason.java
new file mode 100644
index 0000000..afbc2ed
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/RejectionReason.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Reason for rejecting a Scheduling Request.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public enum RejectionReason {
+  /**
+   * This is used to indicate a possible constraint violation. For example, if
+   * the App requested anti-affinity across 5 container requests, but only 4
+   * nodes exist. Another example could be if tag A has affinity with tag B and
+   * tag B has affinity with tag C, but tag A has anti-affinity with tag C, all
+   * at rack scope - and only 1 rack exists. Essentially, all situations where
+   * the Algorithm cannot assign a Node to a SchedulingRequest.
+   */
+  COULD_NOT_PLACE_ON_NODE,
+  /**
+   * This is used to indicate that, after the Algorithm has placed a Scheduling
+   * Request at a node, the commit failed because the Queue has no capacity,
+   * etc. This can be a transient situation.
+   */
+  COULD_NOT_SCHEDULE_ON_NODE
+}
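
A sketch of how an AM might react differently to the two reasons above; the retry/relax
policy shown here is an assumption for illustration, not part of this patch.

// Illustrative sketch: branch on RejectionReason when processing a rejection.
import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
import org.apache.hadoop.yarn.api.records.SchedulingRequest;

final class RejectionHandlerSketch {
  void handle(RejectedSchedulingRequest rejection) {
    SchedulingRequest request = rejection.getRequest();
    switch (rejection.getReason()) {
    case COULD_NOT_SCHEDULE_ON_NODE:
      // Commit failure can be transient (e.g. queue capacity); re-submitting
      // the same request on a later allocate() may succeed.
      resubmit(request);
      break;
    case COULD_NOT_PLACE_ON_NODE:
      // The algorithm could not satisfy the constraints at all; the AM has to
      // relax the constraint or reduce the number of requested allocations.
      relaxAndResubmit(request);
      break;
    default:
      break;
    }
  }

  void resubmit(SchedulingRequest request) { /* application-specific */ }
  void relaxAndResubmit(SchedulingRequest request) { /* application-specific */ }
}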

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index fdc39a7..5cb1177 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -424,6 +424,16 @@ enum AMCommandProto {
   AM_SHUTDOWN = 2;
 }
 
+enum RejectionReasonProto {
+  RRP_COULD_NOT_PLACE_ON_NODE = 1;
+  RRP_COULD_NOT_SCHEDULE_ON_NODE = 2;
+}
+
+message RejectedSchedulingRequestProto {
+  required RejectionReasonProto reason = 1;
+  required SchedulingRequestProto request = 2;
+}
+
 message PreemptionMessageProto {
   optional StrictPreemptionContractProto strictContract = 1;
   optional PreemptionContractProto contract = 2;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index e49c4e3..92a65ad 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -120,6 +120,7 @@ message AllocateResponseProto {
   repeated UpdateContainerErrorProto update_errors = 15;
   repeated UpdatedContainerProto updated_containers = 16;
   repeated ContainerProto containers_from_previous_attempts = 17;
+  repeated RejectedSchedulingRequestProto rejected_scheduling_requests = 18;
 }
 
 enum SchedulerResourceTypes {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
index 5ca1e73..3ab5563 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.api.records.NMToken;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.PreemptionMessage;
 import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.UpdateContainerError;
@@ -47,9 +48,11 @@ import org.apache.hadoop.yarn.api.records.impl.pb.NodeReportPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionMessagePBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.PriorityPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
+import org.apache.hadoop.yarn.api.records.impl.pb.RejectedSchedulingRequestPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.UpdatedContainerPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos;
 import org.apache.hadoop.yarn.proto.YarnProtos.CollectorInfoProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto;
@@ -81,6 +84,7 @@ public class AllocateResponsePBImpl extends AllocateResponse {
 
   private List<NodeReport> updatedNodes = null;
   private List<UpdateContainerError> updateErrors = null;
+  private List<RejectedSchedulingRequest> rejectedRequests = null;
   private PreemptionMessage preempt;
   private Token amrmToken = null;
   private Priority appPriority = null;
@@ -140,6 +144,13 @@ public class AllocateResponsePBImpl extends AllocateResponse {
           getContainerStatusProtoIterable(this.completedContainersStatuses);
       builder.addAllCompletedContainerStatuses(iterable);
     }
+    if (this.rejectedRequests != null) {
+      builder.clearRejectedSchedulingRequests();
+      Iterable<YarnProtos.RejectedSchedulingRequestProto> iterable =
+          getRejectedSchedulingRequestsProtoIterable(
+              this.rejectedRequests);
+      builder.addAllRejectedSchedulingRequests(iterable);
+    }
     if (this.updatedNodes != null) {
       builder.clearUpdatedNodes();
       Iterable<NodeReportProto> iterable =
@@ -471,6 +482,24 @@ public class AllocateResponsePBImpl extends AllocateResponse {
     containersFromPreviousAttempts.addAll(containers);
   }
 
+  @Override
+  public synchronized List<RejectedSchedulingRequest>
+      getRejectedSchedulingRequests() {
+    initRejectedRequestsList();
+    return this.rejectedRequests;
+  }
+
+  @Override
+  public synchronized void setRejectedSchedulingRequests(
+      List<RejectedSchedulingRequest> rejectedReqs) {
+    if (rejectedReqs == null) {
+      return;
+    }
+    initRejectedRequestsList();
+    this.rejectedRequests.clear();
+    this.rejectedRequests.addAll(rejectedReqs);
+  }
+
   private synchronized void initLocalUpdatedContainerList() {
     if (this.updatedContainers != null) {
       return;
@@ -528,6 +557,20 @@ public class AllocateResponsePBImpl extends AllocateResponse {
     }
   }
 
+  private synchronized void initRejectedRequestsList() {
+    if (this.rejectedRequests != null) {
+      return;
+    }
+    AllocateResponseProtoOrBuilder p = viaProto ? proto : builder;
+    List<YarnProtos.RejectedSchedulingRequestProto> list =
+        p.getRejectedSchedulingRequestsList();
+    rejectedRequests = new ArrayList<>();
+
+    for (YarnProtos.RejectedSchedulingRequestProto c : list) {
+      rejectedRequests.add(convertFromProtoFormat(c));
+    }
+  }
+
   private synchronized void initLocalNewNMTokenList() {
     if (nmTokens != null) {
       return;
@@ -712,6 +755,38 @@ public class AllocateResponsePBImpl extends AllocateResponse {
       }
     };
   }
+
+  private synchronized Iterable<YarnProtos.RejectedSchedulingRequestProto>
+      getRejectedSchedulingRequestsProtoIterable(
+      final List<RejectedSchedulingRequest> rejectedReqsList) {
+    maybeInitBuilder();
+    return new Iterable<YarnProtos.RejectedSchedulingRequestProto>() {
+      @Override
+      public Iterator<YarnProtos.RejectedSchedulingRequestProto> iterator() {
+        return new Iterator<YarnProtos.RejectedSchedulingRequestProto>() {
+
+          private Iterator<RejectedSchedulingRequest> iter =
+              rejectedReqsList.iterator();
+
+          @Override
+          public synchronized boolean hasNext() {
+            return iter.hasNext();
+          }
+
+          @Override
+          public synchronized YarnProtos.RejectedSchedulingRequestProto next() {
+            return convertToProtoFormat(iter.next());
+          }
+
+          @Override
+          public synchronized void remove() {
+            throw new UnsupportedOperationException();
+
+          }
+        };
+      }
+    };
+  }
   
   private synchronized Iterable<NodeReportProto>
   getNodeReportProtoIterable(
@@ -808,6 +883,16 @@ public class AllocateResponsePBImpl extends AllocateResponse {
     return ((ContainerStatusPBImpl)t).getProto();
   }
 
+  private synchronized RejectedSchedulingRequestPBImpl convertFromProtoFormat(
+      YarnProtos.RejectedSchedulingRequestProto p) {
+    return new RejectedSchedulingRequestPBImpl(p);
+  }
+
+  private synchronized YarnProtos.RejectedSchedulingRequestProto
+      convertToProtoFormat(RejectedSchedulingRequest t) {
+    return ((RejectedSchedulingRequestPBImpl)t).getProto();
+  }
+
   private synchronized ResourcePBImpl convertFromProtoFormat(ResourceProto p) {
     return new ResourcePBImpl(p);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
index 168d864..76e86ad 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.NodeUpdateType;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueState;
+import org.apache.hadoop.yarn.api.records.RejectionReason;
 import org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
@@ -233,6 +234,21 @@ public class ProtoUtils {
   }
 
   /*
+   * RejectionReason
+   */
+  private static final String REJECTION_REASON_PREFIX = "RRP_";
+  public static YarnProtos.RejectionReasonProto convertToProtoFormat(
+      RejectionReason e) {
+    return YarnProtos.RejectionReasonProto
+        .valueOf(REJECTION_REASON_PREFIX + e.name());
+  }
+  public static RejectionReason convertFromProtoFormat(
+      YarnProtos.RejectionReasonProto e) {
+    return RejectionReason.valueOf(e.name()
+        .replace(REJECTION_REASON_PREFIX, ""));
+  }
+
+  /*
    * ByteBuffer
    */
   public static ByteBuffer convertFromProtoFormat(ByteString byteString) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/RejectedSchedulingRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/RejectedSchedulingRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/RejectedSchedulingRequestPBImpl.java
new file mode 100644
index 0000000..ed78551
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/RejectedSchedulingRequestPBImpl.java
@@ -0,0 +1,148 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import com.google.protobuf.TextFormat;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
+import org.apache.hadoop.yarn.api.records.RejectionReason;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.proto.YarnProtos;
+
+/**
+ * Implementation of RejectedSchedulingRequest.
+ */
+public class RejectedSchedulingRequestPBImpl extends RejectedSchedulingRequest {
+
+  private YarnProtos.RejectedSchedulingRequestProto proto =
+      YarnProtos.RejectedSchedulingRequestProto.getDefaultInstance();
+  private YarnProtos.RejectedSchedulingRequestProto.Builder builder = null;
+  private boolean viaProto = false;
+  private SchedulingRequest request;
+
+  public RejectedSchedulingRequestPBImpl() {
+    builder = YarnProtos.RejectedSchedulingRequestProto.newBuilder();
+  }
+
+  public RejectedSchedulingRequestPBImpl(
+      YarnProtos.RejectedSchedulingRequestProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  public synchronized YarnProtos.RejectedSchedulingRequestProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null) {
+      return false;
+    }
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+
+  private synchronized void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private synchronized void mergeLocalToBuilder() {
+    if (this.request != null) {
+      builder.setRequest(convertToProtoFormat(this.request));
+    }
+  }
+  private synchronized void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = YarnProtos.RejectedSchedulingRequestProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  @Override
+  public synchronized RejectionReason getReason() {
+    YarnProtos.RejectedSchedulingRequestProtoOrBuilder p =
+        viaProto ? proto : builder;
+    if (!p.hasReason()) {
+      return null;
+    }
+    return ProtoUtils.convertFromProtoFormat(p.getReason());
+  }
+
+  @Override
+  public synchronized void setReason(RejectionReason reason) {
+    maybeInitBuilder();
+    if (reason == null) {
+      builder.clearReason();
+      return;
+    }
+    builder.setReason(ProtoUtils.convertToProtoFormat(reason));
+  }
+
+  @Override
+  public synchronized SchedulingRequest getRequest() {
+    YarnProtos.RejectedSchedulingRequestProtoOrBuilder p =
+        viaProto ? proto : builder;
+    if (this.request != null) {
+      return this.request;
+    }
+    if (!p.hasRequest()) {
+      return null;
+    }
+    this.request = convertFromProtoFormat(p.getRequest());
+    return this.request;
+  }
+
+  @Override
+  public synchronized void setRequest(SchedulingRequest req) {
+    maybeInitBuilder();
+    if (null == req) {
+      builder.clearRequest();
+    }
+    this.request = req;
+  }
+
+  private synchronized YarnProtos.SchedulingRequestProto convertToProtoFormat(
+      SchedulingRequest r) {
+    return ((SchedulingRequestPBImpl)r).getProto();
+  }
+
+  private synchronized SchedulingRequestPBImpl convertFromProtoFormat(
+      YarnProtos.SchedulingRequestProto p) {
+    return new SchedulingRequestPBImpl(p);
+  }
+}
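
A minimal round-trip sketch of the new record, restricted to the methods defined in this file: the reason is picked generically via values()[0] rather than naming a specific constant, and the SchedulingRequest field is left unset to keep the example self-contained.

    import org.apache.hadoop.yarn.api.records.RejectionReason;
    import org.apache.hadoop.yarn.api.records.impl.pb.RejectedSchedulingRequestPBImpl;
    import org.apache.hadoop.yarn.proto.YarnProtos;

    public class RejectedSchedulingRequestRoundTrip {
      public static void main(String[] args) {
        RejectedSchedulingRequestPBImpl record = new RejectedSchedulingRequestPBImpl();
        // Any RejectionReason value works for the round trip.
        record.setReason(RejectionReason.values()[0]);

        // Serialize local state into the proto, then rebuild the record from it.
        YarnProtos.RejectedSchedulingRequestProto proto = record.getProto();
        RejectedSchedulingRequestPBImpl copy =
            new RejectedSchedulingRequestPBImpl(proto);

        // The reason survives the round trip; the request was never set, so it stays null.
        System.out.println(copy.getReason() + ", request=" + copy.getRequest());
      }
    }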

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
index f98e488..4054837 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
@@ -114,4 +114,12 @@ public class ResourceSizingPBImpl extends ResourceSizing {
   private ResourceProto convertToProtoFormat(Resource r) {
     return ProtoUtils.convertToProtoFormat(r);
   }
+
+  @Override
+  public String toString() {
+    return "ResourceSizingPBImpl{" +
+        "numAllocations=" + getNumAllocations() +
+        ", resources=" + getResources() +
+        '}';
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
index 305856a..1f86043 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
@@ -279,4 +279,15 @@ public class SchedulingRequestPBImpl extends SchedulingRequest {
     }
     return false;
   }
+
+  @Override
+  public String toString() {
+    return "SchedulingRequestPBImpl{" +
+        "priority=" + getPriority() +
+        ", allocationReqId=" + getAllocationRequestId() +
+        ", executionType=" + getExecutionType() +
+        ", allocationTags=" + getAllocationTags() +
+        ", resourceSizing=" + getResourceSizing() +
+        '}';
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
index a0b907d..ae80910 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
@@ -138,6 +138,7 @@ import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.api.records.QueueStatistics;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
 import org.apache.hadoop.yarn.api.records.ReservationAllocationState;
 import org.apache.hadoop.yarn.api.records.ReservationDefinition;
 import org.apache.hadoop.yarn.api.records.ReservationId;
@@ -436,6 +437,7 @@ public class TestPBImplRecords extends BasePBImplRecordsTest {
     generateByNewInstance(ResourceTypeInfo.class);
     generateByNewInstance(ResourceSizing.class);
     generateByNewInstance(SchedulingRequest.class);
+    generateByNewInstance(RejectedSchedulingRequest.class);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
index 6ee3a4c..4d0c230 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
-import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
@@ -43,6 +42,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.monitor.RMAppLifetime
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed.QueueLimitCalculator;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
index 62899d9..00da108 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWri
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater;
-import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
@@ -44,6 +43,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAlloca
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed.QueueLimitCalculator;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
index 315fdc1..da50ef8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWri
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater;
-import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
@@ -50,6 +49,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAlloca
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed.QueueLimitCalculator;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index da0feda..a1d3dfc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -73,7 +73,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.metrics.TimelineServiceV2Pu
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.CombinedSystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
-import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
@@ -97,6 +96,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsManager.java
deleted file mode 100644
index b67fab9..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsManager.java
+++ /dev/null
@@ -1,431 +0,0 @@
-/*
- * *
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- * /
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.constraint;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.NodeId;
-import org.apache.hadoop.yarn.api.records.SchedulingRequest;
-import org.apache.log4j.Logger;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.function.LongBinaryOperator;
-
-/**
- * Support storing maps between container-tags/applications and
- * nodes. This will be required by affinity/anti-affinity implementation and
- * cardinality.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class AllocationTagsManager {
-
-  private static final Logger LOG = Logger.getLogger(
-      AllocationTagsManager.class);
-
-  private ReentrantReadWriteLock.ReadLock readLock;
-  private ReentrantReadWriteLock.WriteLock writeLock;
-
-  // Application's tags to node
-  private Map<ApplicationId, NodeToCountedTags> perAppMappings =
-      new HashMap<>();
-
-  // Global tags to node mapping (used to fast return aggregated tags
-  // cardinality across apps)
-  private NodeToCountedTags globalMapping = new NodeToCountedTags();
-
-  /**
-   * Store node to counted tags.
-   */
-  @VisibleForTesting
-  static class NodeToCountedTags {
-    // Map<NodeId, Map<Tag, Count>>
-    private Map<NodeId, Map<String, Long>> nodeToTagsWithCount =
-        new HashMap<>();
-
-    // protected by external locks
-    private void addTagsToNode(NodeId nodeId, Set<String> tags) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.computeIfAbsent(nodeId,
-          k -> new HashMap<>());
-
-      for (String tag : tags) {
-        Long count = innerMap.get(tag);
-        if (count == null) {
-          innerMap.put(tag, 1L);
-        } else{
-          innerMap.put(tag, count + 1);
-        }
-      }
-    }
-
-    // protected by external locks
-    private void addTagToNode(NodeId nodeId, String tag) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.computeIfAbsent(nodeId,
-          k -> new HashMap<>());
-
-      Long count = innerMap.get(tag);
-      if (count == null) {
-        innerMap.put(tag, 1L);
-      } else{
-        innerMap.put(tag, count + 1);
-      }
-    }
-
-    private void removeTagFromInnerMap(Map<String, Long> innerMap, String tag) {
-      Long count = innerMap.get(tag);
-      if (count > 1) {
-        innerMap.put(tag, count - 1);
-      } else {
-        if (count <= 0) {
-          LOG.warn(
-              "Trying to remove tags from node, however the count already"
-                  + " becomes 0 or less, it could be a potential bug.");
-        }
-        innerMap.remove(tag);
-      }
-    }
-
-    private void removeTagsFromNode(NodeId nodeId, Set<String> tags) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
-      if (innerMap == null) {
-        LOG.warn("Failed to find node=" + nodeId
-            + " while trying to remove tags, please double check.");
-        return;
-      }
-
-      for (String tag : tags) {
-        removeTagFromInnerMap(innerMap, tag);
-      }
-
-      if (innerMap.isEmpty()) {
-        nodeToTagsWithCount.remove(nodeId);
-      }
-    }
-
-    private void removeTagFromNode(NodeId nodeId, String tag) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
-      if (innerMap == null) {
-        LOG.warn("Failed to find node=" + nodeId
-            + " while trying to remove tags, please double check.");
-        return;
-      }
-
-      removeTagFromInnerMap(innerMap, tag);
-
-      if (innerMap.isEmpty()) {
-        nodeToTagsWithCount.remove(nodeId);
-      }
-    }
-
-    private long getCardinality(NodeId nodeId, String tag) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
-      if (innerMap == null) {
-        return 0;
-      }
-      Long value = innerMap.get(tag);
-      return value == null ? 0 : value;
-    }
-
-    private long getCardinality(NodeId nodeId, Set<String> tags,
-        LongBinaryOperator op) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
-      if (innerMap == null) {
-        return 0;
-      }
-
-      long returnValue = 0;
-      boolean firstTag = true;
-
-      if (tags != null && !tags.isEmpty()) {
-        for (String tag : tags) {
-          Long value = innerMap.get(tag);
-          if (value == null) {
-            value = 0L;
-          }
-
-          if (firstTag) {
-            returnValue = value;
-            firstTag = false;
-            continue;
-          }
-
-          returnValue = op.applyAsLong(returnValue, value);
-        }
-      } else {
-        // Similar to above if, but only iterate values for better performance
-        for (long value : innerMap.values()) {
-          // For the first value, we will not apply op
-          if (firstTag) {
-            returnValue = value;
-            firstTag = false;
-            continue;
-          }
-          returnValue = op.applyAsLong(returnValue, value);
-        }
-      }
-      return returnValue;
-    }
-
-    private boolean isEmpty() {
-      return nodeToTagsWithCount.isEmpty();
-    }
-
-    @VisibleForTesting
-    public Map<NodeId, Map<String, Long>> getNodeToTagsWithCount() {
-      return nodeToTagsWithCount;
-    }
-  }
-
-  @VisibleForTesting
-  Map<ApplicationId, NodeToCountedTags> getPerAppMappings() {
-    return perAppMappings;
-  }
-
-  @VisibleForTesting
-  NodeToCountedTags getGlobalMapping() {
-    return globalMapping;
-  }
-
-  public AllocationTagsManager() {
-    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
-    readLock = lock.readLock();
-    writeLock = lock.writeLock();
-  }
-
-  /**
-   * Notify container allocated on a node.
-   *
-   * @param nodeId         allocated node.
-   * @param applicationId  applicationId
-   * @param containerId    container id.
-   * @param allocationTags allocation tags, see
-   *                       {@link SchedulingRequest#getAllocationTags()}
-   *                       application_id will be added to allocationTags.
-   */
-  public void addContainer(NodeId nodeId, ApplicationId applicationId,
-      ContainerId containerId, Set<String> allocationTags) {
-    String applicationIdTag =
-        AllocationTagsNamespaces.APP_ID + applicationId.toString();
-
-    boolean useSet = false;
-    if (allocationTags != null && !allocationTags.isEmpty()) {
-      // Copy before edit it.
-      allocationTags = new HashSet<>(allocationTags);
-      allocationTags.add(applicationIdTag);
-      useSet = true;
-    }
-
-    writeLock.lock();
-    try {
-      NodeToCountedTags perAppTagsMapping = perAppMappings.computeIfAbsent(
-          applicationId, k -> new NodeToCountedTags());
-
-      if (useSet) {
-        perAppTagsMapping.addTagsToNode(nodeId, allocationTags);
-        globalMapping.addTagsToNode(nodeId, allocationTags);
-      } else {
-        perAppTagsMapping.addTagToNode(nodeId, applicationIdTag);
-        globalMapping.addTagToNode(nodeId, applicationIdTag);
-      }
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Added container=" + containerId + " with tags=[" + StringUtils
-                .join(allocationTags, ",") + "]");
-      }
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  /**
-   * Notify container removed.
-   *
-   * @param nodeId         nodeId
-   * @param applicationId  applicationId
-   * @param containerId    containerId.
-   * @param allocationTags allocation tags for given container
-   */
-  public void removeContainer(NodeId nodeId, ApplicationId applicationId,
-      ContainerId containerId, Set<String> allocationTags) {
-    String applicationIdTag =
-        AllocationTagsNamespaces.APP_ID + applicationId.toString();
-    boolean useSet = false;
-
-    if (allocationTags != null && !allocationTags.isEmpty()) {
-      // Copy before edit it.
-      allocationTags = new HashSet<>(allocationTags);
-      allocationTags.add(applicationIdTag);
-      useSet = true;
-    }
-
-    writeLock.lock();
-    try {
-      NodeToCountedTags perAppTagsMapping = perAppMappings.get(applicationId);
-      if (perAppTagsMapping == null) {
-        return;
-      }
-
-      if (useSet) {
-        perAppTagsMapping.removeTagsFromNode(nodeId, allocationTags);
-        globalMapping.removeTagsFromNode(nodeId, allocationTags);
-      } else {
-        perAppTagsMapping.removeTagFromNode(nodeId, applicationIdTag);
-        globalMapping.removeTagFromNode(nodeId, applicationIdTag);
-      }
-
-      if (perAppTagsMapping.isEmpty()) {
-        perAppMappings.remove(applicationId);
-      }
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Removed container=" + containerId + " with tags=[" + StringUtils
-                .join(allocationTags, ",") + "]");
-      }
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  /**
-   * Get cardinality for following conditions. External can pass-in a binary op
-   * to implement customized logic.   *
-   * @param nodeId        nodeId, required.
-   * @param applicationId applicationId. When null is specified, return
-   *                      aggregated cardinality among all nodes.
-   * @param tag           allocation tag, see
-   *                      {@link SchedulingRequest#getAllocationTags()},
-   *                      When multiple tags specified. Returns cardinality
-   *                      depends on op. If a specified tag doesn't exist,
-   *                      0 will be its cardinality.
-   *                      When null/empty tags specified, all tags
-   *                      (of the node/app) will be considered.
-   * @return cardinality of specified query on the node.
-   * @throws InvalidAllocationTagsQueryException when illegal query
-   *                                            parameter specified
-   */
-  public long getNodeCardinality(NodeId nodeId, ApplicationId applicationId,
-      String tag) throws InvalidAllocationTagsQueryException {
-    readLock.lock();
-
-    try {
-      if (nodeId == null) {
-        throw new InvalidAllocationTagsQueryException(
-            "Must specify nodeId/tags/op to query cardinality");
-      }
-
-      NodeToCountedTags mapping;
-      if (applicationId != null) {
-        mapping = perAppMappings.get(applicationId);
-      } else{
-        mapping = globalMapping;
-      }
-
-      if (mapping == null) {
-        return 0;
-      }
-
-      return mapping.getCardinality(nodeId, tag);
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  /**
-   * Check if given tag exists on node.
-   *
-   * @param nodeId        nodeId, required.
-   * @param applicationId applicationId. When null is specified, return
-   *                      aggregated cardinality among all nodes.
-   * @param tag           allocation tag, see
-   *                      {@link SchedulingRequest#getAllocationTags()},
-   *                      When multiple tags specified. Returns cardinality
-   *                      depends on op. If a specified tag doesn't exist,
-   *                      0 will be its cardinality.
-   *                      When null/empty tags specified, all tags
-   *                      (of the node/app) will be considered.
-   * @return cardinality of specified query on the node.
-   * @throws InvalidAllocationTagsQueryException when illegal query
-   *                                            parameter specified
-   */
-  public boolean allocationTagExistsOnNode(NodeId nodeId,
-      ApplicationId applicationId, String tag)
-      throws InvalidAllocationTagsQueryException {
-    return getNodeCardinality(nodeId, applicationId, tag) > 0;
-  }
-
-  /**
-   * Get cardinality for following conditions. External can pass-in a binary op
-   * to implement customized logic.
-   *
-   * @param nodeId        nodeId, required.
-   * @param applicationId applicationId. When null is specified, return
-   *                      aggregated cardinality among all nodes.
-   * @param tags          allocation tags, see
-   *                      {@link SchedulingRequest#getAllocationTags()},
-   *                      When multiple tags specified. Returns cardinality
-   *                      depends on op. If a specified tag doesn't exist, 0
-   *                      will be its cardinality. When null/empty tags
-   *                      specified, all tags (of the node/app) will be
-   *                      considered.
-   * @param op            operator. Such as Long::max, Long::sum, etc. Required.
-   *                      This sparameter only take effect when #values >= 2.
-   * @return cardinality of specified query on the node.
-   * @throws InvalidAllocationTagsQueryException when illegal query
-   *                                            parameter specified
-   */
-  public long getNodeCardinalityByOp(NodeId nodeId, ApplicationId applicationId,
-      Set<String> tags, LongBinaryOperator op)
-      throws InvalidAllocationTagsQueryException {
-    readLock.lock();
-
-    try {
-      if (nodeId == null || op == null) {
-        throw new InvalidAllocationTagsQueryException(
-            "Must specify nodeId/tags/op to query cardinality");
-      }
-
-      NodeToCountedTags mapping;
-      if (applicationId != null) {
-        mapping = perAppMappings.get(applicationId);
-      } else{
-        mapping = globalMapping;
-      }
-
-      if (mapping == null) {
-        return 0;
-      }
-
-      return mapping.getCardinality(nodeId, tags, op);
-    } finally {
-      readLock.unlock();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsNamespaces.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsNamespaces.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsNamespaces.java
deleted file mode 100644
index 893ff1c..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsNamespaces.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * *
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- * /
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.constraint;
-
-/**
- * Predefined namespaces for tags
- *
- * Same as namespace  of resource types. Namespaces of placement tags are start
- * with alphabets and ended with "/"
- */
-public class AllocationTagsNamespaces {
-  public static final String APP_ID = "yarn_app_id/";
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/InvalidAllocationTagsQueryException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/InvalidAllocationTagsQueryException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/InvalidAllocationTagsQueryException.java
deleted file mode 100644
index 5519e39..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/InvalidAllocationTagsQueryException.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * *
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- * /
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.constraint;
-
-import org.apache.hadoop.yarn.exceptions.YarnException;
-
-/**
- * Exception when invalid parameter specified to do placement tags related
- * queries.
- */
-public class InvalidAllocationTagsQueryException extends YarnException {
-  private static final long serialVersionUID = 12312831974894L;
-
-  public InvalidAllocationTagsQueryException(String msg) {
-    super(msg);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
new file mode 100644
index 0000000..c278606
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
@@ -0,0 +1,431 @@
+/*
+ * *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.log4j.Logger;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.LongBinaryOperator;
+
+/**
+ * Supports storing mappings between container tags/applications and nodes.
+ * This is required by the affinity/anti-affinity and cardinality
+ * implementations.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class AllocationTagsManager {
+
+  private static final Logger LOG = Logger.getLogger(
+      AllocationTagsManager.class);
+
+  private ReentrantReadWriteLock.ReadLock readLock;
+  private ReentrantReadWriteLock.WriteLock writeLock;
+
+  // Application's tags to node
+  private Map<ApplicationId, NodeToCountedTags> perAppMappings =
+      new HashMap<>();
+
+  // Global tags to node mapping (used to fast return aggregated tags
+  // cardinality across apps)
+  private NodeToCountedTags globalMapping = new NodeToCountedTags();
+
+  /**
+   * Store node to counted tags.
+   */
+  @VisibleForTesting
+  static class NodeToCountedTags {
+    // Map<NodeId, Map<Tag, Count>>
+    private Map<NodeId, Map<String, Long>> nodeToTagsWithCount =
+        new HashMap<>();
+
+    // protected by external locks
+    private void addTagsToNode(NodeId nodeId, Set<String> tags) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.computeIfAbsent(nodeId,
+          k -> new HashMap<>());
+
+      for (String tag : tags) {
+        Long count = innerMap.get(tag);
+        if (count == null) {
+          innerMap.put(tag, 1L);
+        } else{
+          innerMap.put(tag, count + 1);
+        }
+      }
+    }
+
+    // protected by external locks
+    private void addTagToNode(NodeId nodeId, String tag) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.computeIfAbsent(nodeId,
+          k -> new HashMap<>());
+
+      Long count = innerMap.get(tag);
+      if (count == null) {
+        innerMap.put(tag, 1L);
+      } else{
+        innerMap.put(tag, count + 1);
+      }
+    }
+
+    private void removeTagFromInnerMap(Map<String, Long> innerMap, String tag) {
+      Long count = innerMap.get(tag);
+      if (count > 1) {
+        innerMap.put(tag, count - 1);
+      } else {
+        if (count <= 0) {
+          LOG.warn(
+              "Trying to remove tags from node, however the count already"
+                  + " becomes 0 or less, it could be a potential bug.");
+        }
+        innerMap.remove(tag);
+      }
+    }
+
+    private void removeTagsFromNode(NodeId nodeId, Set<String> tags) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+      if (innerMap == null) {
+        LOG.warn("Failed to find node=" + nodeId
+            + " while trying to remove tags, please double check.");
+        return;
+      }
+
+      for (String tag : tags) {
+        removeTagFromInnerMap(innerMap, tag);
+      }
+
+      if (innerMap.isEmpty()) {
+        nodeToTagsWithCount.remove(nodeId);
+      }
+    }
+
+    private void removeTagFromNode(NodeId nodeId, String tag) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+      if (innerMap == null) {
+        LOG.warn("Failed to find node=" + nodeId
+            + " while trying to remove tags, please double check.");
+        return;
+      }
+
+      removeTagFromInnerMap(innerMap, tag);
+
+      if (innerMap.isEmpty()) {
+        nodeToTagsWithCount.remove(nodeId);
+      }
+    }
+
+    private long getCardinality(NodeId nodeId, String tag) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+      if (innerMap == null) {
+        return 0;
+      }
+      Long value = innerMap.get(tag);
+      return value == null ? 0 : value;
+    }
+
+    private long getCardinality(NodeId nodeId, Set<String> tags,
+        LongBinaryOperator op) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+      if (innerMap == null) {
+        return 0;
+      }
+
+      long returnValue = 0;
+      boolean firstTag = true;
+
+      if (tags != null && !tags.isEmpty()) {
+        for (String tag : tags) {
+          Long value = innerMap.get(tag);
+          if (value == null) {
+            value = 0L;
+          }
+
+          if (firstTag) {
+            returnValue = value;
+            firstTag = false;
+            continue;
+          }
+
+          returnValue = op.applyAsLong(returnValue, value);
+        }
+      } else {
+        // Similar to above if, but only iterate values for better performance
+        for (long value : innerMap.values()) {
+          // For the first value, we will not apply op
+          if (firstTag) {
+            returnValue = value;
+            firstTag = false;
+            continue;
+          }
+          returnValue = op.applyAsLong(returnValue, value);
+        }
+      }
+      return returnValue;
+    }
+
+    private boolean isEmpty() {
+      return nodeToTagsWithCount.isEmpty();
+    }
+
+    @VisibleForTesting
+    public Map<NodeId, Map<String, Long>> getNodeToTagsWithCount() {
+      return nodeToTagsWithCount;
+    }
+  }
+
+  @VisibleForTesting
+  Map<ApplicationId, NodeToCountedTags> getPerAppMappings() {
+    return perAppMappings;
+  }
+
+  @VisibleForTesting
+  NodeToCountedTags getGlobalMapping() {
+    return globalMapping;
+  }
+
+  public AllocationTagsManager() {
+    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+    readLock = lock.readLock();
+    writeLock = lock.writeLock();
+  }
+
+  /**
+   * Notify container allocated on a node.
+   *
+   * @param nodeId         allocated node.
+   * @param applicationId  applicationId
+   * @param containerId    container id.
+   * @param allocationTags allocation tags, see
+   *                       {@link SchedulingRequest#getAllocationTags()}.
+   *                       The application-id tag is added to allocationTags.
+   */
+  public void addContainer(NodeId nodeId, ApplicationId applicationId,
+      ContainerId containerId, Set<String> allocationTags) {
+    String applicationIdTag =
+        AllocationTagsNamespaces.APP_ID + applicationId.toString();
+
+    boolean useSet = false;
+    if (allocationTags != null && !allocationTags.isEmpty()) {
+      // Copy before edit it.
+      allocationTags = new HashSet<>(allocationTags);
+      allocationTags.add(applicationIdTag);
+      useSet = true;
+    }
+
+    writeLock.lock();
+    try {
+      NodeToCountedTags perAppTagsMapping = perAppMappings.computeIfAbsent(
+          applicationId, k -> new NodeToCountedTags());
+
+      if (useSet) {
+        perAppTagsMapping.addTagsToNode(nodeId, allocationTags);
+        globalMapping.addTagsToNode(nodeId, allocationTags);
+      } else {
+        perAppTagsMapping.addTagToNode(nodeId, applicationIdTag);
+        globalMapping.addTagToNode(nodeId, applicationIdTag);
+      }
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(
+            "Added container=" + containerId + " with tags=[" + StringUtils
+                .join(allocationTags, ",") + "]");
+      }
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  /**
+   * Notify container removed.
+   *
+   * @param nodeId         nodeId
+   * @param applicationId  applicationId
+   * @param containerId    containerId.
+   * @param allocationTags allocation tags for given container
+   */
+  public void removeContainer(NodeId nodeId, ApplicationId applicationId,
+      ContainerId containerId, Set<String> allocationTags) {
+    String applicationIdTag =
+        AllocationTagsNamespaces.APP_ID + applicationId.toString();
+    boolean useSet = false;
+
+    if (allocationTags != null && !allocationTags.isEmpty()) {
+      // Copy before edit it.
+      allocationTags = new HashSet<>(allocationTags);
+      allocationTags.add(applicationIdTag);
+      useSet = true;
+    }
+
+    writeLock.lock();
+    try {
+      NodeToCountedTags perAppTagsMapping = perAppMappings.get(applicationId);
+      if (perAppTagsMapping == null) {
+        return;
+      }
+
+      if (useSet) {
+        perAppTagsMapping.removeTagsFromNode(nodeId, allocationTags);
+        globalMapping.removeTagsFromNode(nodeId, allocationTags);
+      } else {
+        perAppTagsMapping.removeTagFromNode(nodeId, applicationIdTag);
+        globalMapping.removeTagFromNode(nodeId, applicationIdTag);
+      }
+
+      if (perAppTagsMapping.isEmpty()) {
+        perAppMappings.remove(applicationId);
+      }
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(
+            "Removed container=" + containerId + " with tags=[" + StringUtils
+                .join(allocationTags, ",") + "]");
+      }
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  /**
+   * Get the cardinality of a single allocation tag on a node.
+   *
+   * @param nodeId        nodeId, required.
+   * @param applicationId applicationId. When null is specified, the
+   *                      aggregated cardinality across all applications is
+   *                      returned.
+   * @param tag           allocation tag, see
+   *                      {@link SchedulingRequest#getAllocationTags()}.
+   *                      If the specified tag doesn't exist on the node, its
+   *                      cardinality is 0.
+   * @return cardinality of the specified query on the node.
+   * @throws InvalidAllocationTagsQueryException when an illegal query
+   *                                            parameter is specified
+   */
+  public long getNodeCardinality(NodeId nodeId, ApplicationId applicationId,
+      String tag) throws InvalidAllocationTagsQueryException {
+    readLock.lock();
+
+    try {
+      if (nodeId == null) {
+        throw new InvalidAllocationTagsQueryException(
+            "Must specify nodeId/tags/op to query cardinality");
+      }
+
+      NodeToCountedTags mapping;
+      if (applicationId != null) {
+        mapping = perAppMappings.get(applicationId);
+      } else{
+        mapping = globalMapping;
+      }
+
+      if (mapping == null) {
+        return 0;
+      }
+
+      return mapping.getCardinality(nodeId, tag);
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  /**
+   * Check if the given allocation tag exists on a node.
+   *
+   * @param nodeId        nodeId, required.
+   * @param applicationId applicationId. When null is specified, the tag is
+   *                      checked across all applications.
+   * @param tag           allocation tag, see
+   *                      {@link SchedulingRequest#getAllocationTags()}.
+   * @return true if the tag has a cardinality greater than 0 on the node.
+   * @throws InvalidAllocationTagsQueryException when an illegal query
+   *                                            parameter is specified
+   */
+  public boolean allocationTagExistsOnNode(NodeId nodeId,
+      ApplicationId applicationId, String tag)
+      throws InvalidAllocationTagsQueryException {
+    return getNodeCardinality(nodeId, applicationId, tag) > 0;
+  }
+
+  /**
+   * Get cardinality for the given conditions. Callers can pass in a binary op
+   * to implement customized logic.
+   *
+   * @param nodeId        nodeId, required.
+   * @param applicationId applicationId. When null is specified, the
+   *                      aggregated cardinality across all applications is
+   *                      returned.
+   * @param tags          allocation tags, see
+   *                      {@link SchedulingRequest#getAllocationTags()}.
+   *                      When multiple tags are specified, the returned
+   *                      cardinality depends on op. If a specified tag doesn't
+   *                      exist, its cardinality is 0. When null/empty tags are
+   *                      specified, all tags (of the node/app) are considered.
+   * @param op            operator, such as Long::max or Long::sum. Required.
+   *                      This parameter only takes effect when #values >= 2.
+   * @return cardinality of the specified query on the node.
+   * @throws InvalidAllocationTagsQueryException when an illegal query
+   *                                            parameter is specified
+   */
+  public long getNodeCardinalityByOp(NodeId nodeId, ApplicationId applicationId,
+      Set<String> tags, LongBinaryOperator op)
+      throws InvalidAllocationTagsQueryException {
+    readLock.lock();
+
+    try {
+      if (nodeId == null || op == null) {
+        throw new InvalidAllocationTagsQueryException(
+            "Must specify nodeId/tags/op to query cardinality");
+      }
+
+      NodeToCountedTags mapping;
+      if (applicationId != null) {
+        mapping = perAppMappings.get(applicationId);
+      } else{
+        mapping = globalMapping;
+      }
+
+      if (mapping == null) {
+        return 0;
+      }
+
+      return mapping.getCardinality(nodeId, tags, op);
+    } finally {
+      readLock.unlock();
+    }
+  }
+}
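
A small sketch of how a scheduler-side component might exercise the relocated AllocationTagsManager, using only the public methods defined above; the node, application and container ids are synthetic values created with the standard YARN record factory methods.

    import java.util.Arrays;
    import java.util.HashSet;

    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.api.records.NodeId;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.InvalidAllocationTagsQueryException;

    public class AllocationTagsManagerDemo {
      public static void main(String[] args)
          throws InvalidAllocationTagsQueryException {
        AllocationTagsManager atm = new AllocationTagsManager();

        // Synthetic ids for illustration only.
        NodeId node = NodeId.newInstance("host1", 1234);
        ApplicationId appId =
            ApplicationId.newInstance(System.currentTimeMillis(), 1);
        ContainerId containerId = ContainerId.newContainerId(
            ApplicationAttemptId.newInstance(appId, 1), 1L);

        // Record an allocation carrying the "hbase-rs" tag on host1.
        atm.addContainer(node, appId, containerId,
            new HashSet<>(Arrays.asList("hbase-rs")));

        // Per-app cardinality of the tag on that node (expected: 1).
        System.out.println(atm.getNodeCardinality(node, appId, "hbase-rs"));

        // Aggregated over the given tags with a binary operator, here Long::max.
        System.out.println(atm.getNodeCardinalityByOp(
            node, appId, new HashSet<>(Arrays.asList("hbase-rs")), Long::max));
      }
    }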

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsNamespaces.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsNamespaces.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsNamespaces.java
new file mode 100644
index 0000000..43fcfe5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsNamespaces.java
@@ -0,0 +1,31 @@
+/*
+ * *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+/**
+ * Predefined namespaces for allocation tags.
+ *
+ * Similar to the namespaces of resource types, namespaces of placement tags
+ * start with a letter and end with "/".
+ */
+public class AllocationTagsNamespaces {
+  public static final String APP_ID = "yarn_app_id/";
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/InvalidAllocationTagsQueryException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/InvalidAllocationTagsQueryException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/InvalidAllocationTagsQueryException.java
new file mode 100644
index 0000000..29483a2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/InvalidAllocationTagsQueryException.java
@@ -0,0 +1,35 @@
+/*
+ * *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+/**
+ * Exception when invalid parameter specified to do placement tags related
+ * queries.
+ */
+public class InvalidAllocationTagsQueryException extends YarnException {
+  private static final long serialVersionUID = 12312831974894L;
+
+  public InvalidAllocationTagsQueryException(String msg) {
+    super(msg);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithm.java
new file mode 100644
index 0000000..2651663
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithm.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api;
+
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+
+/**
+ * Interface for a Constraint Placement Algorithm. The only contract is that
+ * it should be initialized with the RMContext.
+ */
+public interface ConstraintPlacementAlgorithm {
+
+  /**
+   * Initialize the Algorithm.
+   * @param rmContext RMContext.
+   */
+  void init(RMContext rmContext);
+
+  /**
+   * The Algorithm is expected to compute the placement of the provided
+   * ConstraintPlacementAlgorithmInput and use the collector to aggregate
+   * any output.
+   * @param algorithmInput Input to the Algorithm.
+   * @param collector Collector for output of algorithm.
+   */
+  void place(ConstraintPlacementAlgorithmInput algorithmInput,
+      ConstraintPlacementAlgorithmOutputCollector collector);
+}
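
To make the contract concrete, a minimal hypothetical implementation could look like the sketch below; the class name and the trivial reject-everything behaviour are placeholders, not the processor added elsewhere in this series:

    public class RejectingPlacementAlgorithm
        implements ConstraintPlacementAlgorithm {

      private RMContext rmContext;

      @Override
      public void init(RMContext rmContext) {
        this.rmContext = rmContext;
      }

      @Override
      public void place(ConstraintPlacementAlgorithmInput algorithmInput,
          ConstraintPlacementAlgorithmOutputCollector collector) {
        // A real algorithm would consult the placement constraints and the
        // allocation tags through rmContext; this sketch rejects everything.
        // The ApplicationId here is a placeholder; callers would track it.
        ConstraintPlacementAlgorithmOutput out =
            new ConstraintPlacementAlgorithmOutput(
                ApplicationId.newInstance(0L, 1));
        out.getRejectedRequests().addAll(
            algorithmInput.getSchedulingRequests());
        collector.collect(out);
      }
    }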

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmInput.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmInput.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmInput.java
new file mode 100644
index 0000000..74572b8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmInput.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api;
+
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+
+import java.util.Collection;
+
+/**
+ * This encapsulates an input to the Constraint Placement Algorithm. At the
+ * very least it must consist of a collection of SchedulingRequests.
+ */
+public interface ConstraintPlacementAlgorithmInput {
+
+  Collection<SchedulingRequest> getSchedulingRequests();
+
+}
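
Since this is a single-method interface, a trivial input can simply wrap an existing collection, for example (a hypothetical sketch):

    List<SchedulingRequest> pending = new ArrayList<>();
    // ... add the SchedulingRequests received from the AM ...
    ConstraintPlacementAlgorithmInput input = () -> pending;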


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[28/32] hadoop git commit: YARN-7696. Add container tags to ContainerTokenIdentifier, api.Container and NMContainerStatus to handle all recovery cases. (asuresh)

Posted by as...@apache.org.
YARN-7696. Add container tags to ContainerTokenIdentifier, api.Container and NMContainerStatus to handle all recovery cases. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5c1fc88
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5c1fc88
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5c1fc88

Branch: refs/heads/trunk
Commit: a5c1fc881e21ebf43da7ead5f3852808fce25492
Parents: 4eda58c
Author: Arun Suresh <as...@apache.org>
Authored: Fri Jan 12 14:37:06 2018 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../hadoop/yarn/api/records/Container.java      | 15 +++++
 .../src/main/proto/yarn_protos.proto            |  1 +
 .../api/records/impl/pb/ContainerPBImpl.java    | 31 +++++++++
 .../yarn/security/ContainerTokenIdentifier.java | 69 +++++++++++++++++++-
 .../src/main/proto/yarn_security_token.proto    |  1 +
 .../api/protocolrecords/NMContainerStatus.java  | 14 ++++
 .../impl/pb/NMContainerStatusPBImpl.java        | 33 ++++++++++
 .../yarn_server_common_service_protos.proto     |  1 +
 .../containermanager/ContainerManagerImpl.java  |  3 +-
 .../container/ContainerImpl.java                | 19 +++---
 .../rmcontainer/RMContainerImpl.java            | 10 ++-
 .../scheduler/SchedulerApplicationAttempt.java  |  3 +-
 .../security/RMContainerTokenSecretManager.java | 21 ++----
 .../capacity/TestContainerAllocation.java       |  5 +-
 14 files changed, 194 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5c1fc88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
index 4fdc803..b9ca3f9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
@@ -27,6 +27,9 @@ import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
 import org.apache.hadoop.yarn.util.Records;
 
+import java.util.Collections;
+import java.util.Set;
+
 /**
  * {@code Container} represents an allocated resource in the cluster.
  * <p>
@@ -256,4 +259,16 @@ public abstract class Container implements Comparable<Container> {
   public void setVersion(int version) {
     throw new UnsupportedOperationException();
   }
+
+  @Private
+  @Unstable
+  public Set<String> getAllocationTags() {
+    return Collections.EMPTY_SET;
+  }
+
+  @Private
+  @Unstable
+  public void setAllocationTags(Set<String> allocationTags) {
+
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5c1fc88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 5cb1177..25c8569 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -152,6 +152,7 @@ message ContainerProto {
   optional ExecutionTypeProto execution_type = 7 [default = GUARANTEED];
   optional int64 allocation_request_id = 8 [default = -1];
   optional int32 version = 9 [default = 0];
+  repeated string allocation_tags = 10;
 }
 
 message ContainerReportProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5c1fc88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
index be84938..47be2f0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
@@ -36,6 +36,9 @@ import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ExecutionTypeProto;
 
+import java.util.HashSet;
+import java.util.Set;
+
 @Private
 @Unstable
 public class ContainerPBImpl extends Container {
@@ -49,6 +52,7 @@ public class ContainerPBImpl extends Container {
   private Resource resource = null;
   private Priority priority = null;
   private Token containerToken = null;
+  private Set<String> allocationTags = null;
 
   public ContainerPBImpl() {
     builder = ContainerProto.newBuilder();
@@ -106,6 +110,10 @@ public class ContainerPBImpl extends Container {
             builder.getContainerToken())) {
       builder.setContainerToken(convertToProtoFormat(this.containerToken));
     }
+    if (this.allocationTags != null) {
+      builder.clearAllocationTags();
+      builder.addAllAllocationTags(this.allocationTags);
+    }
   }
 
   private void mergeLocalToProto() {
@@ -284,6 +292,29 @@ public class ContainerPBImpl extends Container {
     builder.setVersion(version);
   }
 
+  private void initAllocationTags() {
+    if (this.allocationTags != null) {
+      return;
+    }
+    ContainerProtoOrBuilder p = viaProto ? proto : builder;
+    this.allocationTags = new HashSet<>();
+    this.allocationTags.addAll(p.getAllocationTagsList());
+  }
+
+  @Override
+  public Set<String> getAllocationTags() {
+    initAllocationTags();
+    return this.allocationTags;
+  }
+
+  @Override
+  public void setAllocationTags(Set<String> allocationTags) {
+    maybeInitBuilder();
+    builder.clearAllocationTags();
+    this.allocationTags = allocationTags;
+  }
+
+
   private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
     return new ContainerIdPBImpl(p);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5c1fc88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
index 9e7d132..70935cb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
@@ -22,6 +22,9 @@ import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -115,7 +118,7 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
     this(containerID, 0, hostName, appSubmitter, r, expiryTimeStamp,
         masterKeyId, rmIdentifier, priority, creationTime,
         logAggregationContext, nodeLabelExpression, containerType,
-        ExecutionType.GUARANTEED, -1);
+        ExecutionType.GUARANTEED, -1, null);
   }
 
   public ContainerTokenIdentifier(ContainerId containerID, int containerVersion,
@@ -127,15 +130,66 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
     this(containerID, containerVersion, hostName, appSubmitter, r,
         expiryTimeStamp, masterKeyId, rmIdentifier, priority, creationTime,
         logAggregationContext, nodeLabelExpression, containerType,
-        executionType, -1);
+        executionType, -1, null);
   }
 
+  /**
+   * Convenience Constructor for existing clients.
+   *
+   * @param containerID containerID
+   * @param containerVersion containerVersion
+   * @param hostName hostName
+   * @param appSubmitter appSubmitter
+   * @param r resource
+   * @param expiryTimeStamp expiryTimeStamp
+   * @param masterKeyId masterKeyId
+   * @param rmIdentifier rmIdentifier
+   * @param priority priority
+   * @param creationTime creationTime
+   * @param logAggregationContext logAggregationContext
+   * @param nodeLabelExpression nodeLabelExpression
+   * @param containerType containerType
+   * @param executionType executionType
+   * @param allocationRequestId allocationRequestId
+   */
   public ContainerTokenIdentifier(ContainerId containerID, int containerVersion,
       String hostName, String appSubmitter, Resource r, long expiryTimeStamp,
       int masterKeyId, long rmIdentifier, Priority priority, long creationTime,
       LogAggregationContext logAggregationContext, String nodeLabelExpression,
       ContainerType containerType, ExecutionType executionType,
       long allocationRequestId) {
+    this(containerID, containerVersion, hostName, appSubmitter, r,
+        expiryTimeStamp, masterKeyId, rmIdentifier, priority, creationTime,
+        logAggregationContext, nodeLabelExpression, containerType,
+        executionType, allocationRequestId, null);
+  }
+
+  /**
+   * Create a Container Token Identifier.
+   *
+   * @param containerID containerID
+   * @param containerVersion containerVersion
+   * @param hostName hostName
+   * @param appSubmitter appSubmitter
+   * @param r resource
+   * @param expiryTimeStamp expiryTimeStamp
+   * @param masterKeyId masterKeyId
+   * @param rmIdentifier rmIdentifier
+   * @param priority priority
+   * @param creationTime creationTime
+   * @param logAggregationContext logAggregationContext
+   * @param nodeLabelExpression nodeLabelExpression
+   * @param containerType containerType
+   * @param executionType executionType
+   * @param allocationRequestId allocationRequestId
+   * @param allocationTags Set of allocation Tags.
+   */
+  public ContainerTokenIdentifier(ContainerId containerID, int containerVersion,
+      String hostName, String appSubmitter, Resource r, long expiryTimeStamp,
+      int masterKeyId, long rmIdentifier, Priority priority, long creationTime,
+      LogAggregationContext logAggregationContext, String nodeLabelExpression,
+      ContainerType containerType, ExecutionType executionType,
+      long allocationRequestId, Set<String> allocationTags) {
     ContainerTokenIdentifierProto.Builder builder =
         ContainerTokenIdentifierProto.newBuilder();
     if (containerID != null) {
@@ -166,7 +220,9 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
     builder.setContainerType(convertToProtoFormat(containerType));
     builder.setExecutionType(convertToProtoFormat(executionType));
     builder.setAllocationRequestId(allocationRequestId);
-
+    if (allocationTags != null) {
+      builder.addAllAllocationTags(allocationTags);
+    }
     proto = builder.build();
   }
 
@@ -308,6 +364,13 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
     return CommonNodeLabelsManager.NO_LABEL;
   }
 
+  public Set<String> getAllcationTags() {
+    if (proto.getAllocationTagsList() != null) {
+      return new HashSet<>(proto.getAllocationTagsList());
+    }
+    return Collections.EMPTY_SET;
+  }
+
   // TODO: Needed?
   @InterfaceAudience.Private
   public static class Renewer extends Token.TrivialRenewer {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5c1fc88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarn_security_token.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarn_security_token.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarn_security_token.proto
index d8288ac..9aabd48 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarn_security_token.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarn_security_token.proto
@@ -54,6 +54,7 @@ message ContainerTokenIdentifierProto {
   optional ExecutionTypeProto executionType = 13 [default = GUARANTEED];
   optional int32 version = 14 [default = 0];
   optional int64 allocation_request_id = 15 [default = -1];
+  repeated string allocation_tags = 16;
 }
 
 message ClientToAMTokenIdentifierProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5c1fc88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NMContainerStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NMContainerStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NMContainerStatus.java
index 1a095f2..77b3df6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NMContainerStatus.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NMContainerStatus.java
@@ -27,6 +27,9 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
 import org.apache.hadoop.yarn.util.Records;
 
+import java.util.Collections;
+import java.util.Set;
+
 /**
  * NMContainerStatus includes the current information of a container. This
  * record is used by YARN only, whereas {@link ContainerStatus} is used both
@@ -161,4 +164,15 @@ public abstract class NMContainerStatus {
   }
 
   public void setExecutionType(ExecutionType executionType) { }
+
+  /**
+   * Get the allocation tags associated with the container.
+   */
+  public Set<String> getAllocationTags() {
+    return Collections.EMPTY_SET;
+  }
+
+  public void setAllocationTags(Set<String> allocationTags) {
+
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5c1fc88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NMContainerStatusPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NMContainerStatusPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NMContainerStatusPBImpl.java
index 8ed02fa..14f2241 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NMContainerStatusPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NMContainerStatusPBImpl.java
@@ -37,6 +37,9 @@ import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NMContainerSta
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NMContainerStatusProtoOrBuilder;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
 
+import java.util.HashSet;
+import java.util.Set;
+
 public class NMContainerStatusPBImpl extends NMContainerStatus {
 
   NMContainerStatusProto proto = NMContainerStatusProto
@@ -47,6 +50,7 @@ public class NMContainerStatusPBImpl extends NMContainerStatus {
   private ContainerId containerId = null;
   private Resource resource = null;
   private Priority priority = null;
+  private Set<String> allocationTags = null;
 
   public NMContainerStatusPBImpl() {
     builder = NMContainerStatusProto.newBuilder();
@@ -91,8 +95,11 @@ public class NMContainerStatusPBImpl extends NMContainerStatus {
         .append("Diagnostics: ").append(getDiagnostics()).append(", ")
         .append("ExitStatus: ").append(getContainerExitStatus()).append(", ")
         .append("NodeLabelExpression: ").append(getNodeLabelExpression())
+        .append(", ")
         .append("Priority: ").append(getPriority()).append(", ")
         .append("AllocationRequestId: ").append(getAllocationRequestId())
+        .append(", ")
+        .append("AllocationTags: ").append(getAllocationTags()).append(", ")
         .append("]");
     return sb.toString();
   }
@@ -283,6 +290,28 @@ public class NMContainerStatusPBImpl extends NMContainerStatus {
     builder.setAllocationRequestId(allocationRequestId);
   }
 
+  private void initAllocationTags() {
+    if (this.allocationTags != null) {
+      return;
+    }
+    NMContainerStatusProtoOrBuilder p = viaProto ? proto : builder;
+    this.allocationTags = new HashSet<>();
+    this.allocationTags.addAll(p.getAllocationTagsList());
+  }
+
+  @Override
+  public Set<String> getAllocationTags() {
+    initAllocationTags();
+    return this.allocationTags;
+  }
+
+  @Override
+  public void setAllocationTags(Set<String> allocationTags) {
+    maybeInitBuilder();
+    builder.clearAllocationTags();
+    this.allocationTags = allocationTags;
+  }
+
   private void mergeLocalToBuilder() {
     if (this.containerId != null
         && !((ContainerIdPBImpl) containerId).getProto().equals(
@@ -297,6 +326,10 @@ public class NMContainerStatusPBImpl extends NMContainerStatus {
     if (this.priority != null) {
       builder.setPriority(convertToProtoFormat(this.priority));
     }
+    if (this.allocationTags != null) {
+      builder.clearAllocationTags();
+      builder.addAllAllocationTags(this.allocationTags);
+    }
   }
 
   private void mergeLocalToProto() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5c1fc88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
index 8c4fc69..e782cc2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
@@ -177,6 +177,7 @@ message NMContainerStatusProto {
   optional int32 version = 9;
   optional ExecutionTypeProto executionType = 10 [default = GUARANTEED];
   optional int64 allocation_request_id = 11 [default = -1];
+  repeated string allocation_tags = 12;
 }
 
 message SCMUploaderNotifyRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5c1fc88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index 44bfc68..6b4d517 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -451,7 +451,8 @@ public class ContainerManagerImpl extends CompositeService implements
           originalToken.getLogAggregationContext(),
           originalToken.getNodeLabelExpression(),
           originalToken.getContainerType(), originalToken.getExecutionType(),
-          originalToken.getAllocationRequestId());
+          originalToken.getAllocationRequestId(),
+          originalToken.getAllcationTags());
 
     } else {
       token = BuilderUtils.newContainerTokenIdentifier(req.getContainerToken());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5c1fc88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 34be6c9..751beff 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -849,14 +849,17 @@ public class ContainerImpl implements Container {
   public NMContainerStatus getNMContainerStatus() {
     this.readLock.lock();
     try {
-      return NMContainerStatus.newInstance(this.containerId,
-          this.version, getCurrentState(), getResource(),
-          diagnostics.toString(), exitCode,
-          containerTokenIdentifier.getPriority(),
-          containerTokenIdentifier.getCreationTime(),
-          containerTokenIdentifier.getNodeLabelExpression(),
-          containerTokenIdentifier.getExecutionType(),
-          containerTokenIdentifier.getAllocationRequestId());
+      NMContainerStatus status =
+          NMContainerStatus.newInstance(this.containerId,
+              this.version, getCurrentState(), getResource(),
+              diagnostics.toString(), exitCode,
+              containerTokenIdentifier.getPriority(),
+              containerTokenIdentifier.getCreationTime(),
+              containerTokenIdentifier.getNodeLabelExpression(),
+              containerTokenIdentifier.getExecutionType(),
+              containerTokenIdentifier.getAllocationRequestId());
+      status.setAllocationTags(containerTokenIdentifier.getAllcationTags());
+      return status;
     } finally {
       this.readLock.unlock();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5c1fc88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index 2c4ef7b..563df0d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -530,12 +530,18 @@ public class RMContainerImpl implements RMContainer {
         RMContainerEvent event) {
       NMContainerStatus report =
           ((RMContainerRecoverEvent) event).getContainerReport();
+      // Set the allocation tags from the recovered container report
+      container.setAllocationTags(report.getAllocationTags());
+      // Notify AllocationTagsManager
+      container.rmContext.getAllocationTagsManager().addContainer(
+          container.getNodeId(), container.getContainerId(),
+          container.getAllocationTags());
+
       if (report.getContainerState().equals(ContainerState.COMPLETE)) {
         ContainerStatus status =
             ContainerStatus.newInstance(report.getContainerId(),
               report.getContainerState(), report.getDiagnostics(),
               report.getContainerExitStatus());
-
         new FinishedTransition().transition(container,
           new RMContainerFinishedEvent(container.getContainerId(), status,
             RMContainerEventType.FINISHED));
@@ -577,7 +583,7 @@ public class RMContainerImpl implements RMContainer {
 
     @Override
     public void transition(RMContainerImpl container, RMContainerEvent event) {
-      // Notify placementManager
+      // Notify AllocationTagsManager
       container.rmContext.getAllocationTagsManager().addContainer(
           container.getNodeId(), container.getContainerId(),
           container.getAllocationTags());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5c1fc88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index f02f113..88a9049 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -670,7 +670,8 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
               container.getPriority(), rmContainer.getCreationTime(),
               this.logAggregationContext, rmContainer.getNodeLabelExpression(),
               containerType, container.getExecutionType(),
-              container.getAllocationRequestId()));
+              container.getAllocationRequestId(),
+              rmContainer.getAllocationTags()));
       updateNMToken(container);
     } catch (IllegalArgumentException e) {
       // DNS might be down, skip returning this container.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5c1fc88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java
index 191900b..945d89e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java
@@ -18,9 +18,11 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.security;
 
+import java.util.Set;
 import java.util.Timer;
 import java.util.TimerTask;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -166,25 +168,14 @@ public class RMContainerTokenSecretManager extends
     }
   }
 
-  /**
-   * Helper function for creating ContainerTokens.
-   *
-   * @param containerId Container Id
-   * @param containerVersion Container Version
-   * @param nodeId Node Id
-   * @param appSubmitter App Submitter
-   * @param capability Capability
-   * @param priority Priority
-   * @param createTime Create Time
-   * @return the container-token
-   */
+  @VisibleForTesting
   public Token createContainerToken(ContainerId containerId,
       int containerVersion, NodeId nodeId, String appSubmitter,
       Resource capability, Priority priority, long createTime) {
     return createContainerToken(containerId, containerVersion, nodeId,
         appSubmitter, capability, priority, createTime,
         null, null, ContainerType.TASK,
-        ExecutionType.GUARANTEED, -1);
+        ExecutionType.GUARANTEED, -1, null);
   }
 
   /**
@@ -209,7 +200,7 @@ public class RMContainerTokenSecretManager extends
       Resource capability, Priority priority, long createTime,
       LogAggregationContext logAggregationContext, String nodeLabelExpression,
       ContainerType containerType, ExecutionType execType,
-      long allocationRequestId) {
+      long allocationRequestId, Set<String> allocationTags) {
     byte[] password;
     ContainerTokenIdentifier tokenIdentifier;
     long expiryTimeStamp =
@@ -224,7 +215,7 @@ public class RMContainerTokenSecretManager extends
               this.currentMasterKey.getMasterKey().getKeyId(),
               ResourceManager.getClusterTimeStamp(), priority, createTime,
               logAggregationContext, nodeLabelExpression, containerType,
-              execType, allocationRequestId);
+              execType, allocationRequestId, allocationTags);
       password = this.createPassword(tokenIdentifier);
 
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5c1fc88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
index 6f54d47..25e535a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -296,12 +297,12 @@ public class TestContainerAllocation {
             Resource capability, Priority priority, long createTime,
             LogAggregationContext logAggregationContext, String nodeLabelExp,
             ContainerType containerType, ExecutionType executionType,
-            long allocationRequestId) {
+            long allocationRequestId, Set<String> allocationTags) {
           numRetries++;
           return super.createContainerToken(containerId, containerVersion,
               nodeId, appSubmitter, capability, priority, createTime,
               logAggregationContext, nodeLabelExp, containerType,
-              executionType, allocationRequestId);
+              executionType, allocationRequestId, allocationTags);
         }
       };
     }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[11/32] hadoop git commit: YARN-7669. API and interface modifications for placement constraint processor. (asuresh)

Posted by as...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutput.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutput.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutput.java
new file mode 100644
index 0000000..9571f0e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutput.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Encapsulates the output of the ConstraintPlacementAlgorithm. The Algorithm
+ * is free to produce multiple output objects at the end of each run and it
+ * must use the provided ConstraintPlacementAlgorithmOutputCollector to
+ * aggregate/collect this output, similar to a MapReduce Mapper/Reducer,
+ * which is provided a collector to collect output.
+ */
+public class ConstraintPlacementAlgorithmOutput {
+
+  private final ApplicationId applicationId;
+
+  public ConstraintPlacementAlgorithmOutput(ApplicationId applicationId) {
+    this.applicationId = applicationId;
+  }
+
+  private final List<PlacedSchedulingRequest> placedRequests =
+      new ArrayList<>();
+
+  private final List<SchedulingRequest> rejectedRequests =
+      new ArrayList<>();
+
+  public List<PlacedSchedulingRequest> getPlacedRequests() {
+    return placedRequests;
+  }
+
+  public List<SchedulingRequest> getRejectedRequests() {
+    return rejectedRequests;
+  }
+
+  public ApplicationId getApplicationId() {
+    return applicationId;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutputCollector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutputCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutputCollector.java
new file mode 100644
index 0000000..131fd42
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutputCollector.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api;
+
+/**
+ * The ConstraintPlacementAlgorithm uses the
+ * ConstraintPlacementAlgorithmOutputCollector to collect any output it
+ * produces.
+ */
+public interface ConstraintPlacementAlgorithmOutputCollector {
+
+  /**
+   * Collect a ConstraintPlacementAlgorithm output.
+   * @param algorithmOutput ConstraintPlacementAlgorithm Output.
+   */
+  void collect(ConstraintPlacementAlgorithmOutput algorithmOutput);
+}
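
For instance, a caller that wants to inspect the algorithm output after a run could pass in a simple list-backed collector such as this sketch (hypothetical class, not part of the commit):

    public class ListBackedCollector
        implements ConstraintPlacementAlgorithmOutputCollector {

      private final List<ConstraintPlacementAlgorithmOutput> outputs =
          new ArrayList<>();

      @Override
      public void collect(ConstraintPlacementAlgorithmOutput algorithmOutput) {
        // Simply accumulate every output the algorithm emits.
        outputs.add(algorithmOutput);
      }

      public List<ConstraintPlacementAlgorithmOutput> getOutputs() {
        return outputs;
      }
    }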

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/PlacedSchedulingRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/PlacedSchedulingRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/PlacedSchedulingRequest.java
new file mode 100644
index 0000000..2cd90d6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/PlacedSchedulingRequest.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api;
+
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Class to encapsulate a placed SchedulingRequest.
+ * It holds the original SchedulingRequest and a list of SchedulerNodes, one
+ * for each allocation in the corresponding ResourceSizing's 'numAllocations'.
+ *
+ * NOTE: Clients of this class SHOULD NOT rely on the value of
+ *       resourceSizing.numAllocations and should instead use the size of
+ *       the collection returned by getNodes().
+ */
+public class PlacedSchedulingRequest {
+
+  // The number of times the Algorithm tried to place the SchedulingRequest
+  // after it was rejected by the commit phase of the Scheduler (due to some
+  // transient state of the cluster, e.g. no space on the Node or user limits).
+  // The Algorithm can then try to place it on a different node.
+  private int placementAttempt = 0;
+  private final SchedulingRequest request;
+  // One Node per requested allocation (numAllocations) in the SchedulingRequest.
+  private final List<SchedulerNode> nodes = new ArrayList<>();
+
+  public PlacedSchedulingRequest(SchedulingRequest request) {
+    this.request = request;
+  }
+
+  public SchedulingRequest getSchedulingRequest() {
+    return request;
+  }
+
+  /**
+   * List of Node locations on which this Scheduling Request can be placed.
+   * The size of this list = schedulingRequest.resourceSizing.numAllocations.
+   * @return List of Scheduler nodes.
+   */
+  public List<SchedulerNode> getNodes() {
+    return nodes;
+  }
+
+  public int getPlacementAttempt() {
+    return placementAttempt;
+  }
+
+  public void setPlacementAttempt(int attempt) {
+    this.placementAttempt = attempt;
+  }
+
+  @Override
+  public String toString() {
+    return "PlacedSchedulingRequest{" +
+        "placementAttempt=" + placementAttempt +
+        ", request=" + request +
+        ", nodes=" + nodes +
+        '}';
+  }
+}
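
As a usage sketch (the names schedulingRequest, chosenNode and algorithmOutput are assumed), an algorithm would typically build one PlacedSchedulingRequest per request it manages to place:

    PlacedSchedulingRequest placed =
        new PlacedSchedulingRequest(schedulingRequest);
    placed.setPlacementAttempt(0);
    // Add one SchedulerNode per allocation that could actually be placed;
    // consumers should rely on placed.getNodes().size(), not numAllocations.
    placed.getNodes().add(chosenNode);
    algorithmOutput.getPlacedRequests().add(placed);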

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/SchedulingResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/SchedulingResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/SchedulingResponse.java
new file mode 100644
index 0000000..6c65d84
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/SchedulingResponse.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+
+/**
+ * This class encapsulates the response received from the ResourceScheduler's
+ * attemptAllocateOnNode method.
+ */
+public class SchedulingResponse {
+
+  private final boolean isSuccess;
+  private final ApplicationId applicationId;
+  private final SchedulingRequest schedulingRequest;
+
+  /**
+   * Create a SchedulingResponse.
+   * @param isSuccess did scheduler accept.
+   * @param applicationId Application Id.
+   * @param schedulingRequest Scheduling Request.
+   */
+  public SchedulingResponse(boolean isSuccess, ApplicationId applicationId,
+      SchedulingRequest schedulingRequest) {
+    this.isSuccess = isSuccess;
+    this.applicationId = applicationId;
+    this.schedulingRequest = schedulingRequest;
+  }
+
+  /**
+   * Returns true if Scheduler was able to accept and commit this request.
+   * @return isSuccessful.
+   */
+  public boolean isSuccess() {
+    return this.isSuccess;
+  }
+
+  /**
+   * Get Application Id.
+   * @return Application Id.
+   */
+  public ApplicationId getApplicationId() {
+    return this.applicationId;
+  }
+
+  /**
+   * Get Scheduling Request.
+   * @return Scheduling Request.
+   */
+  public SchedulingRequest getSchedulingRequest() {
+    return this.schedulingRequest;
+  }
+
+}
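
A consumer of these responses might, for example, hand rejected requests back for another placement attempt; the retry queue below is an assumed detail, not something this patch introduces:

    if (!response.isSuccess()) {
      // The scheduler could not commit the placement (for example, the node
      // filled up in the meantime), so hand the request back for a retry.
      retryQueue.add(response.getSchedulingRequest());
    }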

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/package-info.java
new file mode 100644
index 0000000..01ed713
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains classes related to constrained placement of
+ * Requests.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/TestAllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/TestAllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/TestAllocationTagsManager.java
deleted file mode 100644
index 0358792..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/TestAllocationTagsManager.java
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * *
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- * /
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.constraint;
-
-import com.google.common.collect.ImmutableSet;
-import org.apache.hadoop.yarn.api.records.NodeId;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test functionality of AllocationTagsManager.
- */
-public class TestAllocationTagsManager {
-  @Test
-  public void testAllocationTagsManagerSimpleCases()
-      throws InvalidAllocationTagsQueryException {
-    AllocationTagsManager atm = new AllocationTagsManager();
-
-    /**
-     * Construct test case:
-     * Node1:
-     *    container_1_1 (mapper/reducer/app_1)
-     *    container_1_3 (service/app_1)
-     *
-     * Node2:
-     *    container_1_2 (mapper/reducer/app_1)
-     *    container_1_4 (reducer/app_1)
-     *    container_2_1 (service/app_2)
-     */
-
-    // 3 Containers from app1
-    atm.addContainer(NodeId.fromString("node1:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.addContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.addContainer(NodeId.fromString("node1:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
-        ImmutableSet.of("service"));
-
-    atm.addContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
-        ImmutableSet.of("reducer"));
-
-    // 1 Container from app2
-    atm.addContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
-        ImmutableSet.of("service"));
-
-    // Get Cardinality of app1 on node1, with tag "mapper"
-    Assert.assertEquals(1,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node1:1234"),
-            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
-            Long::max));
-
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=min
-    Assert.assertEquals(1,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1),
-            ImmutableSet.of("mapper", "reducer"), Long::min));
-
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=max
-    Assert.assertEquals(2,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1),
-            ImmutableSet.of("mapper", "reducer"), Long::max));
-
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=sum
-    Assert.assertEquals(3,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1),
-            ImmutableSet.of("mapper", "reducer"), Long::sum));
-
-    // Get Cardinality by passing single tag.
-    Assert.assertEquals(1,
-        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1), "mapper"));
-
-    Assert.assertEquals(2,
-        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1), "reducer"));
-
-    // Get Cardinality of app1 on node2, with tag "no_existed/reducer", op=min
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1),
-            ImmutableSet.of("no_existed", "reducer"), Long::min));
-
-    // Get Cardinality of app1 on node2, with tag "<applicationId>", op=max
-    // (Expect this returns #containers from app1 on node2)
-    Assert.assertEquals(2,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1), ImmutableSet
-                .of(AllocationTagsNamespaces.APP_ID + TestUtils
-                    .getMockApplicationId(1).toString()), Long::max));
-
-    // Get Cardinality of app1 on node2, with empty tag set, op=max
-    Assert.assertEquals(2,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::max));
-
-    // Get Cardinality of all apps on node2, with empty tag set, op=sum
-    Assert.assertEquals(7,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"), null,
-            ImmutableSet.of(), Long::sum));
-
-    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
-    Assert.assertEquals(5,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::sum));
-
-    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
-    Assert.assertEquals(2,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(2), ImmutableSet.of(), Long::sum));
-
-    // Finish all containers:
-    atm.removeContainer(NodeId.fromString("node1:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.removeContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.removeContainer(NodeId.fromString("node1:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
-        ImmutableSet.of("service"));
-
-    atm.removeContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
-        ImmutableSet.of("reducer"));
-
-    atm.removeContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
-        ImmutableSet.of("service"));
-
-    // Expect all cardinality to be 0
-    // Get Cardinality of app1 on node1, with tag "mapper"
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node1:1234"),
-            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
-            Long::max));
-
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=min
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1),
-            ImmutableSet.of("mapper", "reducer"), Long::min));
-
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=max
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1),
-            ImmutableSet.of("mapper", "reducer"), Long::max));
-
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=sum
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1),
-            ImmutableSet.of("mapper", "reducer"), Long::sum));
-
-    // Get Cardinality of app1 on node2, with tag "<applicationId>", op=max
-    // (Expect this returns #containers from app1 on node2)
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1),
-            ImmutableSet.of(TestUtils.getMockApplicationId(1).toString()),
-            Long::max));
-
-    Assert.assertEquals(0,
-        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1),
-            TestUtils.getMockApplicationId(1).toString()));
-
-    // Get Cardinality of app1 on node2, with empty tag set, op=max
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::max));
-
-    // Get Cardinality of all apps on node2, with empty tag set, op=sum
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"), null,
-            ImmutableSet.of(), Long::sum));
-
-    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::sum));
-
-    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(2), ImmutableSet.of(), Long::sum));
-  }
-
-  @Test
-  public void testAllocationTagsManagerMemoryAfterCleanup()
-      throws InvalidAllocationTagsQueryException {
-    /**
-     * Make sure YARN cleans up all memory once container/app finishes.
-     */
-
-    AllocationTagsManager atm = new AllocationTagsManager();
-
-    // Add a bunch of containers
-    atm.addContainer(NodeId.fromString("node1:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.addContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.addContainer(NodeId.fromString("node1:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
-        ImmutableSet.of("service"));
-
-    atm.addContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
-        ImmutableSet.of("reducer"));
-
-    atm.addContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
-        ImmutableSet.of("service"));
-
-    // Remove all these containers
-    atm.removeContainer(NodeId.fromString("node1:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.removeContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.removeContainer(NodeId.fromString("node1:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
-        ImmutableSet.of("service"));
-
-    atm.removeContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
-        ImmutableSet.of("reducer"));
-
-    atm.removeContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
-        ImmutableSet.of("service"));
-
-    // Check internal data structure
-    Assert.assertEquals(0,
-        atm.getGlobalMapping().getNodeToTagsWithCount().size());
-    Assert.assertEquals(0, atm.getPerAppMappings().size());
-  }
-
-  @Test
-  public void testQueryCardinalityWithIllegalParameters()
-      throws InvalidAllocationTagsQueryException {
-    /**
-     * Make sure YARN cleans up all memory once container/app finishes.
-     */
-
-    AllocationTagsManager atm = new AllocationTagsManager();
-
-    // Add a bunch of containers
-    atm.addContainer(NodeId.fromString("node1:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.addContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.addContainer(NodeId.fromString("node1:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
-        ImmutableSet.of("service"));
-
-    atm.addContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
-        ImmutableSet.of("reducer"));
-
-    atm.addContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
-        ImmutableSet.of("service"));
-
-    // No node-id
-    boolean caughtException = false;
-    try {
-      atm.getNodeCardinalityByOp(null, TestUtils.getMockApplicationId(2),
-          ImmutableSet.of("mapper"), Long::min);
-    } catch (InvalidAllocationTagsQueryException e) {
-      caughtException = true;
-    }
-    Assert.assertTrue("should fail because of nodeId specified",
-        caughtException);
-
-    // No op
-    caughtException = false;
-    try {
-      atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-          TestUtils.getMockApplicationId(2), ImmutableSet.of("mapper"), null);
-    } catch (InvalidAllocationTagsQueryException e) {
-      caughtException = true;
-    }
-    Assert.assertTrue("should fail because of nodeId specified",
-        caughtException);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
index 27ff311..538d128 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
@@ -54,7 +54,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
-import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
@@ -62,6 +61,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAt
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.junit.Assert;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
index 61a5555..e8734cc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
@@ -42,12 +42,12 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
-import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
new file mode 100644
index 0000000..4bb2a18
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
@@ -0,0 +1,328 @@
+/*
+ * *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test functionality of AllocationTagsManager.
+ */
+public class TestAllocationTagsManager {
+  @Test
+  public void testAllocationTagsManagerSimpleCases()
+      throws InvalidAllocationTagsQueryException {
+    AllocationTagsManager atm = new AllocationTagsManager();
+
+    /**
+     * Construct test case:
+     * Node1:
+     *    container_1_1 (mapper/reducer/app_1)
+     *    container_1_3 (service/app_1)
+     *
+     * Node2:
+     *    container_1_2 (mapper/reducer/app_1)
+     *    container_1_4 (reducer/app_1)
+     *    container_2_1 (service/app_2)
+     */
+
+    // 3 Containers from app1
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        ImmutableSet.of("reducer"));
+
+    // 1 Container from app2
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // Get Cardinality of app1 on node1, with tag "mapper"
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node1:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
+            Long::max));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=min
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::min));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=max
+    Assert.assertEquals(2,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::max));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=sum
+    Assert.assertEquals(3,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::sum));
+
+    // Get Cardinality by passing single tag.
+    Assert.assertEquals(1,
+        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), "mapper"));
+
+    Assert.assertEquals(2,
+        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), "reducer"));
+
+    // Get Cardinality of app1 on node2, with tag "no_existed/reducer", op=min
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("no_existed", "reducer"), Long::min));
+
+    // Get Cardinality of app1 on node2, with tag "<applicationId>", op=max
+    // (Expect this returns #containers from app1 on node2)
+    Assert.assertEquals(2,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet
+                .of(AllocationTagsNamespaces.APP_ID + TestUtils
+                    .getMockApplicationId(1).toString()), Long::max));
+
+    // Get Cardinality of app1 on node2, with empty tag set, op=max
+    Assert.assertEquals(2,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::max));
+
+    // Get Cardinality of all apps on node2, with empty tag set, op=sum
+    Assert.assertEquals(7,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"), null,
+            ImmutableSet.of(), Long::sum));
+
+    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
+    Assert.assertEquals(5,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::sum));
+
+    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
+    Assert.assertEquals(2,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(2), ImmutableSet.of(), Long::sum));
+
+    // Finish all containers:
+    atm.removeContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.removeContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        ImmutableSet.of("reducer"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // Expect all cardinality to be 0
+    // Get Cardinality of app1 on node1, with tag "mapper"
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node1:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
+            Long::max));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=min
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::min));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=max
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::max));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=sum
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::sum));
+
+    // Get Cardinality of app1 on node2, with tag "<applicationId>", op=max
+    // (Expect this returns #containers from app1 on node2)
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of(TestUtils.getMockApplicationId(1).toString()),
+            Long::max));
+
+    Assert.assertEquals(0,
+        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            TestUtils.getMockApplicationId(1).toString()));
+
+    // Get Cardinality of app1 on node2, with empty tag set, op=max
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::max));
+
+    // Get Cardinality of all apps on node2, with empty tag set, op=sum
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"), null,
+            ImmutableSet.of(), Long::sum));
+
+    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::sum));
+
+    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(2), ImmutableSet.of(), Long::sum));
+  }
+
+  @Test
+  public void testAllocationTagsManagerMemoryAfterCleanup()
+      throws InvalidAllocationTagsQueryException {
+    /**
+     * Make sure YARN cleans up all memory once container/app finishes.
+     */
+
+    AllocationTagsManager atm = new AllocationTagsManager();
+
+    // Add a bunch of containers
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        ImmutableSet.of("reducer"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // Remove all these containers
+    atm.removeContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.removeContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        ImmutableSet.of("reducer"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // Check internal data structure
+    Assert.assertEquals(0,
+        atm.getGlobalMapping().getNodeToTagsWithCount().size());
+    Assert.assertEquals(0, atm.getPerAppMappings().size());
+  }
+
+  @Test
+  public void testQueryCardinalityWithIllegalParameters()
+      throws InvalidAllocationTagsQueryException {
+    /**
+     * Make sure cardinality queries with illegal parameters (null node id
+     * or null op) are rejected.
+     */
+
+    AllocationTagsManager atm = new AllocationTagsManager();
+
+    // Add a bunch of containers
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        ImmutableSet.of("reducer"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // No node-id
+    boolean caughtException = false;
+    try {
+      atm.getNodeCardinalityByOp(null, TestUtils.getMockApplicationId(2),
+          ImmutableSet.of("mapper"), Long::min);
+    } catch (InvalidAllocationTagsQueryException e) {
+      caughtException = true;
+    }
+    Assert.assertTrue("should fail because of nodeId specified",
+        caughtException);
+
+    // No op
+    caughtException = false;
+    try {
+      atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+          TestUtils.getMockApplicationId(2), ImmutableSet.of("mapper"), null);
+    } catch (InvalidAllocationTagsQueryException e) {
+      caughtException = true;
+    }
+    Assert.assertTrue("should fail because of nodeId specified",
+        caughtException);
+  }
+}
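
(Outside the test harness, the same query pattern looks roughly like the
sketch below; the ids, node name and "hbase-m" tag are illustrative, and the
no-argument AllocationTagsManager constructor matches the one used in the test
above.)

import com.google.common.collect.ImmutableSet;

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;

public class AllocationTagsQuerySketch {
  public static void main(String[] args)
      throws InvalidAllocationTagsQueryException {
    AllocationTagsManager atm = new AllocationTagsManager();

    ApplicationId appId =
        ApplicationId.newInstance(System.currentTimeMillis(), 1);
    ContainerId containerId = ContainerId.newContainerId(
        ApplicationAttemptId.newInstance(appId, 1), 1L);
    NodeId node = NodeId.fromString("node1:1234");

    // Track one container tagged "hbase-m" on node1.
    atm.addContainer(node, appId, containerId, ImmutableSet.of("hbase-m"));

    // Count "hbase-m" containers of this application on node1; Long::max
    // picks the largest per-tag count among the requested tags.
    long cardinality = atm.getNodeCardinalityByOp(node, appId,
        ImmutableSet.of("hbase-m"), Long::max);
    System.out.println("hbase-m containers on node1 = " + cardinality);

    // Removing the container brings the count back to zero.
    atm.removeContainer(node, appId, containerId, ImmutableSet.of("hbase-m"));
  }
}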

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06eb63e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
index 4b902a7..db749ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
@@ -74,7 +74,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWri
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
-import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
@@ -93,6 +92,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestSchedulerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;




[19/32] hadoop git commit: YARN-7709. Remove SELF from TargetExpression type. (Konstantinos Karanasos via asuresh)

Posted by as...@apache.org.
YARN-7709. Remove SELF from TargetExpression type. (Konstantinos Karanasos via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8779a357
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8779a357
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8779a357

Branch: refs/heads/trunk
Commit: 8779a35742085fadddccc21342b55d4f17fae5c2
Parents: 29d9e4d
Author: Arun Suresh <as...@apache.org>
Authored: Thu Jan 18 04:29:57 2018 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../yarn/api/resource/PlacementConstraint.java  | 32 ++++++++++++++----
 .../yarn/api/resource/PlacementConstraints.java | 35 +++++++++-----------
 .../api/resource/TestPlacementConstraints.java  |  3 +-
 .../PlacementConstraintTransformations.java     | 19 +++--------
 .../TestPlacementConstraintTransformations.java | 35 +++++---------------
 .../constraint/PlacementConstraintsUtil.java    | 10 ++++--
 6 files changed, 64 insertions(+), 70 deletions(-)
----------------------------------------------------------------------
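
(The net effect on the builder API, sketched with an illustrative "spark" tag
and a bound of three per node: where cardinality constraints previously
counted allocations against the implicit SELF target, the tags to be counted
must now be passed explicitly.)

import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.maxCardinality;

import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraints;

public class CardinalityWithTagsSketch {
  public static void main(String[] args) {
    // At most 3 containers tagged "spark" on any single node; after this
    // change the tag argument is required.
    PlacementConstraint atMostThreeSparkPerNode =
        PlacementConstraints.build(maxCardinality(NODE, 3, "spark"));
    System.out.println(atMostThreeSparkPerNode.getConstraintExpr());
  }
}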


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8779a357/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
index b6e851a..4d998ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
@@ -242,7 +242,7 @@ public class PlacementConstraint {
      * Enum specifying the type of the target expression.
      */
     public enum TargetType {
-      NODE_ATTRIBUTE, ALLOCATION_TAG, SELF
+      NODE_ATTRIBUTE, ALLOCATION_TAG
     }
 
     private TargetType targetType;
@@ -418,23 +418,25 @@ public class PlacementConstraint {
   }
 
   /**
-   * Class that represents a cardinality constraint. Such a constraint the
-   * number of allocations within a given scope to some minimum and maximum
-   * values.
+   * Class that represents a cardinality constraint. Such a constraint allows
+   * the number of allocations with a specific set of tags and within a given
+   * scope to be between some minimum and maximum values.
    *
    * It is a specialized version of the {@link SingleConstraint}, where the
-   * target is self (i.e., the allocation to which the constraint is attached).
+   * target is a set of allocation tags.
    */
   public static class CardinalityConstraint extends AbstractConstraint {
     private String scope;
     private int minCardinality;
     private int maxCardinality;
+    private Set<String> allocationTags;
 
     public CardinalityConstraint(String scope, int minCardinality,
-        int maxCardinality) {
+        int maxCardinality, Set<String> allocationTags) {
       this.scope = scope;
       this.minCardinality = minCardinality;
       this.maxCardinality = maxCardinality;
+      this.allocationTags = allocationTags;
     }
 
     /**
@@ -464,11 +466,21 @@ public class PlacementConstraint {
       return maxCardinality;
     }
 
+    /**
+     * Get the allocation tags of the constraint.
+     *
+     * @return the allocation tags of the constraint
+     */
+    public Set<String> getAllocationTags() {
+      return allocationTags;
+    }
+
     @Override
     public <T> T accept(Visitor<T> visitor) {
       return visitor.visit(this);
     }
 
+
     @Override
     public boolean equals(Object o) {
       if (this == o) {
@@ -486,7 +498,11 @@ public class PlacementConstraint {
       if (maxCardinality != that.maxCardinality) {
         return false;
       }
-      return scope != null ? scope.equals(that.scope) : that.scope == null;
+      if (scope != null ? !scope.equals(that.scope) : that.scope != null) {
+        return false;
+      }
+      return allocationTags != null ? allocationTags.equals(that.allocationTags)
+          : that.allocationTags == null;
     }
 
     @Override
@@ -494,6 +510,8 @@ public class PlacementConstraint {
       int result = scope != null ? scope.hashCode() : 0;
       result = 31 * result + minCardinality;
       result = 31 * result + maxCardinality;
+      result = 31 * result
+          + (allocationTags != null ? allocationTags.hashCode() : 0);
       return result;
     }
   }
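
(A minimal construction sketch, with an illustrative "hb" tag and scope
string: the specialized constraint now records which allocation tags its
minimum and maximum bounds apply to.)

import java.util.Arrays;
import java.util.HashSet;

import org.apache.hadoop.yarn.api.resource.PlacementConstraint.CardinalityConstraint;

public class CardinalityConstraintSketch {
  public static void main(String[] args) {
    // Between 3 and 10 allocations tagged "hb" per rack.
    CardinalityConstraint cardinality = new CardinalityConstraint(
        "rack", 3, 10, new HashSet<>(Arrays.asList("hb")));
    System.out.println(cardinality.getScope() + " "
        + cardinality.getMinCardinality() + ".."
        + cardinality.getMaxCardinality() + " for tags "
        + cardinality.getAllocationTags());
  }
}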

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8779a357/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
index 8e84280..c8991cb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
@@ -95,40 +95,45 @@ public final class PlacementConstraints {
    *          the scope
    * @param maxCardinality determines the maximum number of allocations within
    *          the scope
+   * @param allocationTags the constraint targets allocations with these tags
    * @return the resulting placement constraint
    */
   public static AbstractConstraint cardinality(String scope, int minCardinality,
-      int maxCardinality) {
+      int maxCardinality, String... allocationTags) {
     return new SingleConstraint(scope, minCardinality, maxCardinality,
-        PlacementTargets.self());
+        PlacementTargets.allocationTag(allocationTags));
   }
 
   /**
-   * Similar to {@link #cardinality(String, int, int)}, but determines only the
-   * minimum cardinality (the maximum cardinality is unbound).
+   * Similar to {@link #cardinality(String, int, int, String...)}, but
+   * determines only the minimum cardinality (the maximum cardinality is
+   * unbound).
    *
    * @param scope the scope of the constraint
    * @param minCardinality determines the minimum number of allocations within
    *          the scope
+   * @param allocationTags the constraint targets allocations with these tags
    * @return the resulting placement constraint
    */
   public static AbstractConstraint minCardinality(String scope,
-      int minCardinality) {
-    return cardinality(scope, minCardinality, Integer.MAX_VALUE);
+      int minCardinality, String... allocationTags) {
+    return cardinality(scope, minCardinality, Integer.MAX_VALUE,
+        allocationTags);
   }
 
   /**
-   * Similar to {@link #cardinality(String, int, int)}, but determines only the
-   * maximum cardinality (the minimum can be as low as 0).
+   * Similar to {@link #cardinality(String, int, int, String...)}, but
+   * determines only the maximum cardinality (the minimum can be as low as 0).
    *
    * @param scope the scope of the constraint
    * @param maxCardinality determines the maximum number of allocations within
    *          the scope
+   * @param allocationTags the constraint targets allocations with these tags
    * @return the resulting placement constraint
    */
   public static AbstractConstraint maxCardinality(String scope,
-      int maxCardinality) {
-    return cardinality(scope, 0, maxCardinality);
+      int maxCardinality, String... allocationTags) {
+    return cardinality(scope, 0, maxCardinality, allocationTags);
   }
 
   /**
@@ -193,16 +198,6 @@ public final class PlacementConstraints {
       return new TargetExpression(TargetType.ALLOCATION_TAG, null,
           allocationTags);
     }
-
-    /**
-     * The default target expression that uses as target the allocation that
-     * specifies the constraint.
-     *
-     * @return the self-target
-     */
-    public static TargetExpression self() {
-      return new TargetExpression(TargetType.SELF);
-    }
   }
 
   // Creation of compound constraints.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8779a357/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraints.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraints.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraints.java
index e25d477..2f8cc62 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraints.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraints.java
@@ -86,7 +86,8 @@ public class TestPlacementConstraints {
   @Test
   public void testAndConstraint() {
     AbstractConstraint constraintExpr =
-        and(targetIn(RACK, allocationTag("spark")), maxCardinality(NODE, 3),
+        and(targetIn(RACK, allocationTag("spark")),
+            maxCardinality(NODE, 3, "spark"),
             targetCardinality(RACK, 2, 10, allocationTag("zk")));
 
     And andExpr = (And) constraintExpr;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8779a357/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
index e9eda6f..c5d21af 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
@@ -162,15 +162,16 @@ public class PlacementConstraintTransformations {
     public AbstractConstraint visit(CardinalityConstraint constraint) {
       return new SingleConstraint(constraint.getScope(),
           constraint.getMinCardinality(), constraint.getMaxCardinality(),
-          new TargetExpression(TargetExpression.TargetType.SELF));
+          new TargetExpression(TargetExpression.TargetType.ALLOCATION_TAG, null,
+              constraint.getAllocationTags()));
     }
   }
 
   /**
    * Visits a {@link PlacementConstraint} tree and, whenever possible,
-   * substitutes each {@link SingleConstraint} with a {@link TargetConstraint}
-   * or a {@link CardinalityConstraint}. When such a substitution is not
-   * possible, we keep the original {@link SingleConstraint}.
+   * substitutes each {@link SingleConstraint} with a {@link TargetConstraint}.
+   * When such a substitution is not possible, we keep the original
+   * {@link SingleConstraint}.
    */
   public static class SpecializedConstraintTransformer
       extends AbstractTransformer {
@@ -182,16 +183,6 @@ public class PlacementConstraintTransformations {
     @Override
     public AbstractConstraint visit(SingleConstraint constraint) {
       AbstractConstraint transformedConstraint = constraint;
-      // Check if it is a cardinality constraint.
-      if (constraint.getTargetExpressions().size() == 1) {
-        TargetExpression targetExpr =
-            constraint.getTargetExpressions().iterator().next();
-        if (targetExpr.getTargetType() == TargetExpression.TargetType.SELF) {
-          transformedConstraint = new CardinalityConstraint(
-              constraint.getScope(), constraint.getMinCardinality(),
-              constraint.getMaxCardinality());
-        }
-      }
       // Check if it is a target constraint.
       if (constraint.getMinCardinality() == 1
           && constraint.getMaxCardinality() == Integer.MAX_VALUE) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8779a357/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintTransformations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintTransformations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintTransformations.java
index 1763735..62da092 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintTransformations.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintTransformations.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.yarn.api.resource;
 
 import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
 import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
-import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.cardinality;
 import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.maxCardinality;
 import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.or;
 import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetCardinality;
@@ -89,42 +88,26 @@ public class TestPlacementConstraintTransformations {
 
   @Test
   public void testCardinalityConstraint() {
-    AbstractConstraint sConstraintExpr = cardinality(RACK, 3, 10);
-    Assert.assertTrue(sConstraintExpr instanceof SingleConstraint);
-    PlacementConstraint sConstraint =
-        PlacementConstraints.build(sConstraintExpr);
-
-    // Transform from SimpleConstraint to specialized CardinalityConstraint
-    SpecializedConstraintTransformer specTransformer =
-        new SpecializedConstraintTransformer(sConstraint);
-    PlacementConstraint cConstraint = specTransformer.transform();
-
-    AbstractConstraint cConstraintExpr = cConstraint.getConstraintExpr();
-    Assert.assertTrue(cConstraintExpr instanceof CardinalityConstraint);
-
-    SingleConstraint single = (SingleConstraint) sConstraintExpr;
-    CardinalityConstraint cardinality = (CardinalityConstraint) cConstraintExpr;
-    Assert.assertEquals(single.getScope(), cardinality.getScope());
-    Assert.assertEquals(single.getMinCardinality(),
-        cardinality.getMinCardinality());
-    Assert.assertEquals(single.getMaxCardinality(),
-        cardinality.getMaxCardinality());
+    CardinalityConstraint cardinality = new CardinalityConstraint(RACK, 3, 10,
+        new HashSet<>(Arrays.asList("hb")));
+    PlacementConstraint cConstraint = PlacementConstraints.build(cardinality);
 
     // Transform from specialized CardinalityConstraint to SimpleConstraint
     SingleConstraintTransformer singleTransformer =
         new SingleConstraintTransformer(cConstraint);
-    sConstraint = singleTransformer.transform();
+    PlacementConstraint sConstraint = singleTransformer.transform();
 
-    sConstraintExpr = sConstraint.getConstraintExpr();
+    AbstractConstraint sConstraintExpr = sConstraint.getConstraintExpr();
     Assert.assertTrue(sConstraintExpr instanceof SingleConstraint);
 
-    single = (SingleConstraint) sConstraintExpr;
+    SingleConstraint single = (SingleConstraint) sConstraintExpr;
     Assert.assertEquals(cardinality.getScope(), single.getScope());
     Assert.assertEquals(cardinality.getMinCardinality(),
         single.getMinCardinality());
     Assert.assertEquals(cardinality.getMaxCardinality(),
         single.getMaxCardinality());
-    Assert.assertEquals(new HashSet<>(Arrays.asList(PlacementTargets.self())),
+    Assert.assertEquals(
+        new HashSet<>(Arrays.asList(PlacementTargets.allocationTag("hb"))),
         single.getTargetExpressions());
   }
 
@@ -166,7 +149,7 @@ public class TestPlacementConstraintTransformations {
     List<AbstractConstraint> specChildren = specOrExpr.getChildren();
     Assert.assertEquals(3, specChildren.size());
     Assert.assertTrue(specChildren.get(0) instanceof TargetConstraint);
-    Assert.assertTrue(specChildren.get(1) instanceof CardinalityConstraint);
+    Assert.assertTrue(specChildren.get(1) instanceof SingleConstraint);
     Assert.assertTrue(specChildren.get(2) instanceof SingleConstraint);
 
     // Transform from specialized TargetConstraint to SimpleConstraint

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8779a357/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
index c4b82e8..73b4f9e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
@@ -24,10 +24,10 @@ import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
-import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
-import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression.TargetType;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression.TargetType;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations.SingleConstraintTransformer;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
@@ -118,6 +118,12 @@ public final class PlacementConstraintsUtil {
       TargetExpression currentExp = expIt.next();
       // Supporting AllocationTag Expressions for now
       if (currentExp.getTargetType().equals(TargetType.ALLOCATION_TAG)) {
+        // If source and target allocation tags are the same, we do not enforce
+        // constraints with minimum cardinality.
+        if (currentExp.getTargetValues().equals(allocationTags)
+            && single.getMinCardinality() > 0) {
+          return true;
+        }
         // Check if conditions are met
         if (!canSatisfySingleConstraintExpression(appId, single, currentExp,
             node, tagsManager)) {
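
[Editor's note, not part of the diff] A minimal sketch of the case the new check short-circuits, using the PlacementConstraints factory methods introduced by YARN-6593 below; the tag name "hb" and variable name are illustrative only.

    // Hypothetical intra-app affinity: containers tagged "hb" ask to land on
    // nodes that already host an "hb" allocation (minCardinality = 1).
    // Without the check above, the very first "hb" container could never be
    // placed, since no node has an "hb" allocation yet; the check skips
    // enforcement in exactly this source-tag == target-tag case.
    PlacementConstraint selfAffinity = PlacementConstraints.build(
        PlacementConstraints.targetCardinality(PlacementConstraints.NODE,
            1, Integer.MAX_VALUE,
            PlacementConstraints.PlacementTargets.allocationTag("hb")));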




[05/32] hadoop git commit: YARN-6593. [API] Introduce Placement Constraint object. (Konstantinos Karanasos via wangda)

Posted by as...@apache.org.
YARN-6593. [API] Introduce Placement Constraint object. (Konstantinos Karanasos via wangda)

Change-Id: Id00edb7185fdf01cce6e40f920cac3585f8cbe9c


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33a796d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33a796d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33a796d9

Branch: refs/heads/trunk
Commit: 33a796d9b778bf7350e87a4e36ca30c925cf7036
Parents: 1453a04
Author: Wangda Tan <wa...@apache.org>
Authored: Thu Aug 3 14:03:55 2017 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../yarn/api/resource/PlacementConstraint.java  | 567 +++++++++++++++++++
 .../yarn/api/resource/PlacementConstraints.java | 286 ++++++++++
 .../hadoop/yarn/api/resource/package-info.java  |  23 +
 .../src/main/proto/yarn_protos.proto            |  55 ++
 .../api/resource/TestPlacementConstraints.java  | 106 ++++
 .../PlacementConstraintFromProtoConverter.java  | 116 ++++
 .../pb/PlacementConstraintToProtoConverter.java | 174 ++++++
 .../apache/hadoop/yarn/api/pb/package-info.java |  23 +
 .../yarn/api/records/impl/pb/ProtoUtils.java    |  27 +
 .../PlacementConstraintTransformations.java     | 209 +++++++
 .../hadoop/yarn/api/resource/package-info.java  |  23 +
 .../TestPlacementConstraintPBConversion.java    | 195 +++++++
 .../TestPlacementConstraintTransformations.java | 183 ++++++
 13 files changed, 1987 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33a796d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
new file mode 100644
index 0000000..f0e3982
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
@@ -0,0 +1,567 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.resource;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
+/**
+ * {@code PlacementConstraint} represents a placement constraint for a resource
+ * allocation.
+ */
+@Public
+@Unstable
+public class PlacementConstraint {
+
+  /**
+   * The constraint expression tree.
+   */
+  private AbstractConstraint constraintExpr;
+
+  public PlacementConstraint(AbstractConstraint constraintExpr) {
+    this.constraintExpr = constraintExpr;
+  }
+
+  /**
+   * Get the constraint expression of the placement constraint.
+   *
+   * @return the constraint expression
+   */
+  public AbstractConstraint getConstraintExpr() {
+    return constraintExpr;
+  }
+
+  /**
+   * Interface used to enable the elements of the constraint tree to be visited.
+   */
+  @Private
+  public interface Visitable {
+    /**
+     * Visitor pattern.
+     *
+     * @param visitor visitor to be used
+     * @param <T> defines the type that the visitor will use and the return type
+     *          of the accept.
+     * @return the result of visiting a given object.
+     */
+    <T> T accept(Visitor<T> visitor);
+
+  }
+
+  /**
+   * Visitor API for a constraint tree.
+   *
+   * @param <T> determines the return type of the visit methods.
+   */
+  @Private
+  public interface Visitor<T> {
+    T visit(SingleConstraint constraint);
+
+    T visit(TargetExpression target);
+
+    T visit(TargetConstraint constraint);
+
+    T visit(CardinalityConstraint constraint);
+
+    T visit(And constraint);
+
+    T visit(Or constraint);
+
+    T visit(DelayedOr constraint);
+
+    T visit(TimedPlacementConstraint constraint);
+  }
+
+  /**
+   * Abstract class that acts as the superclass of all placement constraint
+   * classes.
+   */
+  public abstract static class AbstractConstraint implements Visitable {
+    public PlacementConstraint build() {
+      return new PlacementConstraint(this);
+    }
+  }
+
+  static final String NODE_SCOPE = "node";
+  static final String RACK_SCOPE = "rack";
+
+  /**
+   * Consider a set of nodes N that belongs to the scope specified in the
+   * constraint. If the target expressions are satisfied at least minCardinality
+   * times and at most maxCardinality times in the node set N, then the
+   * constraint is satisfied.
+   *
+   * For example, a constraint of the form {@code {RACK, 2, 10,
+   * allocationTag("zk")}} requires an allocation to be placed within a rack
+   * that has at least 2 and at most 10 other allocations with tag "zk".
+   */
+  public static class SingleConstraint extends AbstractConstraint {
+    private String scope;
+    private int minCardinality;
+    private int maxCardinality;
+    private Set<TargetExpression> targetExpressions;
+
+    public SingleConstraint(String scope, int minCardinality,
+        int maxCardinality, Set<TargetExpression> targetExpressions) {
+      this.scope = scope;
+      this.minCardinality = minCardinality;
+      this.maxCardinality = maxCardinality;
+      this.targetExpressions = targetExpressions;
+    }
+
+    public SingleConstraint(String scope, int minC, int maxC,
+        TargetExpression... targetExpressions) {
+      this(scope, minC, maxC, new HashSet<>(Arrays.asList(targetExpressions)));
+    }
+
+    /**
+     * Get the scope of the constraint.
+     *
+     * @return the scope of the constraint
+     */
+    public String getScope() {
+      return scope;
+    }
+
+    /**
+     * Get the minimum cardinality of the constraint.
+     *
+     * @return the minimum cardinality of the constraint
+     */
+    public int getMinCardinality() {
+      return minCardinality;
+    }
+
+    /**
+     * Get the maximum cardinality of the constraint.
+     *
+     * @return the maximum cardinality of the constraint
+     */
+    public int getMaxCardinality() {
+      return maxCardinality;
+    }
+
+    /**
+     * Get the target expressions of the constraint.
+     *
+     * @return the set of target expressions
+     */
+    public Set<TargetExpression> getTargetExpressions() {
+      return targetExpressions;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Class representing the target expressions that are used in placement
+   * constraints. They might refer to expressions on node attributes, allocation
+   * tags, or be self-targets (referring to the allocation to which the
+   * constraint is attached).
+   */
+  public static class TargetExpression implements Visitable {
+    /**
+     * Enum specifying the type of the target expression.
+     */
+    public enum TargetType {
+      NODE_ATTRIBUTE, ALLOCATION_TAG, SELF
+    }
+
+    private TargetType targetType;
+    private String targetKey;
+    private Set<String> targetValues;
+
+    public TargetExpression(TargetType targetType, String targetKey,
+        Set<String> targetValues) {
+      this.targetType = targetType;
+      this.targetKey = targetKey;
+      this.targetValues = targetValues;
+    }
+
+    public TargetExpression(TargetType targetType) {
+      this(targetType, null, new HashSet<>());
+    }
+
+    public TargetExpression(TargetType targetType, String targetKey,
+        String... targetValues) {
+      this(targetType, targetKey, new HashSet<>(Arrays.asList(targetValues)));
+    }
+
+    /**
+     * Get the type of the target expression.
+     *
+     * @return the type of the target expression
+     */
+    public TargetType getTargetType() {
+      return targetType;
+    }
+
+    /**
+     * Get the key of the target expression.
+     *
+     * @return the key of the target expression
+     */
+    public String getTargetKey() {
+      return targetKey;
+    }
+
+    /**
+     * Get the set of values of the target expression.
+     *
+     * @return the set of values of the target expression
+     */
+    public Set<String> getTargetValues() {
+      return targetValues;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = targetType != null ? targetType.hashCode() : 0;
+      result = 31 * result + (targetKey != null ? targetKey.hashCode() : 0);
+      result =
+          31 * result + (targetValues != null ? targetValues.hashCode() : 0);
+      return result;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (o == null) {
+        return false;
+      }
+      if (!(o instanceof TargetExpression)) {
+        return false;
+      }
+
+      TargetExpression that = (TargetExpression) o;
+      if (targetType != that.targetType) {
+        return false;
+      }
+      if (targetKey != null ? !targetKey.equals(that.targetKey)
+          : that.targetKey != null) {
+        return false;
+      }
+      return targetValues != null ? targetValues.equals(that.targetValues)
+          : that.targetValues == null;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Class that represents a target constraint. Such a constraint requires an
+   * allocation to be placed within a scope that satisfies some specified
+   * expressions on node attributes and allocation tags.
+   *
+   * It is a specialized version of the {@link SingleConstraint}, where the
+   * minimum and the maximum cardinalities take specific values based on the
+   * {@link TargetOperator} used.
+   */
+  public static class TargetConstraint extends AbstractConstraint {
+    enum TargetOperator {
+      IN, NOT_IN
+    }
+
+    private TargetOperator op;
+    private String scope;
+    private Set<TargetExpression> targetExpressions;
+
+    public TargetConstraint(TargetOperator op, String scope,
+        Set<TargetExpression> targetExpressions) {
+      this.op = op;
+      this.scope = scope;
+      this.targetExpressions = targetExpressions;
+    }
+
+    /**
+     * Get the target operator of the constraint.
+     *
+     * @return the target operator
+     */
+    public TargetOperator getOp() {
+      return op;
+    }
+
+    /**
+     * Get the scope of the constraint.
+     *
+     * @return the scope of the constraint
+     */
+    public String getScope() {
+      return scope;
+    }
+
+    /**
+     * Get the set of target expressions.
+     *
+     * @return the set of target expressions
+     */
+    public Set<TargetExpression> getTargetExpressions() {
+      return targetExpressions;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Class that represents a cardinality constraint. Such a constraint restricts
+   * the number of allocations within a given scope to some minimum and maximum
+   * values.
+   *
+   * It is a specialized version of the {@link SingleConstraint}, where the
+   * target is self (i.e., the allocation to which the constraint is attached).
+   */
+  public static class CardinalityConstraint extends AbstractConstraint {
+    private String scope;
+    private int minCardinality;
+    private int maxCardinality;
+
+    public CardinalityConstraint(String scope, int minCardinality,
+        int maxCardinality) {
+      this.scope = scope;
+      this.minCardinality = minCardinality;
+      this.maxCardinality = maxCardinality;
+    }
+
+    /**
+     * Get the scope of the constraint.
+     *
+     * @return the scope of the constraint
+     */
+    public String getScope() {
+      return scope;
+    }
+
+    /**
+     * Get the minimum cardinality of the constraint.
+     *
+     * @return the minimum cardinality of the constraint
+     */
+    public int getMinCardinality() {
+      return minCardinality;
+    }
+
+    /**
+     * Get the maximum cardinality of the constraint.
+     *
+     * @return the maximum cardinality of the constraint
+     */
+    public int getMaxCardinality() {
+      return maxCardinality;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Class that represents composite constraints, which comprise other
+   * constraints, forming a constraint tree.
+   *
+   * @param <R> the type of constraints that are used as children of the
+   *          specific composite constraint
+   */
+  public abstract static class CompositeConstraint<R extends Visitable>
+      extends AbstractConstraint {
+
+    /**
+     * Get the children of this composite constraint.
+     *
+     * @return the children of the composite constraint
+     */
+    public abstract List<R> getChildren();
+  }
+
+  /**
+   * Class that represents a composite constraint that is a conjunction of other
+   * constraints.
+   */
+  public static class And extends CompositeConstraint<AbstractConstraint> {
+    private List<AbstractConstraint> children;
+
+    public And(List<AbstractConstraint> children) {
+      this.children = children;
+    }
+
+    public And(AbstractConstraint... children) {
+      this(Arrays.asList(children));
+    }
+
+    @Override
+    public List<AbstractConstraint> getChildren() {
+      return children;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Class that represents a composite constraint that is a disjunction of other
+   * constraints.
+   */
+  public static class Or extends CompositeConstraint<AbstractConstraint> {
+    private List<AbstractConstraint> children;
+
+    public Or(List<AbstractConstraint> children) {
+      this.children = children;
+    }
+
+    public Or(AbstractConstraint... children) {
+      this(Arrays.asList(children));
+    }
+
+    @Override
+    public List<AbstractConstraint> getChildren() {
+      return children;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Class that represents a composite constraint that comprises a list of timed
+   * placement constraints (see {@link TimedPlacementConstraint}). The scheduler
+   * should first try to satisfy the first timed child constraint within the
+   * specified time window. If this is not possible, it should attempt to
+   * satisfy the second, and so on.
+   */
+  public static class DelayedOr
+      extends CompositeConstraint<TimedPlacementConstraint> {
+    private List<TimedPlacementConstraint> children = new ArrayList<>();
+
+    public DelayedOr(List<TimedPlacementConstraint> children) {
+      this.children = children;
+    }
+
+    public DelayedOr(TimedPlacementConstraint... children) {
+      this(Arrays.asList(children));
+    }
+
+    @Override
+    public List<TimedPlacementConstraint> getChildren() {
+      return children;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Represents a timed placement constraint that has to be satisfied within a
+   * time window.
+   */
+  public static class TimedPlacementConstraint implements Visitable {
+    /**
+     * The unit of scheduling delay.
+     */
+    public enum DelayUnit {
+      MILLISECONDS, OPPORTUNITIES
+    }
+
+    private AbstractConstraint constraint;
+    private long schedulingDelay;
+    private DelayUnit delayUnit;
+
+    public TimedPlacementConstraint(AbstractConstraint constraint,
+        long schedulingDelay, DelayUnit delayUnit) {
+      this.constraint = constraint;
+      this.schedulingDelay = schedulingDelay;
+      this.delayUnit = delayUnit;
+    }
+
+    public TimedPlacementConstraint(AbstractConstraint constraint,
+        long schedulingDelay) {
+      this(constraint, schedulingDelay, DelayUnit.MILLISECONDS);
+    }
+
+    public TimedPlacementConstraint(AbstractConstraint constraint) {
+      this(constraint, Long.MAX_VALUE, DelayUnit.MILLISECONDS);
+    }
+
+    /**
+     * Get the constraint that has to be satisfied within the time window.
+     *
+     * @return the constraint to be satisfied
+     */
+    public AbstractConstraint getConstraint() {
+      return constraint;
+    }
+
+    /**
+     * Sets the constraint that has to be satisfied within the time window.
+     *
+     * @param constraint the constraint to be satisfied
+     */
+    public void setConstraint(AbstractConstraint constraint) {
+      this.constraint = constraint;
+    }
+
+    /**
+     * Get the scheduling delay value that determines the time window within
+     * which the constraint has to be satisfied.
+     *
+     * @return the value of the scheduling delay
+     */
+    public long getSchedulingDelay() {
+      return schedulingDelay;
+    }
+
+    /**
+     * The unit of the scheduling delay.
+     *
+     * @return the unit of the delay
+     */
+    public DelayUnit getDelayUnit() {
+      return delayUnit;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+}
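
[Editor's note, not part of the commit] A minimal sketch of building a constraint tree directly with the classes above. The class name is illustrative, and the "node" scope literal is an assumption that mirrors the value of the package-private NODE_SCOPE constant.

    import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression.TargetType;

    public class ConstraintTreeSketch {
      public static void main(String[] args) {
        // Node-scope anti-affinity to allocations tagged "hbase-m":
        // min = max = 0 means the target may not appear on the chosen node.
        SingleConstraint noHbaseMaster = new SingleConstraint("node", 0, 0,
            new TargetExpression(TargetType.ALLOCATION_TAG, null, "hbase-m"));

        // At most 3 allocations of the requesting kind per node (self target).
        SingleConstraint atMostThree = new SingleConstraint("node", 0, 3,
            new TargetExpression(TargetType.SELF));

        // Conjunction of the two, wrapped into a PlacementConstraint.
        PlacementConstraint constraint =
            new And(noHbaseMaster, atMostThree).build();
        System.out.println(constraint.getConstraintExpr() != null);
      }
    }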

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33a796d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
new file mode 100644
index 0000000..8e84280
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
@@ -0,0 +1,286 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.resource;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.DelayedOr;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression.TargetType;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementConstraint;
+
+/**
+ * This class contains various static methods for the applications to create
+ * placement constraints (see also {@link PlacementConstraint}).
+ */
+@Public
+@Unstable
+public final class PlacementConstraints {
+
+  // Suppresses default constructor, ensuring non-instantiability.
+  private PlacementConstraints() {
+  }
+
+  // Creation of simple constraints.
+
+  public static final String NODE = PlacementConstraint.NODE_SCOPE;
+  public static final String RACK = PlacementConstraint.RACK_SCOPE;
+
+  /**
+   * Creates a constraint that requires allocations to be placed on nodes that
+   * satisfy all target expressions within the given scope (e.g., node or rack).
+   *
+   * For example, {@code targetIn(RACK, allocationTag("hbase-m"))} allows
+   * allocations on nodes that belong to a rack that has at least one
+   * allocation with tag "hbase-m".
+   *
+   * @param scope the scope within which the target expressions should be
+   *          satisfied
+   * @param targetExpressions the expressions that need to be satisfied within
+   *          the scope
+   * @return the resulting placement constraint
+   */
+  public static AbstractConstraint targetIn(String scope,
+      TargetExpression... targetExpressions) {
+    return new SingleConstraint(scope, 1, Integer.MAX_VALUE, targetExpressions);
+  }
+
+  /**
+   * Creates a constraint that requires allocations to be placed on nodes that
+   * belong to a scope (e.g., node or rack) that does not satisfy any of the
+   * target expressions.
+   *
+   * @param scope the scope within which the target expressions should not be
+   *          satisfied
+   * @param targetExpressions the expressions that must not be satisfied within
+   *          the scope
+   * @return the resulting placement constraint
+   */
+  public static AbstractConstraint targetNotIn(String scope,
+      TargetExpression... targetExpressions) {
+    return new SingleConstraint(scope, 0, 0, targetExpressions);
+  }
+
+  /**
+   * Creates a constraint that restricts the number of allocations within a
+   * given scope (e.g., node or rack).
+   *
+   * For example, {@code cardinality(NODE, 3, 10)} restricts the number of
+   * allocations per node to be no less than 3 and no more than 10.
+   *
+   * @param scope the scope of the constraint
+   * @param minCardinality determines the minimum number of allocations within
+   *          the scope
+   * @param maxCardinality determines the maximum number of allocations within
+   *          the scope
+   * @return the resulting placement constraint
+   */
+  public static AbstractConstraint cardinality(String scope, int minCardinality,
+      int maxCardinality) {
+    return new SingleConstraint(scope, minCardinality, maxCardinality,
+        PlacementTargets.self());
+  }
+
+  /**
+   * Similar to {@link #cardinality(String, int, int)}, but determines only the
+   * minimum cardinality (the maximum cardinality is unbounded).
+   *
+   * @param scope the scope of the constraint
+   * @param minCardinality determines the minimum number of allocations within
+   *          the scope
+   * @return the resulting placement constraint
+   */
+  public static AbstractConstraint minCardinality(String scope,
+      int minCardinality) {
+    return cardinality(scope, minCardinality, Integer.MAX_VALUE);
+  }
+
+  /**
+   * Similar to {@link #cardinality(String, int, int)}, but determines only the
+   * maximum cardinality (the minimum cardinality is 0).
+   *
+   * @param scope the scope of the constraint
+   * @param maxCardinality determines the maximum number of allocations within
+   *          the scope
+   * @return the resulting placement constraint
+   */
+  public static AbstractConstraint maxCardinality(String scope,
+      int maxCardinality) {
+    return cardinality(scope, 0, maxCardinality);
+  }
+
+  /**
+   * This constraint generalizes the cardinality and target constraints.
+   *
+   * Consider a set of nodes N that belongs to the scope specified in the
+   * constraint. If the target expressions are satisfied at least minCardinality
+   * times and at most maxCardinality times in the node set N, then the
+   * constraint is satisfied.
+   *
+   * For example, {@code targetCardinality(RACK, 2, 10, allocationTag("zk"))}
+   * requires an allocation to be placed within a rack that has at least 2 and
+   * at most 10 other allocations with tag "zk".
+   *
+   * @param scope the scope of the constraint
+   * @param minCardinality the minimum number of times the target expressions
+   *          have to be satisfied within the given scope
+   * @param maxCardinality the maximum number of times the target expressions
+   *          have to be satisfied within the given scope
+   * @param targetExpressions the target expressions
+   * @return the resulting placement constraint
+   */
+  public static AbstractConstraint targetCardinality(String scope,
+      int minCardinality, int maxCardinality,
+      TargetExpression... targetExpressions) {
+    return new SingleConstraint(scope, minCardinality, maxCardinality,
+        targetExpressions);
+  }
+
+  // Creation of target expressions to be used in simple constraints.
+
+  /**
+   * Class with static methods for constructing target expressions to be used in
+   * placement constraints.
+   */
+  public static class PlacementTargets {
+
+    /**
+     * Constructs a target expression on a node attribute. It is satisfied if
+     * the specified node attribute has one of the specified values.
+     *
+     * @param attributeKey the name of the node attribute
+     * @param attributeValues the set of values from which the attribute
+     *          should take its value
+     * @return the resulting expression on the node attribute
+     */
+    public static TargetExpression nodeAttribute(String attributeKey,
+        String... attributeValues) {
+      return new TargetExpression(TargetType.NODE_ATTRIBUTE, attributeKey,
+          attributeValues);
+    }
+
+    /**
+     * Constructs a target expression on an allocation tag. It is satisfied if
+     * there are allocations with one of the given tags.
+     *
+     * @param allocationTags the set of allocation tags that the expression
+     *          should match against
+     * @return the resulting expression on the allocation tags
+     */
+    public static TargetExpression allocationTag(String... allocationTags) {
+      return new TargetExpression(TargetType.ALLOCATION_TAG, null,
+          allocationTags);
+    }
+
+    /**
+     * The default target expression, which targets the allocation to which
+     * the constraint itself is attached.
+     *
+     * @return the self-target
+     */
+    public static TargetExpression self() {
+      return new TargetExpression(TargetType.SELF);
+    }
+  }
+
+  // Creation of compound constraints.
+
+  /**
+   * A conjunction of constraints.
+   *
+   * @param children the children constraints that should all be satisfied
+   * @return the resulting placement constraint
+   */
+  public static And and(AbstractConstraint... children) {
+    return new And(children);
+  }
+
+  /**
+   * A disjunction of constraints.
+   *
+   * @param children the children constraints, one of which should be satisfied
+   * @return the resulting placement constraint
+   */
+  public static Or or(AbstractConstraint... children) {
+    return new Or(children);
+  }
+
+  /**
+   * Creates a composite constraint that includes a list of timed placement
+   * constraints. The scheduler should first try to satisfy the first timed
+   * child constraint within the specified time window. If this is not possible,
+   * it should attempt to satisfy the second, and so on.
+   *
+   * @param children the timed children constraints
+   * @return the resulting composite constraint
+   */
+  public static DelayedOr delayedOr(TimedPlacementConstraint... children) {
+    return new DelayedOr(children);
+  }
+
+  // Creation of timed constraints to be used in a DELAYED_OR constraint.
+
+  /**
+   * Creates a placement constraint that has to be satisfied within a time
+   * window.
+   *
+   * @param constraint the placement constraint
+   * @param delay the length of the time window within which the constraint has
+   *          to be satisfied
+   * @param timeUnit the unit of time of the time window
+   * @return the resulting timed placement constraint
+   */
+  public static TimedPlacementConstraint timedClockConstraint(
+      AbstractConstraint constraint, long delay, TimeUnit timeUnit) {
+    return new TimedPlacementConstraint(constraint, timeUnit.toMillis(delay),
+        TimedPlacementConstraint.DelayUnit.MILLISECONDS);
+  }
+
+  /**
+   * Creates a placement constraint that has to be satisfied within a number of
+   * placement opportunities (invocations of the scheduler).
+   *
+   * @param constraint the placement constraint
+   * @param delay the number of scheduling opportunities within which the
+   *          constraint has to be satisfied
+   * @return the resulting timed placement constraint
+   */
+  public static TimedPlacementConstraint timedOpportunitiesConstraint(
+      AbstractConstraint constraint, long delay) {
+    return new TimedPlacementConstraint(constraint, delay,
+        TimedPlacementConstraint.DelayUnit.OPPORTUNITIES);
+  }
+
+  /**
+   * Creates a {@link PlacementConstraint} given a constraint expression.
+   *
+   * @param constraintExpr the constraint expression
+   * @return the placement constraint
+   */
+  public static PlacementConstraint build(AbstractConstraint constraintExpr) {
+    return constraintExpr.build();
+  }
+
+}
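
[Editor's note, not part of the commit] A sketch of the fluent factory methods above, modeled on the tests added in this patch; the class name and the 10-minute / 100-opportunity figures are illustrative assumptions.

    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.and;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.build;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.delayedOr;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.maxCardinality;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetCardinality;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.timedClockConstraint;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.timedOpportunitiesConstraint;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraint;

    public class FluentConstraintSketch {
      public static void main(String[] args) {
        // Rack affinity to "spark", at most 3 per node, and 2-10 "zk"
        // allocations on the chosen rack, all required at once.
        PlacementConstraint pc = build(
            and(targetIn(RACK, allocationTag("spark")),
                maxCardinality(NODE, 3),
                targetCardinality(RACK, 2, 10, allocationTag("zk"))));

        // Delayed-or: try the rack affinity for 10 minutes, then fall back
        // to the per-node cap for another 100 scheduling opportunities.
        PlacementConstraint fallback = build(
            delayedOr(
                timedClockConstraint(targetIn(RACK, allocationTag("spark")),
                    10, TimeUnit.MINUTES),
                timedOpportunitiesConstraint(maxCardinality(NODE, 3), 100)));

        System.out.println(pc.getConstraintExpr() != null
            && fallback.getConstraintExpr() != null);
      }
    }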

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33a796d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
new file mode 100644
index 0000000..660dc02
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * API related to resources.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.api.resource;
+import org.apache.hadoop.classification.InterfaceAudience;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33a796d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index b6ea5f9..ff0d54b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -579,6 +579,61 @@ enum SignalContainerCommandProto {
   FORCEFUL_SHUTDOWN = 3;
 }
 
+////////////////////////////////////////////////////////////////////////
+////// Placement constraints ///////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+
+message PlacementConstraintProto {
+  optional SimplePlacementConstraintProto simpleConstraint = 1;
+  optional CompositePlacementConstraintProto compositeConstraint = 2;
+}
+
+message SimplePlacementConstraintProto {
+  required string scope = 1;
+  repeated PlacementConstraintTargetProto targetExpressions = 2;
+  optional int32 minCardinality = 3;
+  optional int32 maxCardinality = 4;
+}
+
+message PlacementConstraintTargetProto {
+  enum TargetType {
+    NODE_ATTRIBUTE = 1;
+    ALLOCATION_TAG = 2;
+    SELF = 3;
+  }
+
+  required TargetType targetType = 1;
+  optional string targetKey = 2;
+  repeated string targetValues = 3;
+}
+
+message TimedPlacementConstraintProto {
+  enum DelayUnit {
+    MILLISECONDS = 1;
+    OPPORTUNITIES = 2;
+  }
+
+  required PlacementConstraintProto placementConstraint = 1;
+  required int64 schedulingDelay = 2;
+  optional DelayUnit delayUnit = 3 [ default = MILLISECONDS ];
+}
+
+message CompositePlacementConstraintProto {
+  enum CompositeType {
+    // All children constraints have to be satisfied.
+    AND = 1;
+    // One of the children constraints has to be satisfied.
+    OR = 2;
+    // Attempt to satisfy the first child constraint for delays[0] units (e.g.,
+    // millisec or heartbeats). If this fails, try to satisfy the second child
+    // constraint for delays[1] units and so on.
+    DELAYED_OR = 3;
+  }
+
+  required CompositeType compositeType = 1;
+  repeated PlacementConstraintProto childConstraints = 2;
+  repeated TimedPlacementConstraintProto timedChildConstraints = 3;
+}
 
 ////////////////////////////////////////////////////////////////////////
 ////// From reservation_protocol /////////////////////////////////////
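
[Editor's note, not part of the diff] A sketch of building one of the new messages by hand with the generated protobuf builders, relying on the same builder methods that the converters added by this commit use; the class and method names of the sketch itself are illustrative.

    import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;
    import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintTargetProto;
    import org.apache.hadoop.yarn.proto.YarnProtos.SimplePlacementConstraintProto;

    public class ConstraintProtoSketch {
      public static PlacementConstraintProto hbaseAffinityProto() {
        // Proto equivalent of targetIn(NODE, allocationTag("hbase-m")):
        // scope "node", minCardinality 1, unbounded maxCardinality.
        PlacementConstraintTargetProto target =
            PlacementConstraintTargetProto.newBuilder()
                .setTargetType(
                    PlacementConstraintTargetProto.TargetType.ALLOCATION_TAG)
                .addAllTargetValues(java.util.Arrays.asList("hbase-m"))
                .build();
        SimplePlacementConstraintProto simple =
            SimplePlacementConstraintProto.newBuilder()
                .setScope("node")
                .setMinCardinality(1)
                .setMaxCardinality(Integer.MAX_VALUE)
                .addTargetExpressions(target)
                .build();
        return PlacementConstraintProto.newBuilder()
            .setSimpleConstraint(simple)
            .build();
      }
    }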

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33a796d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraints.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraints.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraints.java
new file mode 100644
index 0000000..e25d477
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraints.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.resource;
+
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.and;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.maxCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.nodeAttribute;
+
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression.TargetType;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test class for the various static methods in
+ * {@link org.apache.hadoop.yarn.api.resource.PlacementConstraints}.
+ */
+public class TestPlacementConstraints {
+
+  @Test
+  public void testNodeAffinityToTag() {
+    AbstractConstraint constraintExpr =
+        targetIn(NODE, allocationTag("hbase-m"));
+
+    SingleConstraint sConstraint = (SingleConstraint) constraintExpr;
+    Assert.assertEquals(NODE, sConstraint.getScope());
+    Assert.assertEquals(1, sConstraint.getMinCardinality());
+    Assert.assertEquals(Integer.MAX_VALUE, sConstraint.getMaxCardinality());
+
+    Assert.assertEquals(1, sConstraint.getTargetExpressions().size());
+    TargetExpression tExpr =
+        sConstraint.getTargetExpressions().iterator().next();
+    Assert.assertNull(tExpr.getTargetKey());
+    Assert.assertEquals(TargetType.ALLOCATION_TAG, tExpr.getTargetType());
+    Assert.assertEquals(1, tExpr.getTargetValues().size());
+    Assert.assertEquals("hbase-m", tExpr.getTargetValues().iterator().next());
+
+    PlacementConstraint constraint = PlacementConstraints.build(constraintExpr);
+    Assert.assertNotNull(constraint.getConstraintExpr());
+  }
+
+  @Test
+  public void testNodeAntiAffinityToAttribute() {
+    AbstractConstraint constraintExpr =
+        targetNotIn(NODE, nodeAttribute("java", "1.8"));
+
+    SingleConstraint sConstraint = (SingleConstraint) constraintExpr;
+    Assert.assertEquals(NODE, sConstraint.getScope());
+    Assert.assertEquals(0, sConstraint.getMinCardinality());
+    Assert.assertEquals(0, sConstraint.getMaxCardinality());
+
+    Assert.assertEquals(1, sConstraint.getTargetExpressions().size());
+    TargetExpression tExpr =
+        sConstraint.getTargetExpressions().iterator().next();
+    Assert.assertEquals("java", tExpr.getTargetKey());
+    Assert.assertEquals(TargetType.NODE_ATTRIBUTE, tExpr.getTargetType());
+    Assert.assertEquals(1, tExpr.getTargetValues().size());
+    Assert.assertEquals("1.8", tExpr.getTargetValues().iterator().next());
+  }
+
+  @Test
+  public void testAndConstraint() {
+    AbstractConstraint constraintExpr =
+        and(targetIn(RACK, allocationTag("spark")), maxCardinality(NODE, 3),
+            targetCardinality(RACK, 2, 10, allocationTag("zk")));
+
+    And andExpr = (And) constraintExpr;
+    Assert.assertEquals(3, andExpr.getChildren().size());
+    SingleConstraint sConstr = (SingleConstraint) andExpr.getChildren().get(0);
+    TargetExpression tExpr = sConstr.getTargetExpressions().iterator().next();
+    Assert.assertEquals("spark", tExpr.getTargetValues().iterator().next());
+
+    sConstr = (SingleConstraint) andExpr.getChildren().get(1);
+    Assert.assertEquals(0, sConstr.getMinCardinality());
+    Assert.assertEquals(3, sConstr.getMaxCardinality());
+
+    sConstr = (SingleConstraint) andExpr.getChildren().get(2);
+    Assert.assertEquals(2, sConstr.getMinCardinality());
+    Assert.assertEquals(10, sConstr.getMaxCardinality());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33a796d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintFromProtoConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintFromProtoConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintFromProtoConverter.java
new file mode 100644
index 0000000..926b6fa
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintFromProtoConverter.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.pb;
+
+import static org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto.CompositeType.AND;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.DelayedOr;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementConstraint;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintTargetProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.SimplePlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.TimedPlacementConstraintProto;
+
+/**
+ * {@code PlacementConstraintFromProtoConverter} generates an
+ * {@link PlacementConstraint.AbstractConstraint} given a
+ * {@link PlacementConstraintProto}.
+ */
+@Private
+public class PlacementConstraintFromProtoConverter {
+
+  private PlacementConstraintProto constraintProto;
+
+  public PlacementConstraintFromProtoConverter(
+      PlacementConstraintProto constraintProto) {
+    this.constraintProto = constraintProto;
+  }
+
+  public PlacementConstraint convert() {
+    return new PlacementConstraint(convert(constraintProto));
+  }
+
+  private AbstractConstraint convert(PlacementConstraintProto proto) {
+    return proto.hasSimpleConstraint() ? convert(proto.getSimpleConstraint())
+        : convert(proto.getCompositeConstraint());
+  }
+
+  private SingleConstraint convert(SimplePlacementConstraintProto proto) {
+    Set<TargetExpression> targets = new HashSet<>();
+    for (PlacementConstraintTargetProto tp : proto.getTargetExpressionsList()) {
+      targets.add(convert(tp));
+    }
+
+    return new SingleConstraint(proto.getScope(), proto.getMinCardinality(),
+        proto.getMaxCardinality(), targets);
+  }
+
+  private TargetExpression convert(PlacementConstraintTargetProto proto) {
+    return new TargetExpression(
+        ProtoUtils.convertFromProtoFormat(proto.getTargetType()),
+        proto.hasTargetKey() ? proto.getTargetKey() : null,
+        new HashSet<>(proto.getTargetValuesList()));
+  }
+
+  private AbstractConstraint convert(CompositePlacementConstraintProto proto) {
+    switch (proto.getCompositeType()) {
+    case AND:
+    case OR:
+      List<AbstractConstraint> children = new ArrayList<>();
+      for (PlacementConstraintProto cp : proto.getChildConstraintsList()) {
+        children.add(convert(cp));
+      }
+      return (proto.getCompositeType() == AND) ? new And(children)
+          : new Or(children);
+    case DELAYED_OR:
+      List<TimedPlacementConstraint> tChildren = new ArrayList<>();
+      for (TimedPlacementConstraintProto cp : proto
+          .getTimedChildConstraintsList()) {
+        tChildren.add(convert(cp));
+      }
+      return new DelayedOr(tChildren);
+    default:
+      throw new YarnRuntimeException(
+          "Encountered unexpected type of composite constraint.");
+    }
+  }
+
+  private TimedPlacementConstraint convert(
+      TimedPlacementConstraintProto proto) {
+    AbstractConstraint pConstraint = convert(proto.getPlacementConstraint());
+
+    return new TimedPlacementConstraint(pConstraint, proto.getSchedulingDelay(),
+        ProtoUtils.convertFromProtoFormat(proto.getDelayUnit()));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33a796d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintToProtoConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintToProtoConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintToProtoConverter.java
new file mode 100644
index 0000000..7816e18
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintToProtoConverter.java
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.CardinalityConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.CompositeConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.DelayedOr;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementConstraint;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto.CompositeType;
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintTargetProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.SimplePlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.TimedPlacementConstraintProto;
+
+import com.google.protobuf.GeneratedMessage;
+
+/**
+ * {@code PlacementConstraintToProtoConverter} generates a
+ * {@link PlacementConstraintProto} given a
+ * {@link PlacementConstraint.AbstractConstraint}.
+ */
+@Private
+public class PlacementConstraintToProtoConverter
+    implements PlacementConstraint.Visitor<GeneratedMessage> {
+
+  private PlacementConstraint placementConstraint;
+
+  public PlacementConstraintToProtoConverter(
+      PlacementConstraint placementConstraint) {
+    this.placementConstraint = placementConstraint;
+  }
+
+  public PlacementConstraintProto convert() {
+    return (PlacementConstraintProto) placementConstraint.getConstraintExpr()
+        .accept(this);
+  }
+
+  @Override
+  public GeneratedMessage visit(SingleConstraint constraint) {
+    SimplePlacementConstraintProto.Builder sb =
+        SimplePlacementConstraintProto.newBuilder();
+
+    if (constraint.getScope() != null) {
+      sb.setScope(constraint.getScope());
+    }
+    sb.setMinCardinality(constraint.getMinCardinality());
+    sb.setMaxCardinality(constraint.getMaxCardinality());
+    if (constraint.getTargetExpressions() != null) {
+      for (TargetExpression target : constraint.getTargetExpressions()) {
+        sb.addTargetExpressions(
+            (PlacementConstraintTargetProto) target.accept(this));
+      }
+
+    }
+    SimplePlacementConstraintProto sProto = sb.build();
+
+    // Wrap around PlacementConstraintProto object.
+    PlacementConstraintProto.Builder pb = PlacementConstraintProto.newBuilder();
+    pb.setSimpleConstraint(sProto);
+    return pb.build();
+  }
+
+  @Override
+  public GeneratedMessage visit(TargetExpression target) {
+    PlacementConstraintTargetProto.Builder tb =
+        PlacementConstraintTargetProto.newBuilder();
+
+    tb.setTargetType(ProtoUtils.convertToProtoFormat(target.getTargetType()));
+    if (target.getTargetKey() != null) {
+      tb.setTargetKey(target.getTargetKey());
+    }
+    if (target.getTargetValues() != null) {
+      tb.addAllTargetValues(target.getTargetValues());
+    }
+    return tb.build();
+  }
+
+  @Override
+  public GeneratedMessage visit(TargetConstraint constraint) {
+    throw new YarnRuntimeException("Unexpected TargetConstraint found.");
+  }
+
+  @Override
+  public GeneratedMessage visit(CardinalityConstraint constraint) {
+    throw new YarnRuntimeException("Unexpected CardinalityConstraint found.");
+  }
+
+  private GeneratedMessage visitAndOr(
+      CompositeConstraint<AbstractConstraint> composite, CompositeType type) {
+    CompositePlacementConstraintProto.Builder cb =
+        CompositePlacementConstraintProto.newBuilder();
+
+    cb.setCompositeType(type);
+
+    for (AbstractConstraint c : composite.getChildren()) {
+      cb.addChildConstraints((PlacementConstraintProto) c.accept(this));
+    }
+    CompositePlacementConstraintProto cProto = cb.build();
+
+    // Wrap around PlacementConstraintProto object.
+    PlacementConstraintProto.Builder pb = PlacementConstraintProto.newBuilder();
+    pb.setCompositeConstraint(cProto);
+    return pb.build();
+  }
+
+  @Override
+  public GeneratedMessage visit(And constraint) {
+    return visitAndOr(constraint, CompositeType.AND);
+  }
+
+  @Override
+  public GeneratedMessage visit(Or constraint) {
+    return visitAndOr(constraint, CompositeType.OR);
+  }
+
+  @Override
+  public GeneratedMessage visit(DelayedOr constraint) {
+    CompositePlacementConstraintProto.Builder cb =
+        CompositePlacementConstraintProto.newBuilder();
+
+    cb.setCompositeType(CompositeType.DELAYED_OR);
+
+    for (TimedPlacementConstraint c : constraint.getChildren()) {
+      cb.addTimedChildConstraints(
+          (TimedPlacementConstraintProto) c.accept(this));
+    }
+    CompositePlacementConstraintProto cProto = cb.build();
+
+    // Wrap around PlacementConstraintProto object.
+    PlacementConstraintProto.Builder pb = PlacementConstraintProto.newBuilder();
+    pb.setCompositeConstraint(cProto);
+    return pb.build();
+  }
+
+  @Override
+  public GeneratedMessage visit(TimedPlacementConstraint constraint) {
+    TimedPlacementConstraintProto.Builder tb =
+        TimedPlacementConstraintProto.newBuilder();
+
+    tb.setDelayUnit(ProtoUtils.convertToProtoFormat(constraint.getDelayUnit()));
+    tb.setSchedulingDelay(constraint.getSchedulingDelay());
+    tb.setPlacementConstraint(
+        (PlacementConstraintProto) constraint.getConstraint().accept(this));
+
+    return tb.build();
+  }
+}
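
[Editor's note, not part of the commit] A round-trip sketch using the two converters above (API object to proto and back); class and variable names are illustrative.

    import org.apache.hadoop.yarn.api.pb.PlacementConstraintFromProtoConverter;
    import org.apache.hadoop.yarn.api.pb.PlacementConstraintToProtoConverter;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
    import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;

    public class ConstraintRoundTripSketch {
      public static void main(String[] args) {
        // Build a simple node-affinity constraint with the factory methods.
        PlacementConstraint original = PlacementConstraints.build(
            PlacementConstraints.targetIn(PlacementConstraints.NODE,
                PlacementConstraints.PlacementTargets.allocationTag("hbase-m")));

        // API object -> protobuf message.
        PlacementConstraintProto proto =
            new PlacementConstraintToProtoConverter(original).convert();

        // Protobuf message -> API object.
        PlacementConstraint restored =
            new PlacementConstraintFromProtoConverter(proto).convert();

        System.out.println(restored.getConstraintExpr() != null);
      }
    }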

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33a796d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/package-info.java
new file mode 100644
index 0000000..18da80f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * API related to protobuf objects that are not backed by PBImpl classes.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.api.pb;
+import org.apache.hadoop.classification.InterfaceAudience;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33a796d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
index f3e665b..168d864 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
@@ -56,6 +56,8 @@ import org.apache.hadoop.yarn.api.records.UpdateContainerError;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementConstraint;
 import org.apache.hadoop.yarn.proto.YarnProtos;
 import org.apache.hadoop.yarn.proto.YarnProtos.AMCommandProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAccessTypeProto;
@@ -70,10 +72,12 @@ import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceVisibilityProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.LogAggregationStatusProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.NodeStateProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintTargetProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.QueueACLProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.QueueStateProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ReservationRequestInterpreterProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.TimedPlacementConstraintProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationAttemptStateProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerRetryPolicyProto;
@@ -507,6 +511,29 @@ public class ProtoUtils {
     }
     return ret;
   }
+
+  public static PlacementConstraintTargetProto.TargetType convertToProtoFormat(
+          TargetExpression.TargetType t) {
+    return PlacementConstraintTargetProto.TargetType.valueOf(t.name());
+  }
+
+  public static TargetExpression.TargetType convertFromProtoFormat(
+          PlacementConstraintTargetProto.TargetType t) {
+    return TargetExpression.TargetType.valueOf(t.name());
+  }
+
+  /*
+   * TimedPlacementConstraint.DelayUnit
+   */
+  public static TimedPlacementConstraintProto.DelayUnit convertToProtoFormat(
+          TimedPlacementConstraint.DelayUnit u) {
+    return TimedPlacementConstraintProto.DelayUnit.valueOf(u.name());
+  }
+
+  public static TimedPlacementConstraint.DelayUnit convertFromProtoFormat(
+          TimedPlacementConstraintProto.DelayUnit u) {
+    return TimedPlacementConstraint.DelayUnit.valueOf(u.name());
+  }
 }
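
The converters added above are intentionally thin: they map enum constants by name via valueOf(name()), so the Java enums and the generated proto enums must declare matching constants. A small sketch of the resulting round-trip (iterating over values() avoids hard-coding constant names):

    // Sketch: round-trip every DelayUnit value through the proto enum; the
    // name-based mapping only works if both enums stay in sync.
    for (TimedPlacementConstraint.DelayUnit unit
        : TimedPlacementConstraint.DelayUnit.values()) {
      TimedPlacementConstraintProto.DelayUnit protoUnit =
          ProtoUtils.convertToProtoFormat(unit);
      assert unit == ProtoUtils.convertFromProtoFormat(protoUnit);
    }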
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33a796d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
new file mode 100644
index 0000000..e9eda6f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
@@ -0,0 +1,209 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.resource;
+
+import java.util.ListIterator;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.CardinalityConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.CompositeConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.DelayedOr;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetConstraint.TargetOperator;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementConstraint;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+
+/**
+ * This class contains inner classes that define transformation on a
+ * {@link PlacementConstraint} expression.
+ */
+@Private
+public class PlacementConstraintTransformations {
+
+  /**
+   * The default implementation of the {@link PlacementConstraint.Visitor} that
+   * does a traversal of the constraint tree, performing no action for the lead
+   * constraints.
+   */
+  public static class AbstractTransformer
+      implements PlacementConstraint.Visitor<AbstractConstraint> {
+
+    private PlacementConstraint placementConstraint;
+
+    public AbstractTransformer(PlacementConstraint placementConstraint) {
+      this.placementConstraint = placementConstraint;
+    }
+
+    /**
+     * This method performs the transformation of the
+     * {@link #placementConstraint}.
+     *
+     * @return the transformed placement constraint.
+     */
+    public PlacementConstraint transform() {
+      AbstractConstraint constraintExpr =
+          placementConstraint.getConstraintExpr();
+
+      // Visit the constraint tree to perform the transformation.
+      constraintExpr = constraintExpr.accept(this);
+
+      return new PlacementConstraint(constraintExpr);
+    }
+
+    @Override
+    public AbstractConstraint visit(SingleConstraint constraint) {
+      // Do nothing.
+      return constraint;
+    }
+
+    @Override
+    public AbstractConstraint visit(TargetExpression expression) {
+      // Do nothing.
+      return null;
+    }
+
+    @Override
+    public AbstractConstraint visit(TargetConstraint constraint) {
+      // Do nothing.
+      return constraint;
+    }
+
+    @Override
+    public AbstractConstraint visit(CardinalityConstraint constraint) {
+      // Do nothing.
+      return constraint;
+    }
+
+    private AbstractConstraint visitAndOr(
+        CompositeConstraint<AbstractConstraint> constraint) {
+      for (ListIterator<AbstractConstraint> iter =
+          constraint.getChildren().listIterator(); iter.hasNext();) {
+        AbstractConstraint child = iter.next();
+        child = child.accept(this);
+        iter.set(child);
+      }
+      return constraint;
+    }
+
+    @Override
+    public AbstractConstraint visit(And constraint) {
+      return visitAndOr(constraint);
+    }
+
+    @Override
+    public AbstractConstraint visit(Or constraint) {
+      return visitAndOr(constraint);
+    }
+
+    @Override
+    public AbstractConstraint visit(DelayedOr constraint) {
+      constraint.getChildren().forEach(
+          child -> child.setConstraint(child.getConstraint().accept(this)));
+      return constraint;
+    }
+
+    @Override
+    public AbstractConstraint visit(TimedPlacementConstraint constraint) {
+      // Do nothing.
+      return null;
+    }
+  }
+
+  /**
+   * Visits a {@link PlacementConstraint} tree and substitutes each
+   * {@link TargetConstraint} and {@link CardinalityConstraint} with an
+   * equivalent {@link SingleConstraint}.
+   */
+  public static class SingleConstraintTransformer extends AbstractTransformer {
+
+    public SingleConstraintTransformer(PlacementConstraint constraint) {
+      super(constraint);
+    }
+
+    @Override
+    public AbstractConstraint visit(TargetConstraint constraint) {
+      AbstractConstraint newConstraint;
+      if (constraint.getOp() == TargetOperator.IN) {
+        newConstraint = new SingleConstraint(constraint.getScope(), 1,
+            Integer.MAX_VALUE, constraint.getTargetExpressions());
+      } else if (constraint.getOp() == TargetOperator.NOT_IN) {
+        newConstraint = new SingleConstraint(constraint.getScope(), 0, 0,
+            constraint.getTargetExpressions());
+      } else {
+        throw new YarnRuntimeException(
+            "Encountered unexpected type of constraint target operator: "
+                + constraint.getOp());
+      }
+      return newConstraint;
+    }
+
+    @Override
+    public AbstractConstraint visit(CardinalityConstraint constraint) {
+      return new SingleConstraint(constraint.getScope(),
+          constraint.getMinCardinality(), constraint.getMaxCardinality(),
+          new TargetExpression(TargetExpression.TargetType.SELF));
+    }
+  }
+
+  /**
+   * Visits a {@link PlacementConstraint} tree and, whenever possible,
+   * substitutes each {@link SingleConstraint} with a {@link TargetConstraint}
+   * or a {@link CardinalityConstraint}. When such a substitution is not
+   * possible, we keep the original {@link SingleConstraint}.
+   */
+  public static class SpecializedConstraintTransformer
+      extends AbstractTransformer {
+
+    public SpecializedConstraintTransformer(PlacementConstraint constraint) {
+      super(constraint);
+    }
+
+    @Override
+    public AbstractConstraint visit(SingleConstraint constraint) {
+      AbstractConstraint transformedConstraint = constraint;
+      // Check if it is a cardinality constraint.
+      if (constraint.getTargetExpressions().size() == 1) {
+        TargetExpression targetExpr =
+            constraint.getTargetExpressions().iterator().next();
+        if (targetExpr.getTargetType() == TargetExpression.TargetType.SELF) {
+          transformedConstraint = new CardinalityConstraint(
+              constraint.getScope(), constraint.getMinCardinality(),
+              constraint.getMaxCardinality());
+        }
+      }
+      // Check if it is a target constraint.
+      if (constraint.getMinCardinality() == 1
+          && constraint.getMaxCardinality() == Integer.MAX_VALUE) {
+        transformedConstraint = new TargetConstraint(TargetOperator.IN,
+            constraint.getScope(), constraint.getTargetExpressions());
+      } else if (constraint.getMinCardinality() == 0
+          && constraint.getMaxCardinality() == 0) {
+        transformedConstraint = new TargetConstraint(TargetOperator.NOT_IN,
+            constraint.getScope(), constraint.getTargetExpressions());
+      }
+
+      return transformedConstraint;
+    }
+  }
+}
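
The two transformers are inverses in the common case; a minimal sketch of how they are driven (this is essentially what TestPlacementConstraintTransformations below does, with targetIn and allocationTag coming from the PlacementConstraints builders):

    // Sketch: specialize a generic SingleConstraint into a TargetConstraint,
    // then lower it back to an equivalent SingleConstraint.
    PlacementConstraint constraint =
        PlacementConstraints.build(targetIn(NODE, allocationTag("hbase-m")));
    PlacementConstraint specialized =
        new SpecializedConstraintTransformer(constraint).transform();
    PlacementConstraint general =
        new SingleConstraintTransformer(specialized).transform();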

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33a796d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
new file mode 100644
index 0000000..660dc02
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * API related to resources.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.api.resource;
+import org.apache.hadoop.classification.InterfaceAudience;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33a796d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPlacementConstraintPBConversion.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPlacementConstraintPBConversion.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPlacementConstraintPBConversion.java
new file mode 100644
index 0000000..bd245e2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPlacementConstraintPBConversion.java
@@ -0,0 +1,195 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api;
+
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.cardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.maxCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.or;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
+
+import java.util.Iterator;
+
+import org.apache.hadoop.yarn.api.pb.PlacementConstraintFromProtoConverter;
+import org.apache.hadoop.yarn.api.pb.PlacementConstraintToProtoConverter;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+import org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto.CompositeType;
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.SimplePlacementConstraintProto;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test class for {@link PlacementConstraintToProtoConverter} and
+ * {@link PlacementConstraintFromProtoConverter}.
+ */
+public class TestPlacementConstraintPBConversion {
+
+  @Test
+  public void testTargetConstraintProtoConverter() {
+    AbstractConstraint sConstraintExpr =
+        targetIn(NODE, allocationTag("hbase-m"));
+    Assert.assertTrue(sConstraintExpr instanceof SingleConstraint);
+    SingleConstraint single = (SingleConstraint) sConstraintExpr;
+    PlacementConstraint sConstraint =
+        PlacementConstraints.build(sConstraintExpr);
+
+    // Convert to proto.
+    PlacementConstraintToProtoConverter toProtoConverter =
+        new PlacementConstraintToProtoConverter(sConstraint);
+    PlacementConstraintProto protoConstraint = toProtoConverter.convert();
+
+    Assert.assertTrue(protoConstraint.hasSimpleConstraint());
+    Assert.assertFalse(protoConstraint.hasCompositeConstraint());
+    SimplePlacementConstraintProto sProto =
+        protoConstraint.getSimpleConstraint();
+    Assert.assertEquals(single.getScope(), sProto.getScope());
+    Assert.assertEquals(single.getMinCardinality(), sProto.getMinCardinality());
+    Assert.assertEquals(single.getMaxCardinality(), sProto.getMaxCardinality());
+    Assert.assertEquals(single.getTargetExpressions().size(),
+        sProto.getTargetExpressionsList().size());
+
+    // Convert from proto.
+    PlacementConstraintFromProtoConverter fromProtoConverter =
+        new PlacementConstraintFromProtoConverter(protoConstraint);
+    PlacementConstraint newConstraint = fromProtoConverter.convert();
+
+    AbstractConstraint newConstraintExpr = newConstraint.getConstraintExpr();
+    Assert.assertTrue(newConstraintExpr instanceof SingleConstraint);
+    SingleConstraint newSingle = (SingleConstraint) newConstraintExpr;
+    Assert.assertEquals(single.getScope(), newSingle.getScope());
+    Assert.assertEquals(single.getMinCardinality(),
+        newSingle.getMinCardinality());
+    Assert.assertEquals(single.getMaxCardinality(),
+        newSingle.getMaxCardinality());
+    Assert.assertEquals(single.getTargetExpressions(),
+        newSingle.getTargetExpressions());
+  }
+
+  @Test
+  public void testCardinalityConstraintProtoConverter() {
+    AbstractConstraint sConstraintExpr = cardinality(RACK, 3, 10);
+    Assert.assertTrue(sConstraintExpr instanceof SingleConstraint);
+    SingleConstraint single = (SingleConstraint) sConstraintExpr;
+    PlacementConstraint sConstraint =
+        PlacementConstraints.build(sConstraintExpr);
+
+    // Convert to proto.
+    PlacementConstraintToProtoConverter toProtoConverter =
+        new PlacementConstraintToProtoConverter(sConstraint);
+    PlacementConstraintProto protoConstraint = toProtoConverter.convert();
+
+    compareSimpleConstraintToProto(single, protoConstraint);
+
+    // Convert from proto.
+    PlacementConstraintFromProtoConverter fromProtoConverter =
+        new PlacementConstraintFromProtoConverter(protoConstraint);
+    PlacementConstraint newConstraint = fromProtoConverter.convert();
+
+    AbstractConstraint newConstraintExpr = newConstraint.getConstraintExpr();
+    Assert.assertTrue(newConstraintExpr instanceof SingleConstraint);
+    SingleConstraint newSingle = (SingleConstraint) newConstraintExpr;
+    compareSimpleConstraints(single, newSingle);
+  }
+
+  @Test
+  public void testCompositeConstraintProtoConverter() {
+    AbstractConstraint constraintExpr =
+        or(targetIn(RACK, allocationTag("spark")), maxCardinality(NODE, 3),
+            targetCardinality(RACK, 2, 10, allocationTag("zk")));
+    Assert.assertTrue(constraintExpr instanceof Or);
+    PlacementConstraint constraint = PlacementConstraints.build(constraintExpr);
+    Or orExpr = (Or) constraintExpr;
+
+    // Convert to proto.
+    PlacementConstraintToProtoConverter toProtoConverter =
+        new PlacementConstraintToProtoConverter(constraint);
+    PlacementConstraintProto protoConstraint = toProtoConverter.convert();
+
+    Assert.assertFalse(protoConstraint.hasSimpleConstraint());
+    Assert.assertTrue(protoConstraint.hasCompositeConstraint());
+    CompositePlacementConstraintProto cProto =
+        protoConstraint.getCompositeConstraint();
+
+    Assert.assertEquals(CompositeType.OR, cProto.getCompositeType());
+    Assert.assertEquals(3, cProto.getChildConstraintsCount());
+    Assert.assertEquals(0, cProto.getTimedChildConstraintsCount());
+    Iterator<AbstractConstraint> orChildren = orExpr.getChildren().iterator();
+    Iterator<PlacementConstraintProto> orProtoChildren =
+        cProto.getChildConstraintsList().iterator();
+    while (orChildren.hasNext() && orProtoChildren.hasNext()) {
+      AbstractConstraint orChild = orChildren.next();
+      PlacementConstraintProto orProtoChild = orProtoChildren.next();
+      compareSimpleConstraintToProto((SingleConstraint) orChild, orProtoChild);
+    }
+
+    // Convert from proto.
+    PlacementConstraintFromProtoConverter fromProtoConverter =
+        new PlacementConstraintFromProtoConverter(protoConstraint);
+    PlacementConstraint newConstraint = fromProtoConverter.convert();
+
+    AbstractConstraint newConstraintExpr = newConstraint.getConstraintExpr();
+    Assert.assertTrue(newConstraintExpr instanceof Or);
+    Or newOrExpr = (Or) newConstraintExpr;
+    Assert.assertEquals(3, newOrExpr.getChildren().size());
+    orChildren = orExpr.getChildren().iterator();
+    Iterator<AbstractConstraint> newOrChildren =
+        newOrExpr.getChildren().iterator();
+    while (orChildren.hasNext() && newOrChildren.hasNext()) {
+      AbstractConstraint orChild = orChildren.next();
+      AbstractConstraint newOrChild = newOrChildren.next();
+      compareSimpleConstraints((SingleConstraint) orChild,
+          (SingleConstraint) newOrChild);
+    }
+  }
+
+  private void compareSimpleConstraintToProto(SingleConstraint constraint,
+      PlacementConstraintProto proto) {
+    Assert.assertTrue(proto.hasSimpleConstraint());
+    Assert.assertFalse(proto.hasCompositeConstraint());
+    SimplePlacementConstraintProto sProto = proto.getSimpleConstraint();
+    Assert.assertEquals(constraint.getScope(), sProto.getScope());
+    Assert.assertEquals(constraint.getMinCardinality(),
+        sProto.getMinCardinality());
+    Assert.assertEquals(constraint.getMaxCardinality(),
+        sProto.getMaxCardinality());
+    Assert.assertEquals(constraint.getTargetExpressions().size(),
+        sProto.getTargetExpressionsList().size());
+  }
+
+  private void compareSimpleConstraints(SingleConstraint single,
+      SingleConstraint newSingle) {
+    Assert.assertEquals(single.getScope(), newSingle.getScope());
+    Assert.assertEquals(single.getMinCardinality(),
+        newSingle.getMinCardinality());
+    Assert.assertEquals(single.getMaxCardinality(),
+        newSingle.getMaxCardinality());
+    Assert.assertEquals(single.getTargetExpressions(),
+        newSingle.getTargetExpressions());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33a796d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintTransformations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintTransformations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintTransformations.java
new file mode 100644
index 0000000..1763735
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintTransformations.java
@@ -0,0 +1,183 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.resource;
+
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.cardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.maxCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.or;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.CardinalityConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetConstraint.TargetOperator;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations.SingleConstraintTransformer;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations.SpecializedConstraintTransformer;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test class for {@link PlacementConstraintTransformations}.
+ */
+public class TestPlacementConstraintTransformations {
+
+  @Test
+  public void testTargetConstraint() {
+    AbstractConstraint sConstraintExpr =
+        targetIn(NODE, allocationTag("hbase-m"));
+    Assert.assertTrue(sConstraintExpr instanceof SingleConstraint);
+    PlacementConstraint sConstraint =
+        PlacementConstraints.build(sConstraintExpr);
+
+    // Transform from SimpleConstraint to specialized TargetConstraint
+    SpecializedConstraintTransformer specTransformer =
+        new SpecializedConstraintTransformer(sConstraint);
+    PlacementConstraint tConstraint = specTransformer.transform();
+
+    AbstractConstraint tConstraintExpr = tConstraint.getConstraintExpr();
+    Assert.assertTrue(tConstraintExpr instanceof TargetConstraint);
+
+    SingleConstraint single = (SingleConstraint) sConstraintExpr;
+    TargetConstraint target = (TargetConstraint) tConstraintExpr;
+    Assert.assertEquals(single.getScope(), target.getScope());
+    Assert.assertEquals(TargetOperator.IN, target.getOp());
+    Assert.assertEquals(single.getTargetExpressions(),
+        target.getTargetExpressions());
+
+    // Transform from specialized TargetConstraint to SimpleConstraint
+    SingleConstraintTransformer singleTransformer =
+        new SingleConstraintTransformer(tConstraint);
+    sConstraint = singleTransformer.transform();
+
+    sConstraintExpr = sConstraint.getConstraintExpr();
+    Assert.assertTrue(sConstraintExpr instanceof SingleConstraint);
+
+    single = (SingleConstraint) sConstraintExpr;
+    Assert.assertEquals(target.getScope(), single.getScope());
+    Assert.assertEquals(1, single.getMinCardinality());
+    Assert.assertEquals(Integer.MAX_VALUE, single.getMaxCardinality());
+    Assert.assertEquals(single.getTargetExpressions(),
+        target.getTargetExpressions());
+  }
+
+  @Test
+  public void testCardinalityConstraint() {
+    AbstractConstraint sConstraintExpr = cardinality(RACK, 3, 10);
+    Assert.assertTrue(sConstraintExpr instanceof SingleConstraint);
+    PlacementConstraint sConstraint =
+        PlacementConstraints.build(sConstraintExpr);
+
+    // Transform from SimpleConstraint to specialized CardinalityConstraint
+    SpecializedConstraintTransformer specTransformer =
+        new SpecializedConstraintTransformer(sConstraint);
+    PlacementConstraint cConstraint = specTransformer.transform();
+
+    AbstractConstraint cConstraintExpr = cConstraint.getConstraintExpr();
+    Assert.assertTrue(cConstraintExpr instanceof CardinalityConstraint);
+
+    SingleConstraint single = (SingleConstraint) sConstraintExpr;
+    CardinalityConstraint cardinality = (CardinalityConstraint) cConstraintExpr;
+    Assert.assertEquals(single.getScope(), cardinality.getScope());
+    Assert.assertEquals(single.getMinCardinality(),
+        cardinality.getMinCardinality());
+    Assert.assertEquals(single.getMaxCardinality(),
+        cardinality.getMaxCardinality());
+
+    // Transform from specialized CardinalityConstraint to SimpleConstraint
+    SingleConstraintTransformer singleTransformer =
+        new SingleConstraintTransformer(cConstraint);
+    sConstraint = singleTransformer.transform();
+
+    sConstraintExpr = sConstraint.getConstraintExpr();
+    Assert.assertTrue(sConstraintExpr instanceof SingleConstraint);
+
+    single = (SingleConstraint) sConstraintExpr;
+    Assert.assertEquals(cardinality.getScope(), single.getScope());
+    Assert.assertEquals(cardinality.getMinCardinality(),
+        single.getMinCardinality());
+    Assert.assertEquals(cardinality.getMaxCardinality(),
+        single.getMaxCardinality());
+    Assert.assertEquals(new HashSet<>(Arrays.asList(PlacementTargets.self())),
+        single.getTargetExpressions());
+  }
+
+  @Test
+  public void testTargetCardinalityConstraint() {
+    AbstractConstraint constraintExpr =
+        targetCardinality(RACK, 3, 10, allocationTag("zk"));
+    Assert.assertTrue(constraintExpr instanceof SingleConstraint);
+    PlacementConstraint constraint = PlacementConstraints.build(constraintExpr);
+
+    // Apply transformation. Should be a no-op.
+    SpecializedConstraintTransformer specTransformer =
+        new SpecializedConstraintTransformer(constraint);
+    PlacementConstraint newConstraint = specTransformer.transform();
+
+    // The constraint expression should be the same.
+    Assert.assertEquals(constraintExpr, newConstraint.getConstraintExpr());
+  }
+
+  @Test
+  public void testCompositeConstraint() {
+    AbstractConstraint constraintExpr =
+        or(targetIn(RACK, allocationTag("spark")), maxCardinality(NODE, 3),
+            targetCardinality(RACK, 2, 10, allocationTag("zk")));
+    Assert.assertTrue(constraintExpr instanceof Or);
+    PlacementConstraint constraint = PlacementConstraints.build(constraintExpr);
+    Or orExpr = (Or) constraintExpr;
+    for (AbstractConstraint child : orExpr.getChildren()) {
+      Assert.assertTrue(child instanceof SingleConstraint);
+    }
+
+    // Apply transformation. Should transform target and cardinality constraints
+    // included in the composite constraint to specialized ones.
+    SpecializedConstraintTransformer specTransformer =
+        new SpecializedConstraintTransformer(constraint);
+    PlacementConstraint specConstraint = specTransformer.transform();
+
+    Or specOrExpr = (Or) specConstraint.getConstraintExpr();
+    List<AbstractConstraint> specChildren = specOrExpr.getChildren();
+    Assert.assertEquals(3, specChildren.size());
+    Assert.assertTrue(specChildren.get(0) instanceof TargetConstraint);
+    Assert.assertTrue(specChildren.get(1) instanceof CardinalityConstraint);
+    Assert.assertTrue(specChildren.get(2) instanceof SingleConstraint);
+
+    // Transform from specialized TargetConstraint to SimpleConstraint
+    SingleConstraintTransformer singleTransformer =
+        new SingleConstraintTransformer(specConstraint);
+    PlacementConstraint simConstraint = singleTransformer.transform();
+    Assert.assertTrue(simConstraint.getConstraintExpr() instanceof Or);
+    Or simOrExpr = (Or) simConstraint.getConstraintExpr();
+    for (AbstractConstraint child : simOrExpr.getChildren()) {
+      Assert.assertTrue(child instanceof SingleConstraint);
+    }
+  }
+
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[24/32] hadoop git commit: YARN-6599. Support anti-affinity constraint via AppPlacementAllocator. (Wangda Tan via asuresh)

Posted by as...@apache.org.
YARN-6599. Support anti-affinity constraint via AppPlacementAllocator. (Wangda Tan via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38af2379
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38af2379
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38af2379

Branch: refs/heads/trunk
Commit: 38af23796971193fa529c3d08ffde8fcd6e607b6
Parents: 8779a35
Author: Arun Suresh <as...@apache.org>
Authored: Thu Jan 18 14:10:30 2018 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Wed Jan 31 01:30:17 2018 -0800

----------------------------------------------------------------------
 .../v2/app/rm/TestRMContainerAllocator.java     |  15 +-
 .../sls/scheduler/SLSCapacityScheduler.java     |  15 +-
 .../yarn/sls/scheduler/SLSFairScheduler.java    |  12 +-
 .../dev-support/findbugs-exclude.xml            |   8 +
 .../yarn/api/resource/PlacementConstraints.java |  43 +-
 .../hadoop/yarn/conf/YarnConfiguration.java     |   2 +-
 ...SchedulerInvalidResoureRequestException.java |  47 ++
 .../api/impl/TestAMRMClientOnRMRestart.java     |   9 +-
 .../impl/pb/AllocateRequestPBImpl.java          |   1 +
 .../server/scheduler/SchedulerRequestKey.java   |  11 +
 .../resourcemanager/DefaultAMSProcessor.java    |  13 +-
 .../rmapp/attempt/RMAppAttemptImpl.java         |   5 +-
 .../scheduler/AbstractYarnScheduler.java        |   3 +-
 .../scheduler/AppSchedulingInfo.java            | 205 +++++--
 .../ApplicationPlacementAllocatorFactory.java   |  68 +++
 .../scheduler/ApplicationPlacementFactory.java  |  63 ---
 .../scheduler/ContainerUpdateContext.java       |   4 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  20 +-
 .../scheduler/YarnScheduler.java                |  15 +-
 .../scheduler/capacity/CapacityScheduler.java   |  54 +-
 .../CapacitySchedulerConfiguration.java         |   5 +
 .../allocator/RegularContainerAllocator.java    |   3 +-
 .../scheduler/common/ContainerRequest.java      |  12 +
 .../scheduler/common/PendingAsk.java            |   6 +
 .../scheduler/common/fica/FiCaSchedulerApp.java |   6 +
 .../constraint/AllocationTagsManager.java       |  71 +--
 .../constraint/AllocationTagsNamespaces.java    |  31 --
 .../constraint/PlacementConstraintsUtil.java    | 165 ++++--
 .../algorithm/DefaultPlacementAlgorithm.java    |   2 +-
 .../processor/PlacementProcessor.java           |   8 +-
 .../scheduler/fair/FairScheduler.java           |  12 +-
 .../scheduler/fifo/FifoScheduler.java           |   7 +-
 .../placement/AppPlacementAllocator.java        |  66 ++-
 .../LocalityAppPlacementAllocator.java          |  35 +-
 .../SingleConstraintAppPlacementAllocator.java  | 531 +++++++++++++++++++
 .../server/resourcemanager/Application.java     |   9 +-
 .../yarn/server/resourcemanager/MockAM.java     |  51 ++
 .../attempt/TestRMAppAttemptTransitions.java    |  10 +-
 .../rmcontainer/TestRMContainerImpl.java        |   6 +-
 .../scheduler/TestAppSchedulingInfo.java        |   4 +-
 .../capacity/CapacitySchedulerTestBase.java     |  79 +++
 .../capacity/TestCapacityScheduler.java         |  90 +---
 .../TestCapacitySchedulerAsyncScheduling.java   |   2 +-
 .../TestCapacitySchedulerAutoQueueCreation.java |   2 +-
 ...apacitySchedulerSchedulingRequestUpdate.java | 260 +++++++++
 .../capacity/TestIncreaseAllocationExpirer.java |   2 +-
 ...estSchedulingRequestContainerAllocation.java | 277 ++++++++++
 ...hedulingRequestContainerAllocationAsync.java | 139 +++++
 .../scheduler/capacity/TestUtils.java           |   2 +
 .../constraint/TestAllocationTagsManager.java   |  30 +-
 .../TestPlacementConstraintsUtil.java           |  36 +-
 .../scheduler/fair/FairSchedulerTestBase.java   |   6 +-
 .../fair/TestContinuousScheduling.java          |  10 +-
 .../scheduler/fair/TestFairScheduler.java       |  30 +-
 .../scheduler/fifo/TestFifoScheduler.java       |  28 +-
 ...stSingleConstraintAppPlacementAllocator.java | 403 ++++++++++++++
 56 files changed, 2557 insertions(+), 492 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index 85e4181..7875917 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
@@ -111,6 +111,7 @@ import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.client.api.TimelineV2Client;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
@@ -1751,6 +1752,7 @@ public class TestRMContainerAllocator {
       super();
       try {
         Configuration conf = new Configuration();
+        init(conf);
         reinitialize(conf, rmContext);
       } catch (IOException ie) {
         LOG.info("add application failed with ", ie);
@@ -1769,8 +1771,8 @@ public class TestRMContainerAllocator {
     @Override
     public synchronized Allocation allocate(
         ApplicationAttemptId applicationAttemptId, List<ResourceRequest> ask,
-        List<ContainerId> release, List<String> blacklistAdditions,
-        List<String> blacklistRemovals,
+        List<SchedulingRequest> schedulingRequests, List<ContainerId> release,
+        List<String> blacklistAdditions, List<String> blacklistRemovals,
         ContainerUpdates updateRequests) {
       List<ResourceRequest> askCopy = new ArrayList<ResourceRequest>();
       for (ResourceRequest req : ask) {
@@ -1785,7 +1787,7 @@ public class TestRMContainerAllocator {
       lastBlacklistAdditions = blacklistAdditions;
       lastBlacklistRemovals = blacklistRemovals;
       Allocation allocation = super.allocate(
-          applicationAttemptId, askCopy, release, blacklistAdditions,
+          applicationAttemptId, askCopy, schedulingRequests, release, blacklistAdditions,
           blacklistRemovals, updateRequests);
       if (forceResourceLimit != null) {
         // Test wants to force the non-default resource limit
@@ -1805,6 +1807,7 @@ public class TestRMContainerAllocator {
       super();
       try {
         Configuration conf = new Configuration();
+        init(conf);
         reinitialize(conf, rmContext);
       } catch (IOException ie) {
         LOG.info("add application failed with ", ie);
@@ -1815,8 +1818,8 @@ public class TestRMContainerAllocator {
     @Override
     public synchronized Allocation allocate(
         ApplicationAttemptId applicationAttemptId, List<ResourceRequest> ask,
-        List<ContainerId> release, List<String> blacklistAdditions,
-        List<String> blacklistRemovals,
+        List<SchedulingRequest> schedulingRequests, List<ContainerId> release,
+        List<String> blacklistAdditions, List<String> blacklistRemovals,
         ContainerUpdates updateRequests) {
       List<ResourceRequest> askCopy = new ArrayList<ResourceRequest>();
       for (ResourceRequest req : ask) {
@@ -1827,7 +1830,7 @@ public class TestRMContainerAllocator {
       }
       SecurityUtil.setTokenServiceUseIp(false);
       Allocation normalAlloc = super.allocate(
-          applicationAttemptId, askCopy, release,
+          applicationAttemptId, askCopy, schedulingRequests, release,
           blacklistAdditions, blacklistRemovals, updateRequests);
       List<Container> containers = normalAlloc.getContainers();
       if(containers.size() > 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java
index 6848b22..35f3ed1 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo;
@@ -42,9 +43,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerUpdates;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
@@ -100,16 +99,17 @@ public class SLSCapacityScheduler extends CapacityScheduler implements
 
   @Override
   public Allocation allocate(ApplicationAttemptId attemptId,
-      List<ResourceRequest> resourceRequests, List<ContainerId> containerIds,
-      List<String> strings, List<String> strings2,
-      ContainerUpdates updateRequests) {
+      List<ResourceRequest> resourceRequests,
+      List<SchedulingRequest> schedulingRequests, List<ContainerId> containerIds,
+      List<String> strings, List<String> strings2, ContainerUpdates updateRequests) {
     if (metricsON) {
       final Timer.Context context = schedulerMetrics.getSchedulerAllocateTimer()
           .time();
       Allocation allocation = null;
       try {
         allocation = super
-            .allocate(attemptId, resourceRequests, containerIds, strings,
+            .allocate(attemptId, resourceRequests, schedulingRequests,
+                containerIds, strings,
                 strings2, updateRequests);
         return allocation;
       } finally {
@@ -123,7 +123,8 @@ public class SLSCapacityScheduler extends CapacityScheduler implements
         }
       }
     } else {
-      return super.allocate(attemptId, resourceRequests, containerIds, strings,
+      return super.allocate(attemptId, resourceRequests, schedulingRequests,
+          containerIds, strings,
           strings2, updateRequests);
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java
index 8e49c51..c27ab3e 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSFairScheduler.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo;
@@ -39,8 +40,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptR
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSLeafQueue;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.sls.SLSRunner;
 import org.apache.hadoop.yarn.sls.conf.SLSConfiguration;
@@ -94,7 +93,8 @@ public class SLSFairScheduler extends FairScheduler
 
   @Override
   public Allocation allocate(ApplicationAttemptId attemptId,
-      List<ResourceRequest> resourceRequests, List<ContainerId> containerIds,
+      List<ResourceRequest> resourceRequests,
+      List<SchedulingRequest> schedulingRequests, List<ContainerId> containerIds,
       List<String> blacklistAdditions, List<String> blacklistRemovals,
       ContainerUpdates updateRequests) {
     if (metricsON) {
@@ -102,7 +102,8 @@ public class SLSFairScheduler extends FairScheduler
           .time();
       Allocation allocation = null;
       try {
-        allocation = super.allocate(attemptId, resourceRequests, containerIds,
+        allocation = super.allocate(attemptId, resourceRequests,
+            schedulingRequests, containerIds,
             blacklistAdditions, blacklistRemovals, updateRequests);
         return allocation;
       } finally {
@@ -116,7 +117,8 @@ public class SLSFairScheduler extends FairScheduler
         }
       }
     } else {
-      return super.allocate(attemptId, resourceRequests, containerIds,
+      return super.allocate(attemptId, resourceRequests, schedulingRequests,
+          containerIds,
           blacklistAdditions, blacklistRemovals, updateRequests);
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 6a10312..81b8825 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -650,4 +650,12 @@
     <Method name="equals" />
     <Bug pattern="EQ_OVERRIDING_EQUALS_NOT_SYMMETRIC" />
   </Match>
+
+  <!-- Null pointer exception needs to be ignored here as Findbugs doesn't properly detect code logic -->
+  <Match>
+    <Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SingleConstraintAppPlacementAllocator" />
+    <Method name="validateAndSetSchedulingRequest" />
+    <Bug pattern="NP_NULL_ON_SOME_PATH" />
+  </Match>
+
 </FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
index c8991cb..ba1beae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
@@ -20,8 +20,12 @@ package org.apache.hadoop.yarn.api.resource;
 
 import java.util.concurrent.TimeUnit;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint.DelayedOr;
@@ -47,6 +51,14 @@ public final class PlacementConstraints {
 
   public static final String NODE = PlacementConstraint.NODE_SCOPE;
   public static final String RACK = PlacementConstraint.RACK_SCOPE;
+  public static final String NODE_PARTITION = "yarn_node_partition/";
+
+  private static final String APPLICATION_LABEL_PREFIX =
+      "yarn_application_label/";
+
+  @InterfaceAudience.Private
+  public static final String APPLICATION_LABEL_INTRA_APPLICATION =
+      APPLICATION_LABEL_PREFIX + "%intra_app%";
 
   /**
    * Creates a constraint that requires allocations to be placed on nodes that
@@ -187,6 +199,20 @@ public final class PlacementConstraints {
     }
 
     /**
+     * Constructs a target expression on a node partition. It is satisfied if
+     * the node belongs to one of the specified node partitions.
+     *
+     * @param nodePartitions the set of node partitions that the target node
+     *          should belong to
+     * @return the resulting expression on the node partition
+     */
+    public static TargetExpression nodePartition(
+        String... nodePartitions) {
+      return new TargetExpression(TargetType.NODE_ATTRIBUTE, NODE_PARTITION,
+          nodePartitions);
+    }
+
+    /**
      * Constructs a target expression on an allocation tag. It is satisfied if
      * there are allocations with one of the given tags.
      *
@@ -198,6 +224,22 @@ public final class PlacementConstraints {
       return new TargetExpression(TargetType.ALLOCATION_TAG, null,
           allocationTags);
     }
+
+    /**
+     * Constructs a target expression on an allocation tag. It is satisfied if
+     * there are allocations with one of the given tags. Compared to
+     * {@link PlacementTargets#allocationTag(String...)}, this only checks tags
+     * within the same application.
+     *
+     * @param allocationTags the set of tags that the attribute should take
+     *          values from
+     * @return the resulting expression on the allocation tags
+     */
+    public static TargetExpression allocationTagToIntraApp(
+        String... allocationTags) {
+      return new TargetExpression(TargetType.ALLOCATION_TAG,
+          APPLICATION_LABEL_INTRA_APPLICATION, allocationTags);
+    }
   }
 
   // Creation of compound constraints.
@@ -277,5 +319,4 @@ public final class PlacementConstraints {
   public static PlacementConstraint build(AbstractConstraint constraintExpr) {
     return constraintExpr.build();
   }
-
 }
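
The new allocationTagToIntraApp target is what the anti-affinity support in this change builds on; a hedged sketch of an intra-application anti-affinity constraint (targetNotIn is assumed to be the NOT_IN counterpart of the targetIn builder shown elsewhere in this patch, and the tag name is illustrative):

    // Sketch: place no two containers tagged "hbase-m" from the same
    // application on one node.
    PlacementConstraint antiAffinity = PlacementConstraints.build(
        targetNotIn(NODE, allocationTagToIntraApp("hbase-m")));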

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 367b1ae..f5bb2c7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -543,7 +543,7 @@ public class YarnConfiguration extends Configuration {
   public static final String RM_PLACEMENT_CONSTRAINTS_ENABLED =
       RM_PREFIX + "placement-constraints.enabled";
 
-  public static final boolean DEFAULT_RM_PLACEMENT_CONSTRAINTS_ENABLED = true;
+  public static final boolean DEFAULT_RM_PLACEMENT_CONSTRAINTS_ENABLED = false;
 
   public static final String RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS =
       RM_PREFIX + "placement-constraints.retry-attempts";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/SchedulerInvalidResoureRequestException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/SchedulerInvalidResoureRequestException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/SchedulerInvalidResoureRequestException.java
new file mode 100644
index 0000000..f55ad83
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/SchedulerInvalidResoureRequestException.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.exceptions;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This exception is thrown when the scheduler encounters an issue while
+ * handling a new or updated
+ * {@link org.apache.hadoop.yarn.api.records.SchedulingRequest}/
+ * {@link org.apache.hadoop.yarn.api.records.ResourceRequest} that was added to
+ * the scheduler.
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class SchedulerInvalidResoureRequestException extends YarnRuntimeException {
+  private static final long serialVersionUID = 10081123982L;
+
+  public SchedulerInvalidResoureRequestException(String message) {
+    super(message);
+  }
+
+  public SchedulerInvalidResoureRequestException(Throwable cause) {
+    super(cause);
+  }
+
+  public SchedulerInvalidResoureRequestException(String message,
+      Throwable cause) {
+    super(message, cause);
+  }
+}
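
A hedged sketch of how scheduler-side validation might surface the new
exception; the guard method below is hypothetical, only the exception class
comes from this patch:

    import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResoureRequestException;

    public class SchedulingRequestGuardSketch {
      // Hypothetical guard, mirroring the CapacityScheduler check added below:
      // reject SchedulingRequests when the scheduler has not enabled them.
      static void checkSchedulingRequestsAllowed(boolean allowed, int numRequests) {
        if (!allowed && numRequests > 0) {
          throw new SchedulerInvalidResoureRequestException(
              "SchedulingRequest is not enabled for this scheduler");
        }
      }

      public static void main(String[] args) {
        checkSchedulingRequestsAllowed(true, 2);   // ok
        checkSchedulingRequestsAllowed(false, 0);  // ok, nothing to reject
      }
    }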

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
index 337d7d4..11d703d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
@@ -545,6 +546,7 @@ public class TestAMRMClientOnRMRestart {
       super();
       try {
         Configuration conf = new Configuration();
+        init(conf);
         reinitialize(conf, rmContext);
       } catch (IOException ie) {
         assert (false);
@@ -563,8 +565,8 @@ public class TestAMRMClientOnRMRestart {
     @Override
     public synchronized Allocation allocate(
         ApplicationAttemptId applicationAttemptId, List<ResourceRequest> ask,
-        List<ContainerId> release, List<String> blacklistAdditions,
-        List<String> blacklistRemovals,
+        List<SchedulingRequest> schedulingRequests, List<ContainerId> release,
+        List<String> blacklistAdditions, List<String> blacklistRemovals,
         ContainerUpdates updateRequests) {
       List<ResourceRequest> askCopy = new ArrayList<ResourceRequest>();
       for (ResourceRequest req : ask) {
@@ -580,7 +582,8 @@ public class TestAMRMClientOnRMRestart {
       lastDecrease = updateRequests.getDecreaseRequests();
       lastBlacklistAdditions = blacklistAdditions;
       lastBlacklistRemovals = blacklistRemovals;
-      return super.allocate(applicationAttemptId, askCopy, release,
+      return super.allocate(applicationAttemptId, askCopy, schedulingRequests,
+          release,
           blacklistAdditions, blacklistRemovals, updateRequests);
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
index b460044..50672a3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
@@ -194,6 +194,7 @@ public class AllocateRequestPBImpl extends AllocateRequest {
   public void setSchedulingRequests(
       List<SchedulingRequest> schedulingRequests) {
     if (schedulingRequests == null) {
+      builder.clearSchedulingRequests();
       return;
     }
     initSchedulingRequests();
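
The added clearSchedulingRequests() call makes setSchedulingRequests(null)
reset the proto builder instead of silently keeping whatever was set before.
A small illustrative sketch:

    import java.util.Collections;

    import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateRequestPBImpl;
    import org.apache.hadoop.yarn.api.records.SchedulingRequest;

    public class ClearSchedulingRequestsSketch {
      public static void main(String[] args) {
        AllocateRequestPBImpl request = new AllocateRequestPBImpl();
        // Set a (here empty) list first, then clear it again by passing null.
        request.setSchedulingRequests(Collections.<SchedulingRequest>emptyList());
        request.setSchedulingRequests(null);
        System.out.println(request.getSchedulingRequests());
      }
    }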

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/SchedulerRequestKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/SchedulerRequestKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/SchedulerRequestKey.java
index c4f37f6..0fce083 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/SchedulerRequestKey.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/SchedulerRequestKey.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 
 /**
@@ -45,6 +46,16 @@ public final class SchedulerRequestKey implements
         req.getAllocationRequestId(), null);
   }
 
+  /**
+   * Factory method to generate a SchedulerRequestKey from a SchedulingRequest.
+   * @param req SchedulingRequest
+   * @return SchedulerRequestKey
+   */
+  public static SchedulerRequestKey create(SchedulingRequest req) {
+    return new SchedulerRequestKey(req.getPriority(),
+        req.getAllocationRequestId(), null);
+  }
+
   public static SchedulerRequestKey create(UpdateContainerRequest req,
       SchedulerRequestKey schedulerRequestKey) {
     return new SchedulerRequestKey(schedulerRequestKey.getPriority(),
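
The new factory method keys a SchedulingRequest by its priority and
allocation request id, exactly like the existing ResourceRequest variant. A
sketch, assuming the SchedulingRequest/ResourceSizing builders from the
scheduling-request API:

    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.api.records.ResourceSizing;
    import org.apache.hadoop.yarn.api.records.SchedulingRequest;
    import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;

    public class SchedulerRequestKeySketch {
      public static void main(String[] args) {
        SchedulingRequest request = SchedulingRequest.newBuilder()
            .allocationRequestId(1L)
            .priority(Priority.newInstance(1))
            .resourceSizing(
                ResourceSizing.newInstance(3, Resource.newInstance(1024, 1)))
            .build();
        // Same priority + allocationRequestId => same SchedulerRequestKey.
        System.out.println(SchedulerRequestKey.create(request));
      }
    }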

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
index 713947f..18ab473 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.InvalidContainerReleaseException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceBlacklistRequestException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
+import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResoureRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
@@ -273,10 +274,14 @@ final class DefaultAMSProcessor implements ApplicationMasterServiceProcessor {
           " state, ignore container allocate request.");
       allocation = EMPTY_ALLOCATION;
     } else {
-      allocation =
-          getScheduler().allocate(appAttemptId, ask, release,
-              blacklistAdditions, blacklistRemovals,
-              containerUpdateRequests);
+      try {
+        allocation = getScheduler().allocate(appAttemptId, ask,
+            request.getSchedulingRequests(), release,
+            blacklistAdditions, blacklistRemovals, containerUpdateRequests);
+      } catch (SchedulerInvalidResoureRequestException e) {
+        LOG.warn("Exception caught while the scheduler was handling requests");
+        throw new YarnException(e);
+      }
     }
 
     if (!blacklistAdditions.isEmpty() || !blacklistRemovals.isEmpty()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index cf10be4..8c2f4e4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -1113,8 +1113,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
         Allocation amContainerAllocation =
             appAttempt.scheduler.allocate(
                 appAttempt.applicationAttemptId,
-                appAttempt.amReqs,
-                EMPTY_CONTAINER_RELEASE_LIST,
+                appAttempt.amReqs, null, EMPTY_CONTAINER_RELEASE_LIST,
                 amBlacklist.getBlacklistAdditions(),
                 amBlacklist.getBlacklistRemovals(),
                 new ContainerUpdates());
@@ -1140,7 +1139,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
       // Acquire the AM container from the scheduler.
       Allocation amContainerAllocation =
           appAttempt.scheduler.allocate(appAttempt.applicationAttemptId,
-            EMPTY_CONTAINER_REQUEST_LIST, EMPTY_CONTAINER_RELEASE_LIST, null,
+            EMPTY_CONTAINER_REQUEST_LIST, null, EMPTY_CONTAINER_RELEASE_LIST, null,
             null, new ContainerUpdates());
       // There must be at least one container allocated, because a
       // CONTAINER_ALLOCATED is emitted after an RMContainer is constructed,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index 72376df..7f81f00 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerError;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
@@ -1155,7 +1156,7 @@ public abstract class AbstractYarnScheduler
    *
    * @param asks resource requests
    */
-  protected void normalizeRequests(List<ResourceRequest> asks) {
+  protected void normalizeResourceRequests(List<ResourceRequest> asks) {
     for (ResourceRequest ask: asks) {
       ask.setCapability(getNormalizedResource(ask.getCapability()));
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
index 8858d3b..7d6f233 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
@@ -41,6 +41,8 @@ import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
@@ -49,7 +51,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.Applicatio
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.AppPlacementAllocator;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.LocalityAppPlacementAllocator;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PendingAskUpdateResult;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SingleConstraintAppPlacementAllocator;
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
 import org.apache.hadoop.yarn.util.resource.Resources;
 /**
@@ -91,11 +95,12 @@ public class AppSchedulingInfo {
 
   public final ContainerUpdateContext updateContext;
   public final Map<String, String> applicationSchedulingEnvs = new HashMap<>();
+  private final RMContext rmContext;
 
   public AppSchedulingInfo(ApplicationAttemptId appAttemptId, String user,
       Queue queue, AbstractUsersManager abstractUsersManager, long epoch,
       ResourceUsage appResourceUsage,
-      Map<String, String> applicationSchedulingEnvs) {
+      Map<String, String> applicationSchedulingEnvs, RMContext rmContext) {
     this.applicationAttemptId = appAttemptId;
     this.applicationId = appAttemptId.getApplicationId();
     this.queue = queue;
@@ -105,6 +110,7 @@ public class AppSchedulingInfo {
         epoch << ResourceManager.EPOCH_BIT_SHIFT);
     this.appResourceUsage = appResourceUsage;
     this.applicationSchedulingEnvs.putAll(applicationSchedulingEnvs);
+    this.rmContext = rmContext;
 
     ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
     updateContext = new ContainerUpdateContext(this);
@@ -163,74 +169,153 @@ public class AppSchedulingInfo {
    * application, by asking for more resources and releasing resources acquired
    * by the application.
    *
-   * @param requests
-   *          resources to be acquired
+   * @param resourceRequests resource requests to be allocated
    * @param recoverPreemptedRequestForAContainer
-   *          recover ResourceRequest on preemption
+   *          recover ResourceRequest/SchedulingRequest on preemption
    * @return true if any resource was updated, false otherwise
    */
-  public boolean updateResourceRequests(List<ResourceRequest> requests,
+  public boolean updateResourceRequests(List<ResourceRequest> resourceRequests,
       boolean recoverPreemptedRequestForAContainer) {
-    if (null == requests || requests.isEmpty()) {
-      return false;
+    // Flag to track if any incoming requests update "ANY" requests
+    boolean offswitchResourcesUpdated;
+
+    writeLock.lock();
+    try {
+      // Update AppPlacementAllocator by requests
+      offswitchResourcesUpdated = internalAddResourceRequests(
+          recoverPreemptedRequestForAContainer, resourceRequests);
+    } finally {
+      writeLock.unlock();
     }
 
+    return offswitchResourcesUpdated;
+  }
+
+  /**
+   * The ApplicationMaster is updating resource requirements for the
+   * application, by asking for more resources and releasing resources acquired
+   * by the application.
+   *
+   * @param dedupRequests deduplicated resource requests to be allocated
+   * @param recoverPreemptedRequestForAContainer
+   *          recover ResourceRequest/SchedulingRequest on preemption
+   * @return true if any resource was updated, false otherwise
+   */
+  public boolean updateResourceRequests(
+      Map<SchedulerRequestKey, Map<String, ResourceRequest>> dedupRequests,
+      boolean recoverPreemptedRequestForAContainer) {
     // Flag to track if any incoming requests update "ANY" requests
-    boolean offswitchResourcesUpdated = false;
+    boolean offswitchResourcesUpdated;
 
+    writeLock.lock();
     try {
-      this.writeLock.lock();
-
-      // A map to group resource requests and dedup
-      Map<SchedulerRequestKey, Map<String, ResourceRequest>> dedupRequests =
-          new HashMap<>();
+      // Update AppPlacementAllocator by requests
+      offswitchResourcesUpdated = internalAddResourceRequests(
+          recoverPreemptedRequestForAContainer, dedupRequests);
+    } finally {
+      writeLock.unlock();
+    }
 
-      // Group resource request by schedulerRequestKey and resourceName
-      for (ResourceRequest request : requests) {
-        SchedulerRequestKey schedulerKey = SchedulerRequestKey.create(request);
-        if (!dedupRequests.containsKey(schedulerKey)) {
-          dedupRequests.put(schedulerKey, new HashMap<>());
-        }
-        dedupRequests.get(schedulerKey).put(request.getResourceName(), request);
-      }
+    return offswitchResourcesUpdated;
+  }
 
-      // Update AppPlacementAllocator by dedup requests.
-      offswitchResourcesUpdated =
-          addRequestToAppPlacement(
-              recoverPreemptedRequestForAContainer, dedupRequests);
+  /**
+   * The ApplicationMaster is updating resource requirements for the
+   * application, by asking for more resources and releasing resources acquired
+   * by the application.
+   *
+   * @param schedulingRequests scheduling requests to be allocated
+   * @param recoverPreemptedRequestForAContainer
+   *          recover ResourceRequest/SchedulingRequest on preemption
+   * @return true if any resource was updated, false otherwise
+   */
+  public boolean updateSchedulingRequests(
+      List<SchedulingRequest> schedulingRequests,
+      boolean recoverPreemptedRequestForAContainer) {
+    // Flag to track if any incoming requests update "ANY" requests
+    boolean offswitchResourcesUpdated;
 
-      return offswitchResourcesUpdated;
+    writeLock.lock();
+    try {
+      // Update AppPlacementAllocator by requests
+      offswitchResourcesUpdated = addSchedulingRequests(
+          recoverPreemptedRequestForAContainer, schedulingRequests);
     } finally {
-      this.writeLock.unlock();
+      writeLock.unlock();
     }
+
+    return offswitchResourcesUpdated;
   }
 
   public void removeAppPlacement(SchedulerRequestKey schedulerRequestKey) {
     schedulerKeyToAppPlacementAllocator.remove(schedulerRequestKey);
   }
 
-  boolean addRequestToAppPlacement(
+  private boolean addSchedulingRequests(
+      boolean recoverPreemptedRequestForAContainer,
+      List<SchedulingRequest> schedulingRequests) {
+    // Do we need to update pending resource for app/queue, etc.?
+    boolean requireUpdatePendingResource = false;
+
+    for (SchedulingRequest request : schedulingRequests) {
+      SchedulerRequestKey schedulerRequestKey = SchedulerRequestKey.create(
+          request);
+
+      AppPlacementAllocator appPlacementAllocator =
+          getAndAddAppPlacementAllocatorIfNotExist(schedulerRequestKey,
+              SingleConstraintAppPlacementAllocator.class.getCanonicalName());
+
+      // Update AppPlacementAllocator
+      PendingAskUpdateResult pendingAmountChanges =
+          appPlacementAllocator.updatePendingAsk(schedulerRequestKey,
+              request, recoverPreemptedRequestForAContainer);
+
+      if (null != pendingAmountChanges) {
+        updatePendingResources(pendingAmountChanges, schedulerRequestKey,
+            queue.getMetrics());
+        requireUpdatePendingResource = true;
+      }
+    }
+
+    return requireUpdatePendingResource;
+  }
+
+  /**
+   * Get the AppPlacementAllocator for the given key, inserting a new one if
+   * it does not exist. Callers must hold the write lock.
+   * @param schedulerRequestKey schedulerRequestKey
+   * @param placementTypeClass placementTypeClass
+   * @return AppPlacementAllocator
+   */
+  private AppPlacementAllocator<SchedulerNode> getAndAddAppPlacementAllocatorIfNotExist(
+      SchedulerRequestKey schedulerRequestKey, String placementTypeClass) {
+    AppPlacementAllocator<SchedulerNode> appPlacementAllocator;
+    if ((appPlacementAllocator = schedulerKeyToAppPlacementAllocator.get(
+        schedulerRequestKey)) == null) {
+      appPlacementAllocator =
+          ApplicationPlacementAllocatorFactory.getAppPlacementAllocator(
+              placementTypeClass, this, schedulerRequestKey, rmContext);
+      schedulerKeyToAppPlacementAllocator.put(schedulerRequestKey,
+          appPlacementAllocator);
+    }
+    return appPlacementAllocator;
+  }
+
+  private boolean internalAddResourceRequests(
       boolean recoverPreemptedRequestForAContainer,
       Map<SchedulerRequestKey, Map<String, ResourceRequest>> dedupRequests) {
     boolean offswitchResourcesUpdated = false;
     for (Map.Entry<SchedulerRequestKey, Map<String, ResourceRequest>> entry :
     dedupRequests.entrySet()) {
       SchedulerRequestKey schedulerRequestKey = entry.getKey();
-
-      if (!schedulerKeyToAppPlacementAllocator
-          .containsKey(schedulerRequestKey)) {
-        AppPlacementAllocator<SchedulerNode> placementAllocatorInstance = ApplicationPlacementFactory
-            .getAppPlacementAllocator(applicationSchedulingEnvs
-                .get(ApplicationSchedulingConfig.ENV_APPLICATION_PLACEMENT_TYPE_CLASS));
-        placementAllocatorInstance.setAppSchedulingInfo(this);
-
-        schedulerKeyToAppPlacementAllocator.put(schedulerRequestKey,
-            placementAllocatorInstance);
-      }
+      AppPlacementAllocator<SchedulerNode> appPlacementAllocator =
+          getAndAddAppPlacementAllocatorIfNotExist(schedulerRequestKey,
+              applicationSchedulingEnvs.get(
+                  ApplicationSchedulingConfig.ENV_APPLICATION_PLACEMENT_TYPE_CLASS));
 
       // Update AppPlacementAllocator
-      PendingAskUpdateResult pendingAmountChanges = schedulerKeyToAppPlacementAllocator
-          .get(schedulerRequestKey).updatePendingAsk(entry.getValue().values(),
+      PendingAskUpdateResult pendingAmountChanges =
+          appPlacementAllocator.updatePendingAsk(entry.getValue().values(),
               recoverPreemptedRequestForAContainer);
 
       if (null != pendingAmountChanges) {
@@ -242,6 +327,29 @@ public class AppSchedulingInfo {
     return offswitchResourcesUpdated;
   }
 
+  private boolean internalAddResourceRequests(boolean recoverPreemptedRequestForAContainer,
+      List<ResourceRequest> resourceRequests) {
+    if (null == resourceRequests || resourceRequests.isEmpty()) {
+      return false;
+    }
+
+    // A map to group resource requests and dedup
+    Map<SchedulerRequestKey, Map<String, ResourceRequest>> dedupRequests =
+        new HashMap<>();
+
+    // Group resource request by schedulerRequestKey and resourceName
+    for (ResourceRequest request : resourceRequests) {
+      SchedulerRequestKey schedulerKey = SchedulerRequestKey.create(request);
+      if (!dedupRequests.containsKey(schedulerKey)) {
+        dedupRequests.put(schedulerKey, new HashMap<>());
+      }
+      dedupRequests.get(schedulerKey).put(request.getResourceName(), request);
+    }
+
+    return internalAddResourceRequests(recoverPreemptedRequestForAContainer,
+        dedupRequests);
+  }
+
   private void updatePendingResources(PendingAskUpdateResult updateResult,
       SchedulerRequestKey schedulerKey, QueueMetrics metrics) {
 
@@ -629,13 +737,22 @@ public class AppSchedulingInfo {
     }
   }
 
-  public boolean acceptNodePartition(SchedulerRequestKey schedulerKey,
-      String nodePartition, SchedulingMode schedulingMode) {
+  /**
+   * Pre-check the node to see if it satisfies the given schedulerKey and
+   * scheduling mode.
+   *
+   * @param schedulerKey schedulerKey
+   * @param schedulerNode schedulerNode
+   * @param schedulingMode schedulingMode
+   * @return true if the node can be used, false otherwise.
+   */
+  public boolean precheckNode(SchedulerRequestKey schedulerKey,
+      SchedulerNode schedulerNode, SchedulingMode schedulingMode) {
     try {
       this.readLock.lock();
       AppPlacementAllocator ap =
           schedulerKeyToAppPlacementAllocator.get(schedulerKey);
-      return (ap != null) && ap.acceptNodePartition(nodePartition,
+      return (ap != null) && ap.precheckNode(schedulerNode,
           schedulingMode);
     } finally {
       this.readLock.unlock();
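
The refactored path keeps the same dedup step: incoming ResourceRequests are
grouped by SchedulerRequestKey and resource name, so later duplicates
overwrite earlier ones before reaching the AppPlacementAllocator. A
standalone sketch of just that grouping (illustrative only):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.yarn.api.records.ResourceRequest;
    import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;

    public class DedupRequestsSketch {
      static Map<SchedulerRequestKey, Map<String, ResourceRequest>> dedup(
          List<ResourceRequest> requests) {
        Map<SchedulerRequestKey, Map<String, ResourceRequest>> grouped =
            new HashMap<>();
        for (ResourceRequest request : requests) {
          grouped.computeIfAbsent(SchedulerRequestKey.create(request),
              key -> new HashMap<>())
              .put(request.getResourceName(), request);
        }
        return grouped;
      }
    }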

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ApplicationPlacementAllocatorFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ApplicationPlacementAllocatorFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ApplicationPlacementAllocatorFactory.java
new file mode 100644
index 0000000..a4e5484
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ApplicationPlacementAllocatorFactory.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ApplicationSchedulingConfig;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.AppPlacementAllocator;
+import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
+
+/**
+ * Factory class to build various application placement policies.
+ */
+@Public
+@Unstable
+public class ApplicationPlacementAllocatorFactory {
+
+  /**
+   * Get AppPlacementAllocator related to the placement type requested.
+   *
+   * @param appPlacementAllocatorName
+   *          allocator class name.
+   * @return Specific AppPlacementAllocator instance based on type
+   */
+  public static AppPlacementAllocator<SchedulerNode> getAppPlacementAllocator(
+      String appPlacementAllocatorName, AppSchedulingInfo appSchedulingInfo,
+      SchedulerRequestKey schedulerRequestKey, RMContext rmContext) {
+    Class<?> policyClass;
+    try {
+      if (appPlacementAllocatorName == null) {
+        policyClass = ApplicationSchedulingConfig.DEFAULT_APPLICATION_PLACEMENT_TYPE_CLASS;
+      } else {
+        policyClass = Class.forName(appPlacementAllocatorName);
+      }
+    } catch (ClassNotFoundException e) {
+      policyClass = ApplicationSchedulingConfig.DEFAULT_APPLICATION_PLACEMENT_TYPE_CLASS;
+    }
+
+    if (!AppPlacementAllocator.class.isAssignableFrom(policyClass)) {
+      policyClass = ApplicationSchedulingConfig.DEFAULT_APPLICATION_PLACEMENT_TYPE_CLASS;
+    }
+
+    @SuppressWarnings("unchecked")
+    AppPlacementAllocator<SchedulerNode> placementAllocatorInstance = (AppPlacementAllocator<SchedulerNode>) ReflectionUtils
+        .newInstance(policyClass, null);
+    placementAllocatorInstance.initialize(appSchedulingInfo,
+        schedulerRequestKey, rmContext);
+    return placementAllocatorInstance;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ApplicationPlacementFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ApplicationPlacementFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ApplicationPlacementFactory.java
deleted file mode 100644
index 40c8d05..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ApplicationPlacementFactory.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
-
-import org.apache.hadoop.classification.InterfaceAudience.Public;
-import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ApplicationSchedulingConfig;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.AppPlacementAllocator;
-
-/**
- * Factory class to build various application placement policies.
- */
-@Public
-@Unstable
-public class ApplicationPlacementFactory {
-
-  /**
-   * Get AppPlacementAllocator related to the placement type requested.
-   *
-   * @param appPlacementAllocatorName
-   *          allocator class name.
-   * @return Specific AppPlacementAllocator instance based on type
-   */
-  public static AppPlacementAllocator<SchedulerNode> getAppPlacementAllocator(
-      String appPlacementAllocatorName) {
-    Class<?> policyClass;
-    try {
-      if (appPlacementAllocatorName == null) {
-        policyClass = ApplicationSchedulingConfig.DEFAULT_APPLICATION_PLACEMENT_TYPE_CLASS;
-      } else {
-        policyClass = Class.forName(appPlacementAllocatorName);
-      }
-    } catch (ClassNotFoundException e) {
-      policyClass = ApplicationSchedulingConfig.DEFAULT_APPLICATION_PLACEMENT_TYPE_CLASS;
-    }
-
-    if (!AppPlacementAllocator.class.isAssignableFrom(policyClass)) {
-      policyClass = ApplicationSchedulingConfig.DEFAULT_APPLICATION_PLACEMENT_TYPE_CLASS;
-    }
-
-    @SuppressWarnings("unchecked")
-    AppPlacementAllocator<SchedulerNode> placementAllocatorInstance = (AppPlacementAllocator<SchedulerNode>) ReflectionUtils
-        .newInstance(policyClass, null);
-    return placementAllocatorInstance;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ContainerUpdateContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ContainerUpdateContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ContainerUpdateContext.java
index f410db1..491a9ce 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ContainerUpdateContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ContainerUpdateContext.java
@@ -146,7 +146,7 @@ public class ContainerUpdateContext {
           createResourceRequests(rmContainer, schedulerNode,
               schedulerKey, resToIncrease);
       updateResReqs.put(schedulerKey, resMap);
-      appSchedulingInfo.addRequestToAppPlacement(false, updateResReqs);
+      appSchedulingInfo.updateResourceRequests(updateResReqs, false);
     }
     return true;
   }
@@ -290,7 +290,7 @@ public class ContainerUpdateContext {
           (rmContainer, node, schedulerKey,
           rmContainer.getContainer().getResource());
       reqsToUpdate.put(schedulerKey, resMap);
-      appSchedulingInfo.addRequestToAppPlacement(true, reqsToUpdate);
+      appSchedulingInfo.updateResourceRequests(reqsToUpdate, true);
       return UNDEFINED;
     }
     return retVal;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index 3930a35..753c2b8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerError;
 import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
 import org.apache.hadoop.yarn.server.api.ContainerType;
@@ -231,7 +232,7 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
 
     this.appSchedulingInfo = new AppSchedulingInfo(applicationAttemptId, user,
         queue, abstractUsersManager, rmContext.getEpoch(), attemptResourceUsage,
-        applicationSchedulingEnvs);
+        applicationSchedulingEnvs, rmContext);
     ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
     readLock = lock.readLock();
     writeLock = lock.writeLock();
@@ -451,6 +452,23 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
       writeLock.unlock();
     }
   }
+
+  public boolean updateSchedulingRequests(
+      List<SchedulingRequest> requests) {
+    if (requests == null) {
+      return false;
+    }
+
+    try {
+      writeLock.lock();
+      if (!isStopped) {
+        return appSchedulingInfo.updateSchedulingRequests(requests, false);
+      }
+      return false;
+    } finally {
+      writeLock.unlock();
+    }
+  }
   
   public void recoverResourceRequestsForContainer(
       ContainerRequest containerRequest) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java
index 93ca7c2..43d55c4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -132,18 +133,19 @@ public interface YarnScheduler extends EventHandler<SchedulerEvent> {
    * 
    * @param appAttemptId
    * @param ask
+   * @param schedulingRequests
    * @param release
-   * @param blacklistAdditions 
-   * @param blacklistRemovals 
-   * @param updateRequests
-   * @return the {@link Allocation} for the application
+   * @param blacklistAdditions
+   * @param blacklistRemovals
+   * @param updateRequests
+   * @return the {@link Allocation} for the application
    */
   @Public
   @Stable
   Allocation allocate(ApplicationAttemptId appAttemptId,
-      List<ResourceRequest> ask, List<ContainerId> release,
-      List<String> blacklistAdditions, List<String> blacklistRemovals,
-      ContainerUpdates updateRequests);
+      List<ResourceRequest> ask, List<SchedulingRequest> schedulingRequests,
+      List<ContainerId> release, List<String> blacklistAdditions,
+      List<String> blacklistRemovals, ContainerUpdates updateRequests);
 
   /**
    * Get node resource usage report.
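
Callers of the widened allocate() signature that carry no SchedulingRequests
simply pass null (as RMAppAttemptImpl does above) or an empty list. A hedged
sketch of a call site; the scheduler and attempt id are assumed to come from
the caller's context:

    import java.util.Collections;

    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.api.records.ResourceRequest;
    import org.apache.hadoop.yarn.api.records.SchedulingRequest;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerUpdates;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;

    public class AllocateCallSketch {
      static Allocation heartbeatWithNothing(YarnScheduler scheduler,
          ApplicationAttemptId attemptId) {
        return scheduler.allocate(attemptId,
            Collections.<ResourceRequest>emptyList(),
            Collections.<SchedulingRequest>emptyList(),   // new parameter
            Collections.<ContainerId>emptyList(),
            null, null, new ContainerUpdates());
      }
    }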

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index d2713c8..c713036 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -60,8 +60,11 @@ import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
+import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResoureRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
@@ -1058,12 +1061,29 @@ public class CapacityScheduler extends
     }
   }
 
+  /**
+   * Normalize a list of SchedulingRequests.
+   *
+   * @param asks scheduling requests to normalize
+   */
+  private void normalizeSchedulingRequests(List<SchedulingRequest> asks) {
+    if (asks == null) {
+      return;
+    }
+    for (SchedulingRequest ask: asks) {
+      ResourceSizing sizing = ask.getResourceSizing();
+      if (sizing != null && sizing.getResources() != null) {
+        sizing.setResources(getNormalizedResource(sizing.getResources()));
+      }
+    }
+  }
+
   @Override
   @Lock(Lock.NoLock.class)
   public Allocation allocate(ApplicationAttemptId applicationAttemptId,
-      List<ResourceRequest> ask, List<ContainerId> release,
-      List<String> blacklistAdditions, List<String> blacklistRemovals,
-      ContainerUpdates updateRequests) {
+      List<ResourceRequest> ask, List<SchedulingRequest> schedulingRequests,
+      List<ContainerId> release, List<String> blacklistAdditions,
+      List<String> blacklistRemovals, ContainerUpdates updateRequests) {
     FiCaSchedulerApp application = getApplicationAttempt(applicationAttemptId);
     if (application == null) {
       LOG.error("Calling allocate on removed or non existent application " +
@@ -1071,6 +1091,18 @@ public class CapacityScheduler extends
       return EMPTY_ALLOCATION;
     }
 
+    if ((!getConfiguration().getBoolean(
+        CapacitySchedulerConfiguration.SCHEDULING_REQUEST_ALLOWED,
+        CapacitySchedulerConfiguration.DEFAULT_SCHEDULING_REQUEST_ALLOWED))
+        && schedulingRequests != null && (!schedulingRequests.isEmpty())) {
+      throw new SchedulerInvalidResoureRequestException(
+          "Application attempt:" + applicationAttemptId
+              + " is using SchedulingRequest, which is disabled. Please update "
+              + CapacitySchedulerConfiguration.SCHEDULING_REQUEST_ALLOWED
+              + " to true in capacity-scheduler.xml in order to use this "
+              + "feature.");
+    }
+
     // The allocate may be the leftover from previous attempt, and it will
     // impact current attempt, such as confuse the request and allocation for
     // current attempt's AM container.
@@ -1091,7 +1123,10 @@ public class CapacityScheduler extends
     LeafQueue updateDemandForQueue = null;
 
     // Sanity check for new allocation requests
-    normalizeRequests(ask);
+    normalizeResourceRequests(ask);
+
+    // Normalize scheduling requests
+    normalizeSchedulingRequests(schedulingRequests);
 
     Allocation allocation;
 
@@ -1104,7 +1139,8 @@ public class CapacityScheduler extends
       }
 
       // Process resource requests
-      if (!ask.isEmpty()) {
+      if (!ask.isEmpty() || (schedulingRequests != null && !schedulingRequests
+          .isEmpty())) {
         if (LOG.isDebugEnabled()) {
           LOG.debug(
               "allocate: pre-update " + applicationAttemptId + " ask size ="
@@ -1113,7 +1149,8 @@ public class CapacityScheduler extends
         }
 
         // Update application requests
-        if (application.updateResourceRequests(ask)) {
+        if (application.updateResourceRequests(ask) || application
+            .updateSchedulingRequests(schedulingRequests)) {
           updateDemandForQueue = (LeafQueue) application.getQueue();
         }
 
@@ -2580,10 +2617,9 @@ public class CapacityScheduler extends
         // Validate placement constraint is satisfied before
         // committing the request.
         try {
-          if (!PlacementConstraintsUtil.canSatisfyConstraints(
+          if (!PlacementConstraintsUtil.canSatisfySingleConstraint(
               appAttempt.getApplicationId(),
-              schedulingRequest.getAllocationTags(),
-              schedulerNode,
+              schedulingRequest.getAllocationTags(), schedulerNode,
               rmContext.getPlacementConstraintManager(),
               rmContext.getAllocationTagsManager())) {
             LOG.debug("Failed to allocate container for application "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index e609be9..00733a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -77,6 +77,11 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
   
   @Private
   public static final String PREFIX = "yarn.scheduler.capacity.";
+
+  @Private
+  public static final String SCHEDULING_REQUEST_ALLOWED =
+      PREFIX + "scheduling-request.allowed";
+  public static final boolean DEFAULT_SCHEDULING_REQUEST_ALLOWED = false;
   
   @Private
   public static final String DOT = ".";
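
The CapacityScheduler additionally gates SchedulingRequests behind this new
flag and rejects them otherwise (see the allocate() change above). A minimal
sketch of flipping it programmatically, equivalent to setting
yarn.scheduler.capacity.scheduling-request.allowed=true in
capacity-scheduler.xml:

    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;

    public class AllowSchedulingRequestsSketch {
      public static void main(String[] args) {
        CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
        conf.setBoolean(
            CapacitySchedulerConfiguration.SCHEDULING_REQUEST_ALLOWED, true);
        System.out.println(conf.getBoolean(
            CapacitySchedulerConfiguration.SCHEDULING_REQUEST_ALLOWED,
            CapacitySchedulerConfiguration.DEFAULT_SCHEDULING_REQUEST_ALLOWED));
      }
    }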

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
index 2642532..afa468b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
@@ -143,8 +143,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
 
     // Is the nodePartition of pending request matches the node's partition
     // If not match, jump to next priority.
-    if (!appInfo.acceptNodePartition(schedulerKey, node.getPartition(),
-        schedulingMode)) {
+    if (!appInfo.precheckNode(schedulerKey, node, schedulingMode)) {
       ActivitiesLogger.APP.recordSkippedAppActivityWithoutAllocation(
           activitiesManager, node, application, priority,
           ActivityDiagnosticConstant.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/ContainerRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/ContainerRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/ContainerRequest.java
index 075db79..cad15a0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/ContainerRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/ContainerRequest.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.common;
 
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 
 import java.util.List;
 
@@ -43,12 +44,23 @@ import java.util.List;
  */
 public class ContainerRequest {
   private List<ResourceRequest> requests;
+  private SchedulingRequest schedulingRequest;
 
   public ContainerRequest(List<ResourceRequest> requests) {
     this.requests = requests;
+    schedulingRequest = null;
+  }
+
+  public ContainerRequest(SchedulingRequest schedulingRequest) {
+    this.schedulingRequest = schedulingRequest;
+    this.requests = null;
   }
 
   public List<ResourceRequest> getResourceRequests() {
     return requests;
   }
+
+  public SchedulingRequest getSchedulingRequest() {
+    return schedulingRequest;
+  }
 }
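
With the second constructor a ContainerRequest carries either a list of
ResourceRequests or a single SchedulingRequest, never both, so recovery code
has to check which one is present. A small illustrative sketch:

    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerRequest;

    public class ContainerRequestSketch {
      static String describe(ContainerRequest request) {
        if (request.getSchedulingRequest() != null) {
          // Built from a SchedulingRequest; allocation tags travel with it.
          return "scheduling-request based, tags="
              + request.getSchedulingRequest().getAllocationTags();
        }
        return "resource-request based: " + request.getResourceRequests();
      }
    }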

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/PendingAsk.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/PendingAsk.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/PendingAsk.java
index 85d8715..2ed3e83 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/PendingAsk.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/PendingAsk.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.common;
 
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
 /**
@@ -31,6 +32,11 @@ public class PendingAsk {
   private final int count;
   public final static PendingAsk ZERO = new PendingAsk(Resources.none(), 0);
 
+  public PendingAsk(ResourceSizing sizing) {
+    this.perAllocationResource = sizing.getResources();
+    this.count = sizing.getNumAllocations();
+  }
+
   public PendingAsk(Resource res, int num) {
     this.perAllocationResource = res;
     this.count = num;
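
The new constructor maps a SchedulingRequest's ResourceSizing directly onto a
PendingAsk: the per-allocation resource plus the number of allocations still
wanted. A sketch, assuming ResourceSizing.newInstance(numAllocations,
resource):

    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.api.records.ResourceSizing;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk;

    public class PendingAskSketch {
      public static void main(String[] args) {
        ResourceSizing sizing =
            ResourceSizing.newInstance(5, Resource.newInstance(2048, 2));
        PendingAsk ask = new PendingAsk(sizing);
        // Five pending allocations of <2048 MB, 2 vcores> each.
        System.out.println(ask.getPerAllocationResource() + " x " + ask.getCount());
      }
    }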

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 4ea0347..7eb1e31 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -542,6 +542,12 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
                 schedulerContainer.getRmContainer().getContainer());
             ((RMContainerImpl) rmContainer).setContainerRequest(
                 containerRequest);
+
+            // If this is from a SchedulingRequest, set allocation tags.
+            if (containerRequest.getSchedulingRequest() != null) {
+              ((RMContainerImpl) rmContainer).setAllocationTags(
+                  containerRequest.getSchedulingRequest().getAllocationTags());
+            }
           }
 
           attemptResourceUsage.incUsed(schedulerContainer.getNodePartition(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
index 4bb3e79..962e548 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.log4j.Logger;
 
@@ -287,21 +288,15 @@ public class AllocationTagsManager {
    *                       {@link SchedulingRequest#getAllocationTags()}
    *                       application_id will be added to allocationTags.
    */
+  @SuppressWarnings("unchecked")
   public void addContainer(NodeId nodeId, ContainerId containerId,
       Set<String> allocationTags) {
+    // Do nothing for empty allocation tags.
+    if (allocationTags == null || allocationTags.isEmpty()) {
+      return;
+    }
     ApplicationId applicationId =
         containerId.getApplicationAttemptId().getApplicationId();
-    String applicationIdTag =
-        AllocationTagsNamespaces.APP_ID + applicationId.toString();
-
-    boolean useSet = false;
-    if (allocationTags != null && !allocationTags.isEmpty()) {
-      // Copy before edit it.
-      allocationTags = new HashSet<>(allocationTags);
-      allocationTags.add(applicationIdTag);
-      useSet = true;
-    }
-
     writeLock.lock();
     try {
       TypeToCountedTags perAppTagsMapping = perAppNodeMappings
@@ -311,19 +306,12 @@ public class AllocationTagsManager {
       // Covering test-cases where context is mocked
       String nodeRack = (rmContext.getRMNodes() != null
           && rmContext.getRMNodes().get(nodeId) != null)
-              ? rmContext.getRMNodes().get(nodeId).getRackName()
-              : "default-rack";
-      if (useSet) {
-        perAppTagsMapping.addTags(nodeId, allocationTags);
-        perAppRackTagsMapping.addTags(nodeRack, allocationTags);
-        globalNodeMapping.addTags(nodeId, allocationTags);
-        globalRackMapping.addTags(nodeRack, allocationTags);
-      } else {
-        perAppTagsMapping.addTag(nodeId, applicationIdTag);
-        perAppRackTagsMapping.addTag(nodeRack, applicationIdTag);
-        globalNodeMapping.addTag(nodeId, applicationIdTag);
-        globalRackMapping.addTag(nodeRack, applicationIdTag);
-      }
+              ? rmContext.getRMNodes().get(nodeId).getRackName() :
+          "default-rack";
+      perAppTagsMapping.addTags(nodeId, allocationTags);
+      perAppRackTagsMapping.addTags(nodeRack, allocationTags);
+      globalNodeMapping.addTags(nodeId, allocationTags);
+      globalRackMapping.addTags(nodeRack, allocationTags);
 
       if (LOG.isDebugEnabled()) {
         LOG.debug("Added container=" + containerId + " with tags=["
@@ -341,20 +329,15 @@ public class AllocationTagsManager {
    * @param containerId    containerId.
    * @param allocationTags allocation tags for given container
    */
+  @SuppressWarnings("unchecked")
   public void removeContainer(NodeId nodeId,
       ContainerId containerId, Set<String> allocationTags) {
+    // Do nothing for empty allocation tags.
+    if (allocationTags == null || allocationTags.isEmpty()) {
+      return;
+    }
     ApplicationId applicationId =
         containerId.getApplicationAttemptId().getApplicationId();
-    String applicationIdTag =
-        AllocationTagsNamespaces.APP_ID + applicationId.toString();
-    boolean useSet = false;
-
-    if (allocationTags != null && !allocationTags.isEmpty()) {
-      // Copy before edit it.
-      allocationTags = new HashSet<>(allocationTags);
-      allocationTags.add(applicationIdTag);
-      useSet = true;
-    }
 
     writeLock.lock();
     try {
@@ -368,19 +351,12 @@ public class AllocationTagsManager {
       // Covering test-cases where context is mocked
       String nodeRack = (rmContext.getRMNodes() != null
           && rmContext.getRMNodes().get(nodeId) != null)
-              ? rmContext.getRMNodes().get(nodeId).getRackName()
-              : "default-rack";
-      if (useSet) {
-        perAppTagsMapping.removeTags(nodeId, allocationTags);
-        perAppRackTagsMapping.removeTags(nodeRack, allocationTags);
-        globalNodeMapping.removeTags(nodeId, allocationTags);
-        globalRackMapping.removeTags(nodeRack, allocationTags);
-      } else {
-        perAppTagsMapping.removeTag(nodeId, applicationIdTag);
-        perAppRackTagsMapping.removeTag(nodeRack, applicationIdTag);
-        globalNodeMapping.removeTag(nodeId, applicationIdTag);
-        globalRackMapping.removeTag(nodeRack, applicationIdTag);
-      }
+              ? rmContext.getRMNodes().get(nodeId).getRackName() :
+          "default-rack";
+      perAppTagsMapping.removeTags(nodeId, allocationTags);
+      perAppRackTagsMapping.removeTags(nodeRack, allocationTags);
+      globalNodeMapping.removeTags(nodeId, allocationTags);
+      globalRackMapping.removeTags(nodeRack, allocationTags);
 
       if (perAppTagsMapping.isEmpty()) {
         perAppNodeMappings.remove(applicationId);
@@ -602,6 +578,7 @@ public class AllocationTagsManager {
    * @throws InvalidAllocationTagsQueryException when illegal query
    *                                            parameter specified
    */
+  @SuppressWarnings("unchecked")
   public long getRackCardinalityByOp(String rack, ApplicationId applicationId,
       Set<String> tags, LongBinaryOperator op)
       throws InvalidAllocationTagsQueryException {

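To summarize the AllocationTagsManager change above: addContainer/removeContainer now return early for empty tag sets and record exactly the caller-supplied tags, instead of implicitly injecting an application-id tag. The following standalone sketch (not the real AllocationTagsManager, which also keys by application and rack and takes locks) mirrors that simplified per-node bookkeeping under those stated assumptions.

    // Standalone illustration of the simplified tag bookkeeping.
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;

    public class TagBookkeepingSketch {
      // nodeId -> tag -> count
      private final Map<String, Map<String, Long>> nodeTags = new HashMap<>();

      public void addContainer(String nodeId, Set<String> allocationTags) {
        // Do nothing for empty allocation tags, matching the new early return.
        if (allocationTags == null || allocationTags.isEmpty()) {
          return;
        }
        Map<String, Long> tags =
            nodeTags.computeIfAbsent(nodeId, n -> new HashMap<>());
        for (String tag : allocationTags) {
          tags.merge(tag, 1L, Long::sum);
        }
      }

      public void removeContainer(String nodeId, Set<String> allocationTags) {
        if (allocationTags == null || allocationTags.isEmpty()) {
          return;
        }
        Map<String, Long> tags = nodeTags.get(nodeId);
        if (tags == null) {
          return;
        }
        for (String tag : allocationTags) {
          // Decrement the counter and drop the tag when it reaches zero.
          tags.computeIfPresent(tag, (t, c) -> c <= 1 ? null : c - 1);
        }
        if (tags.isEmpty()) {
          nodeTags.remove(nodeId);
        }
      }

      public long cardinality(String nodeId, String tag) {
        Map<String, Long> tags = nodeTags.get(nodeId);
        return tags == null ? 0L : tags.getOrDefault(tag, 0L);
      }
    }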
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38af2379/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsNamespaces.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsNamespaces.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsNamespaces.java
deleted file mode 100644
index 43fcfe5..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsNamespaces.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * *
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- * /
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
-
-/**
- * Predefined namespaces for tags
- *
- * Same as namespace  of resource types. Namespaces of placement tags are start
- * with alphabets and ended with "/"
- */
-public class AllocationTagsNamespaces {
-  public static final String APP_ID = "yarn_app_id/";
-}

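For reference on the deleted AllocationTagsNamespaces class: its only member was the APP_ID namespace prefix, which the manager previously prepended to an implicit application-id tag. The snippet below simply reproduces what that removed helper yielded; with this patch the manager no longer adds such a tag on the caller's behalf.

    // Replicates the behavior of the deleted constant for illustration only.
    public class RemovedNamespaceSketch {
      private static final String APP_ID_NS = "yarn_app_id/";

      public static String appIdTag(String applicationId) {
        // e.g. "yarn_app_id/application_1517000000000_0001"
        return APP_ID_NS + applicationId;
      }
    }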
