Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2015/07/14 02:29:02 UTC

[01/11] hadoop git commit: YARN-3116. RM notifies NM whether a container is an AM container or normal task container. Contributed by Giovanni Matteo Fumarola.

Repository: hadoop
Updated Branches:
  refs/heads/YARN-1197 47f4c5410 -> a431ed907


YARN-3116. RM notifies NM whether a container is an AM container or normal task container. Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ea36299
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ea36299
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ea36299

Branch: refs/heads/YARN-1197
Commit: 1ea36299a47af302379ae0750b571ec021eb54ad
Parents: 47f4c54
Author: Zhijie Shen <zj...@apache.org>
Authored: Fri Jul 10 18:58:10 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Fri Jul 10 18:58:10 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 ++
 .../yarn/server/api/ContainerContext.java       | 19 +++++++
 .../api/ContainerInitializationContext.java     |  7 +++
 .../server/api/ContainerTerminationContext.java |  7 +++
 .../hadoop/yarn/server/api/ContainerType.java   | 34 +++++++++++++
 .../src/main/proto/yarn_protos.proto            |  5 ++
 .../yarn/api/records/impl/pb/ProtoUtils.java    | 12 +++++
 .../yarn/security/ContainerTokenIdentifier.java | 43 ++++++++++++++--
 .../main/proto/server/yarn_security_token.proto |  1 +
 .../yarn/security/TestYARNTokenIdentifier.java  | 53 ++++++++++++++++++++
 .../containermanager/AuxServices.java           |  6 ++-
 .../scheduler/SchedulerApplicationAttempt.java  | 17 ++++++-
 .../scheduler/capacity/LeafQueue.java           |  3 +-
 .../security/RMContainerTokenSecretManager.java | 13 +++--
 .../server/resourcemanager/Application.java     | 12 +++++
 .../capacity/TestContainerAllocation.java       |  5 +-
 .../scheduler/fair/FairSchedulerTestBase.java   | 31 ++++++++++--
 .../scheduler/fifo/TestFifoScheduler.java       |  4 ++
 18 files changed, 253 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea36299/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index db000d7..f78bbfa 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -134,6 +134,9 @@ Release 2.8.0 - UNRELEASED
     YARN-1012. Report NM aggregated container resource utilization in heartbeat. 
     (Inigo Goiri via kasha)
 
+    YARN-3116. RM notifies NM whether a container is an AM container or normal
+    task container. (Giovanni Matteo Fumarola via zjshen)
+
   IMPROVEMENTS
 
     YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea36299/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerContext.java
index d13159b..f7a9b02 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerContext.java
@@ -35,14 +35,23 @@ public class ContainerContext {
   private final String user;
   private final ContainerId containerId;
   private final Resource resource;
+  private final ContainerType containerType;
 
   @Private
   @Unstable
   public ContainerContext(String user, ContainerId containerId,
       Resource resource) {
+    this(user, containerId, resource, ContainerType.TASK);
+  }
+
+  @Private
+  @Unstable
+  public ContainerContext(String user, ContainerId containerId,
+      Resource resource, ContainerType containerType) {
     this.user = user;
     this.containerId = containerId;
     this.resource = resource;
+    this.containerType = containerType;
   }
 
   /**
@@ -72,4 +81,14 @@ public class ContainerContext {
   public Resource getResource() {
     return resource;
   }
+
+  /**
+   * Get the {@link ContainerType} of the container
+   * being initialized or stopped.
+   *
+   * @return the type of the container
+   */
+  public ContainerType getContainerType() {
+    return containerType;
+  }
 }
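
With this change, an auxiliary service running in the NodeManager can tell AM containers apart from task containers via the context objects it receives. A minimal consumer sketch follows; everything in it except getContainerType() and ContainerType is illustrative (the service name and counter are hypothetical, not part of this patch):

    import java.nio.ByteBuffer;
    import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext;
    import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext;
    import org.apache.hadoop.yarn.server.api.AuxiliaryService;
    import org.apache.hadoop.yarn.server.api.ContainerInitializationContext;
    import org.apache.hadoop.yarn.server.api.ContainerType;

    public class ContainerTypeAwareService extends AuxiliaryService {
      private int amContainersSeen = 0; // count of AM containers initialized

      public ContainerTypeAwareService() {
        super("container-type-aware-service"); // hypothetical service name
      }

      @Override
      public void initializeContainer(ContainerInitializationContext ctx) {
        // getContainerType() is the accessor added above; it reports TASK
        // when the three-argument ContainerContext constructor was used.
        if (ctx.getContainerType() == ContainerType.APPLICATION_MASTER) {
          amContainersSeen++;
        }
      }

      @Override
      public void initializeApplication(ApplicationInitializationContext c) { }
      @Override
      public void stopApplication(ApplicationTerminationContext c) { }
      @Override
      public ByteBuffer getMetaData() { return null; }
    }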

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea36299/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerInitializationContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerInitializationContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerInitializationContext.java
index 5b5bbda..44428f9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerInitializationContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerInitializationContext.java
@@ -41,4 +41,11 @@ public class ContainerInitializationContext extends ContainerContext {
     super(user, containerId, resource);
   }
 
+  @Private
+  @Unstable
+  public ContainerInitializationContext(String user, ContainerId containerId,
+      Resource resource, ContainerType containerType) {
+    super(user, containerId, resource, containerType);
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea36299/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerTerminationContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerTerminationContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerTerminationContext.java
index 34ba73e..2e4ad3b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerTerminationContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerTerminationContext.java
@@ -41,4 +41,11 @@ public class ContainerTerminationContext extends ContainerContext {
     super(user, containerId, resource);
   }
 
+  @Private
+  @Unstable
+  public ContainerTerminationContext(String user, ContainerId containerId,
+      Resource resource, ContainerType containerType) {
+    super(user, containerId, resource, containerType);
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea36299/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerType.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerType.java
new file mode 100644
index 0000000..ffae811
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerType.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api;
+
+/**
+ * Container property encoding allocation and execution semantics.
+ * 
+ * <p>
+ * The container types are the following:
+ * <ul>
+ * <li>{@link #APPLICATION_MASTER}
+ * <li>{@link #TASK}
+ * </ul>
+ * </p>
+ */
+public enum ContainerType {
+  APPLICATION_MASTER, TASK
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea36299/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index f801409..33cc255 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -264,6 +264,11 @@ message NodeLabelProto {
   optional bool isExclusive = 2 [default = true]; 
 }
 
+enum ContainerTypeProto {
+  APPLICATION_MASTER = 1;
+  TASK = 2;
+}
+
 ////////////////////////////////////////////////////////////////////////
 ////// From AM_RM_Protocol /////////////////////////////////////////////
 ////////////////////////////////////////////////////////////////////////

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea36299/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
index 4e8a19c..e742f4c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
@@ -53,7 +53,9 @@ import org.apache.hadoop.yarn.proto.YarnProtos.QueueStateProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ReservationRequestInterpreterProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationAttemptStateProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerTypeProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos;
+import org.apache.hadoop.yarn.server.api.ContainerType;
 
 import com.google.protobuf.ByteString;
 
@@ -270,4 +272,14 @@ public class ProtoUtils {
     return LogAggregationStatus.valueOf(e.name().replace(
       LOG_AGGREGATION_STATUS_PREFIX, ""));
   }
+
+  /*
+   * ContainerType
+   */
+  public static ContainerTypeProto convertToProtoFormat(ContainerType e) {
+    return ContainerTypeProto.valueOf(e.name());
+  }
+  public static ContainerType convertFromProtoFormat(ContainerTypeProto e) {
+    return ContainerType.valueOf(e.name());
+  }
 }
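
The two converters are inverses by construction, since the Java enum and ContainerTypeProto declare identical constant names. A round trip, for illustration:

    ContainerTypeProto p =
        ProtoUtils.convertToProtoFormat(ContainerType.APPLICATION_MASTER);
    ContainerType t = ProtoUtils.convertFromProtoFormat(p);
    // t == ContainerType.APPLICATION_MASTER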

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea36299/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
index 9a60d01..106e6d5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
@@ -39,13 +39,15 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.LogAggregationContextPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.PriorityPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
 import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerTypeProto;
 import org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.ContainerTokenIdentifierProto;
+import org.apache.hadoop.yarn.server.api.ContainerType;
 
 import com.google.protobuf.TextFormat;
 
-
 /**
  * TokenIdentifier for a container. Encodes {@link ContainerId},
  * {@link Resource} needed by the container and the target NMs host-address.
@@ -66,14 +68,24 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
       int masterKeyId, long rmIdentifier, Priority priority, long creationTime) {
     this(containerID, hostName, appSubmitter, r, expiryTimeStamp, masterKeyId,
         rmIdentifier, priority, creationTime, null,
-        CommonNodeLabelsManager.NO_LABEL);
+        CommonNodeLabelsManager.NO_LABEL, ContainerType.TASK);
   }
 
   public ContainerTokenIdentifier(ContainerId containerID, String hostName,
       String appSubmitter, Resource r, long expiryTimeStamp, int masterKeyId,
       long rmIdentifier, Priority priority, long creationTime,
       LogAggregationContext logAggregationContext, String nodeLabelExpression) {
-    ContainerTokenIdentifierProto.Builder builder = 
+    this(containerID, hostName, appSubmitter, r, expiryTimeStamp, masterKeyId,
+        rmIdentifier, priority, creationTime, logAggregationContext,
+        nodeLabelExpression, ContainerType.TASK);
+  }
+
+  public ContainerTokenIdentifier(ContainerId containerID, String hostName,
+      String appSubmitter, Resource r, long expiryTimeStamp, int masterKeyId,
+      long rmIdentifier, Priority priority, long creationTime,
+      LogAggregationContext logAggregationContext, String nodeLabelExpression,
+      ContainerType containerType) {
+    ContainerTokenIdentifierProto.Builder builder =
         ContainerTokenIdentifierProto.newBuilder();
     if (containerID != null) {
       builder.setContainerId(((ContainerIdPBImpl)containerID).getProto());
@@ -99,7 +111,8 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
     if (nodeLabelExpression != null) {
       builder.setNodeLabelExpression(nodeLabelExpression);
     }
-    
+    builder.setContainerType(convertToProtoFormat(containerType));
+
     proto = builder.build();
   }
 
@@ -156,7 +169,18 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
   public long getRMIdentifier() {
     return proto.getRmIdentifier();
   }
-  
+
+  /**
+   * Get the {@link ContainerType} of the container to allocate.
+   * @return the type of the container
+   */
+  public ContainerType getContainerType() {
+    if (!proto.hasContainerType()) {
+      return null;
+    }
+    return convertFromProtoFormat(proto.getContainerType());
+  }
+
   public ContainerTokenIdentifierProto getProto() {
     return proto;
   }
@@ -232,4 +256,13 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
   public String toString() {
     return TextFormat.shortDebugString(getProto());
   }
+
+  private ContainerTypeProto convertToProtoFormat(ContainerType containerType) {
+    return ProtoUtils.convertToProtoFormat(containerType);
+  }
+
+  private ContainerType convertFromProtoFormat(
+      ContainerTypeProto containerType) {
+    return ProtoUtils.convertFromProtoFormat(containerType);
+  }
 }
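
Since containerType is an optional field (see the yarn_security_token.proto change below), a token minted before this change deserializes without it and getContainerType() returns null. A consumer that needs a definite answer has to pick its own default; a sketch of such a helper (not part of the patch):

    static ContainerType typeOf(ContainerTokenIdentifier id) {
      ContainerType type = id.getContainerType();
      // Tokens created by an older RM carry no containerType field.
      return type != null ? type : ContainerType.TASK;
    }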

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea36299/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/server/yarn_security_token.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/server/yarn_security_token.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/server/yarn_security_token.proto
index d1bef21..339e99e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/server/yarn_security_token.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/server/yarn_security_token.proto
@@ -50,6 +50,7 @@ message ContainerTokenIdentifierProto {
   optional int64 creationTime = 9;
   optional LogAggregationContextProto logAggregationContext = 10;
   optional string nodeLabelExpression = 11;
+  optional ContainerTypeProto containerType = 12;
 }
 
 message ClientToAMTokenIdentifierProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea36299/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
index 5fe75bc..68f0b9d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
@@ -33,10 +33,12 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
 import org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.YARNDelegationTokenIdentifierProto;
 import org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier;
 import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
 import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
+import org.apache.hadoop.yarn.server.api.ContainerType;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -201,6 +203,12 @@ public class TestYARNTokenIdentifier {
         anotherToken.getCreationTime(), creationTime);
     
     Assert.assertNull(anotherToken.getLogAggregationContext());
+
+    Assert.assertEquals(CommonNodeLabelsManager.NO_LABEL,
+        anotherToken.getNodeLabelExpression());
+
+    Assert.assertEquals(ContainerType.TASK,
+        anotherToken.getContainerType());
   }
   
   @Test
@@ -347,4 +355,49 @@ public class TestYARNTokenIdentifier {
     Assert.assertEquals(new Text("yarn"), token.getRenewer());
   }
 
+  @Test
+  public void testAMContainerTokenIdentifier() throws IOException {
+    ContainerId containerID = ContainerId.newContainerId(
+        ApplicationAttemptId.newInstance(ApplicationId.newInstance(
+            1, 1), 1), 1);
+    String hostName = "host0";
+    String appSubmitter = "usr0";
+    Resource r = Resource.newInstance(1024, 1);
+    long expiryTimeStamp = 1000;
+    int masterKeyId = 1;
+    long rmIdentifier = 1;
+    Priority priority = Priority.newInstance(1);
+    long creationTime = 1000;
+
+    ContainerTokenIdentifier token =
+        new ContainerTokenIdentifier(containerID, hostName, appSubmitter, r,
+            expiryTimeStamp, masterKeyId, rmIdentifier, priority, creationTime,
+            null, CommonNodeLabelsManager.NO_LABEL, ContainerType.APPLICATION_MASTER);
+
+    ContainerTokenIdentifier anotherToken = new ContainerTokenIdentifier();
+
+    byte[] tokenContent = token.getBytes();
+    DataInputBuffer dib = new DataInputBuffer();
+    dib.reset(tokenContent, tokenContent.length);
+    anotherToken.readFields(dib);
+
+    Assert.assertEquals(ContainerType.APPLICATION_MASTER,
+        anotherToken.getContainerType());
+
+    token =
+        new ContainerTokenIdentifier(containerID, hostName, appSubmitter, r,
+            expiryTimeStamp, masterKeyId, rmIdentifier, priority, creationTime,
+            null, CommonNodeLabelsManager.NO_LABEL, ContainerType.TASK);
+
+    anotherToken = new ContainerTokenIdentifier();
+
+    tokenContent = token.getBytes();
+    dib = new DataInputBuffer();
+    dib.reset(tokenContent, tokenContent.length);
+    anotherToken.readFields(dib);
+
+    Assert.assertEquals(ContainerType.TASK,
+        anotherToken.getContainerType());
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea36299/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
index fb6f79b..cd5ed88 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
@@ -225,7 +225,8 @@ public class AuxServices extends AbstractService
           try {
             serv.initializeContainer(new ContainerInitializationContext(
                 event.getUser(), event.getContainer().getContainerId(),
-                event.getContainer().getResource()));
+                event.getContainer().getResource(), event.getContainer()
+                .getContainerTokenIdentifier().getContainerType()));
           } catch (Throwable th) {
             logWarningWhenAuxServiceThrowExceptions(serv,
                 AuxServicesEventType.CONTAINER_INIT, th);
@@ -237,7 +238,8 @@ public class AuxServices extends AbstractService
           try {
             serv.stopContainer(new ContainerTerminationContext(
                 event.getUser(), event.getContainer().getContainerId(),
-                event.getContainer().getResource()));
+                event.getContainer().getResource(), event.getContainer()
+                .getContainerTokenIdentifier().getContainerType()));
           } catch (Throwable th) {
             logWarningWhenAuxServiceThrowExceptions(serv,
                 AuxServicesEventType.CONTAINER_STOP, th);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea36299/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index dbc3cb5..475f2c7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.server.api.ContainerType;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AggregateAppResourceUsage;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
@@ -61,7 +62,6 @@ import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.SchedulableEntity;
 
-
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.HashMultiset;
@@ -467,13 +467,26 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
       .hasNext();) {
       RMContainer rmContainer = i.next();
       Container container = rmContainer.getContainer();
+      ContainerType containerType = ContainerType.TASK;
+      // While the AM container is still being allocated, the attempt's
+      // masterContainer is null: the container being created here is itself
+      // the master container. Unmanaged AMs never have a master container.
+      RMAppAttempt appAttempt = rmContext.getRMApps()
+          .get(container.getId().getApplicationAttemptId().getApplicationId())
+          .getCurrentAppAttempt();
+      if (appAttempt.getMasterContainer() == null
+          && !appAttempt.getSubmissionContext().getUnmanagedAM()) {
+        containerType = ContainerType.APPLICATION_MASTER;
+      }
       try {
         // create container token and NMToken altogether.
         container.setContainerToken(rmContext.getContainerTokenSecretManager()
             .createContainerToken(container.getId(), container.getNodeId(),
                 getUser(), container.getResource(), container.getPriority(),
                 rmContainer.getCreationTime(), this.logAggregationContext,
-                rmContainer.getNodeLabelExpression()));
+                rmContainer.getNodeLabelExpression(), containerType));
         NMToken nmToken =
             rmContext.getNMTokenSecretManager().createAndGetNMToken(getUser(),
               getApplicationAttemptId(), container);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea36299/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index e5b44a6..598f279 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -829,7 +829,8 @@ public class LeafQueue extends AbstractCSQueue {
             RMAppAttempt rmAppAttempt =
                 csContext.getRMContext().getRMApps()
                     .get(application.getApplicationId()).getCurrentAppAttempt();
-            if (null == rmAppAttempt.getMasterContainer()) {
+            if (!rmAppAttempt.getSubmissionContext().getUnmanagedAM()
+                && null == rmAppAttempt.getMasterContainer()) {
               if (LOG.isDebugEnabled()) {
                 LOG.debug("Skip allocating AM container to app_attempt="
                     + application.getApplicationAttemptId()
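
The added getUnmanagedAM() check matters because the RM never allocates a master container for an unmanaged AM, so getMasterContainer() stays null for the attempt's entire lifetime; without the check, every allocation for such an application would be evaluated here as if it were the pending AM container.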

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea36299/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java
index 1c0533d..6f00615 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
+import org.apache.hadoop.yarn.server.api.ContainerType;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager;
@@ -166,7 +167,7 @@ public class RMContainerTokenSecretManager extends
 
   /**
    * Helper function for creating ContainerTokens
-   * 
+   *
    * @param containerId
    * @param nodeId
    * @param appSubmitter
@@ -179,12 +180,12 @@ public class RMContainerTokenSecretManager extends
       String appSubmitter, Resource capability, Priority priority,
       long createTime) {
     return createContainerToken(containerId, nodeId, appSubmitter, capability,
-      priority, createTime, null, null);
+      priority, createTime, null, null, ContainerType.TASK);
   }
 
   /**
    * Helper function for creating ContainerTokens
-   * 
+   *
    * @param containerId
    * @param nodeId
    * @param appSubmitter
@@ -192,12 +193,14 @@ public class RMContainerTokenSecretManager extends
    * @param priority
    * @param createTime
    * @param logAggregationContext
+   * @param nodeLabelExpression
+   * @param containerType
    * @return the container-token
    */
   public Token createContainerToken(ContainerId containerId, NodeId nodeId,
       String appSubmitter, Resource capability, Priority priority,
       long createTime, LogAggregationContext logAggregationContext,
-      String nodeLabelExpression) {
+      String nodeLabelExpression, ContainerType containerType) {
     byte[] password;
     ContainerTokenIdentifier tokenIdentifier;
     long expiryTimeStamp =
@@ -211,7 +214,7 @@ public class RMContainerTokenSecretManager extends
             appSubmitter, capability, expiryTimeStamp, this.currentMasterKey
               .getMasterKey().getKeyId(),
             ResourceManager.getClusterTimeStamp(), priority, createTime,
-            logAggregationContext, nodeLabelExpression);
+            logAggregationContext, nodeLabelExpression, containerType);
       password = this.createPassword(tokenIdentifier);
 
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea36299/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
index 9b7eb84..e62f7d7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
@@ -55,6 +55,8 @@ import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.Task.State;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
@@ -168,6 +170,16 @@ public class Application {
     
     resourceManager.getClientRMService().submitApplication(request);
 
+    RMAppEvent event =
+        new RMAppEvent(this.applicationId, RMAppEventType.START);
+    resourceManager.getRMContext().getRMApps().get(applicationId).handle(event);
+    event =
+        new RMAppEvent(this.applicationId, RMAppEventType.APP_NEW_SAVED);
+    resourceManager.getRMContext().getRMApps().get(applicationId).handle(event);
+    event =
+        new RMAppEvent(this.applicationId, RMAppEventType.APP_ACCEPTED);
+    resourceManager.getRMContext().getRMApps().get(applicationId).handle(event);
+
     // Notify scheduler
     AppAddedSchedulerEvent addAppEvent =
         new AppAddedSchedulerEvent(this.applicationId, this.queue, "user");
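
The three RMAppEvents walk the freshly submitted RMApp through its START, APP_NEW_SAVED and APP_ACCEPTED transitions so that getCurrentAppAttempt() returns a real attempt by the time SchedulerApplicationAttempt (above) dereferences it while creating container tokens for newly allocated containers.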

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea36299/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
index 0ea993f..6183bf6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
+import org.apache.hadoop.yarn.server.api.ContainerType;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
@@ -286,11 +287,11 @@ public class TestContainerAllocation {
         public Token createContainerToken(ContainerId containerId,
             NodeId nodeId, String appSubmitter, Resource capability,
             Priority priority, long createTime,
-            LogAggregationContext logAggregationContext, String nodeLabelExp) {
+            LogAggregationContext logAggregationContext, String nodeLabelExp,
+            ContainerType containerType) {
           numRetries++;
           return super.createContainerToken(containerId, nodeId, appSubmitter,
               capability, priority, createTime, logAggregationContext,
-              nodeLabelExp);
+              nodeLabelExp, containerType);
         }
       };
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea36299/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
index 23b708a..403c8ea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -38,6 +39,8 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
@@ -152,6 +155,11 @@ public class FairSchedulerTestBase {
     when(rmApp.getCurrentAppAttempt()).thenReturn(rmAppAttempt);
     when(rmAppAttempt.getRMAppAttemptMetrics()).thenReturn(
         new RMAppAttemptMetrics(id, resourceManager.getRMContext()));
+    ApplicationSubmissionContext submissionContext = mock(ApplicationSubmissionContext.class);
+    when(submissionContext.getUnmanagedAM()).thenReturn(false);
+    when(rmAppAttempt.getSubmissionContext()).thenReturn(submissionContext);
+    Container container = mock(Container.class);
+    when(rmAppAttempt.getMasterContainer()).thenReturn(container);
     resourceManager.getRMContext().getRMApps()
         .put(id.getApplicationId(), rmApp);
 
@@ -175,6 +183,9 @@ public class FairSchedulerTestBase {
     when(rmApp.getCurrentAppAttempt()).thenReturn(rmAppAttempt);
     when(rmAppAttempt.getRMAppAttemptMetrics()).thenReturn(
         new RMAppAttemptMetrics(id,resourceManager.getRMContext()));
+    ApplicationSubmissionContext submissionContext = mock(ApplicationSubmissionContext.class);
+    when(submissionContext.getUnmanagedAM()).thenReturn(false);
+    when(rmAppAttempt.getSubmissionContext()).thenReturn(submissionContext);
     resourceManager.getRMContext().getRMApps()
         .put(id.getApplicationId(), rmApp);
 
@@ -206,13 +217,20 @@ public class FairSchedulerTestBase {
   protected void createApplicationWithAMResource(ApplicationAttemptId attId,
       String queue, String user, Resource amResource) {
     RMContext rmContext = resourceManager.getRMContext();
-    RMApp rmApp = new RMAppImpl(attId.getApplicationId(), rmContext, conf,
-        null, null, null, ApplicationSubmissionContext.newInstance(null, null,
-        null, null, null, false, false, 0, amResource, null), null, null,
+    ApplicationId appId = attId.getApplicationId();
+    RMApp rmApp = new RMAppImpl(appId, rmContext, conf,
+        null, user, null, ApplicationSubmissionContext.newInstance(appId, null,
+        queue, null, null, false, false, 0, amResource, null), null, null,
         0, null, null, null);
-    rmContext.getRMApps().put(attId.getApplicationId(), rmApp);
+    rmContext.getRMApps().put(appId, rmApp);
+    RMAppEvent event = new RMAppEvent(appId, RMAppEventType.START);
+    resourceManager.getRMContext().getRMApps().get(appId).handle(event);
+    event = new RMAppEvent(appId, RMAppEventType.APP_NEW_SAVED);
+    resourceManager.getRMContext().getRMApps().get(appId).handle(event);
+    event = new RMAppEvent(appId, RMAppEventType.APP_ACCEPTED);
+    resourceManager.getRMContext().getRMApps().get(appId).handle(event);
     AppAddedSchedulerEvent appAddedEvent = new AppAddedSchedulerEvent(
-        attId.getApplicationId(), queue, user);
+        appId, queue, user);
     scheduler.handle(appAddedEvent);
     AppAttemptAddedSchedulerEvent attempAddedEvent =
         new AppAttemptAddedSchedulerEvent(attId, false);
@@ -227,6 +245,9 @@ public class FairSchedulerTestBase {
     RMAppAttemptMetrics attemptMetric = mock(RMAppAttemptMetrics.class);
     when(attempt.getRMAppAttemptMetrics()).thenReturn(attemptMetric);
     when(app.getCurrentAppAttempt()).thenReturn(attempt);
+    ApplicationSubmissionContext submissionContext = mock(ApplicationSubmissionContext.class);
+    when(submissionContext.getUnmanagedAM()).thenReturn(false);
+    when(attempt.getSubmissionContext()).thenReturn(submissionContext);
     resourceManager.getRMContext().getRMApps()
         .put(attemptId.getApplicationId(), app);
     return app;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea36299/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
index e4583d1..a454801 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
@@ -1184,6 +1185,9 @@ public class TestFifoScheduler {
     RMAppAttemptMetrics attemptMetric = mock(RMAppAttemptMetrics.class);
     when(attempt.getRMAppAttemptMetrics()).thenReturn(attemptMetric);
     when(app.getCurrentAppAttempt()).thenReturn(attempt);
+    ApplicationSubmissionContext submissionContext = mock(ApplicationSubmissionContext.class);
+    when(submissionContext.getUnmanagedAM()).thenReturn(false);
+    when(attempt.getSubmissionContext()).thenReturn(submissionContext);
     context.getRMApps().putIfAbsent(attemptId.getApplicationId(), app);
     return app;
   }


[11/11] hadoop git commit: HADOOP-12211. Collect disk usage on the node. Contributed by Robert Grandl

Posted by ji...@apache.org.
HADOOP-12211. Collect disk usage on the node. Contributed by Robert Grandl


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a431ed90
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a431ed90
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a431ed90

Branch: refs/heads/YARN-1197
Commit: a431ed9075cf6f467be5ff10f4ffb131cb1d3216
Parents: 9ef03a4
Author: Chris Douglas <cd...@apache.org>
Authored: Mon Jul 13 15:36:11 2015 -0700
Committer: Chris Douglas <cd...@apache.org>
Committed: Mon Jul 13 15:36:11 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |   2 +
 .../java/org/apache/hadoop/util/SysInfo.java    |  14 ++
 .../org/apache/hadoop/util/SysInfoLinux.java    | 157 ++++++++++++++++++-
 .../org/apache/hadoop/util/SysInfoWindows.java  |  12 ++
 .../apache/hadoop/util/TestSysInfoLinux.java    |  77 ++++++++-
 .../gridmix/DummyResourceCalculatorPlugin.java  |  17 ++
 .../yarn/util/ResourceCalculatorPlugin.java     |  18 +++
 7 files changed, 293 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a431ed90/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3d4f1e4..3121430 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -695,6 +695,8 @@ Release 2.8.0 - UNRELEASED
 
     HADOOP-12210. Collect network usage on the node (Robert Grandl via cdouglas)
 
+    HADOOP-12211. Collect disk usage on the node (Robert Grandl via cdouglas)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a431ed90/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfo.java
index 24b339d..b75a8d8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfo.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfo.java
@@ -120,4 +120,18 @@ public abstract class SysInfo {
    */
   public abstract long getNetworkBytesWritten();
 
+  /**
+   * Obtain the aggregated number of bytes read from disks.
+   *
+   * @return total number of bytes read.
+   */
+  public abstract long getStorageBytesRead();
+
+  /**
+   * Obtain the aggregated number of bytes written to disks.
+   *
+   * @return total number of bytes written.
+   */
+  public abstract long getStorageBytesWritten();
+
 }
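
For illustration, the new counters can be read through the Linux implementation below; a minimal sketch (class name and output text are illustrative):

    import org.apache.hadoop.util.SysInfo;
    import org.apache.hadoop.util.SysInfoLinux;

    public class DiskCounters {
      public static void main(String[] args) {
        SysInfo info = new SysInfoLinux(); // parses the real /proc files
        // /proc/diskstats counters are cumulative since boot; each getter
        // call re-parses the file, so sample twice and subtract for a rate.
        System.out.println("storage read (bytes): "
            + info.getStorageBytesRead());
        System.out.println("storage written (bytes): "
            + info.getStorageBytesWritten());
      }
    }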

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a431ed90/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
index 8801985..6b21f18 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
@@ -25,6 +25,7 @@ import java.io.InputStreamReader;
 import java.io.IOException;
 import java.math.BigInteger;
 import java.nio.charset.Charset;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -94,11 +95,27 @@ public class SysInfoLinux extends SysInfo {
                "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)" +
                "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+).*");
 
+  /**
+   * Pattern for parsing /proc/diskstats.
+   */
+  private static final String PROCFS_DISKSFILE = "/proc/diskstats";
+  private static final Pattern PROCFS_DISKSFILE_FORMAT =
+      Pattern.compile("^[ \t]*([0-9]+)[ \t]*([0-9 ]+)" +
+              "(?!([a-zA-Z]+[0-9]+))([a-zA-Z]+)" +
+              "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)" +
+              "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)" +
+              "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)");
+  /**
+   * Pattern for parsing /sys/block/partition_name/queue/hw_sector_size.
+   */
+  private static final Pattern PROCFS_DISKSECTORFILE_FORMAT =
+      Pattern.compile("^([0-9]+)");
 
   private String procfsMemFile;
   private String procfsCpuFile;
   private String procfsStatFile;
   private String procfsNetFile;
+  private String procfsDisksFile;
   private long jiffyLengthInMillis;
 
   private long ramSize = 0;
@@ -113,10 +130,15 @@ public class SysInfoLinux extends SysInfo {
   private long cpuFrequency = 0L; // CPU frequency on the system (kHz)
   private long numNetBytesRead = 0L; // aggregated bytes read from network
   private long numNetBytesWritten = 0L; // aggregated bytes written to network
+  private long numDisksBytesRead = 0L; // aggregated bytes read from disks
+  private long numDisksBytesWritten = 0L; // aggregated bytes written to disks
 
   private boolean readMemInfoFile = false;
   private boolean readCpuInfoFile = false;
 
+  /* map from each disk name to its sector size */
+  private HashMap<String, Integer> perDiskSectorSize = null;
+
   public static final long PAGE_SIZE = getConf("PAGESIZE");
   public static final long JIFFY_LENGTH_IN_MILLIS =
       Math.max(Math.round(1000D / getConf("CLK_TCK")), -1);
@@ -145,7 +167,7 @@ public class SysInfoLinux extends SysInfo {
 
   public SysInfoLinux() {
     this(PROCFS_MEMFILE, PROCFS_CPUINFO, PROCFS_STAT,
-         PROCFS_NETFILE, JIFFY_LENGTH_IN_MILLIS);
+         PROCFS_NETFILE, PROCFS_DISKSFILE, JIFFY_LENGTH_IN_MILLIS);
   }
 
   /**
@@ -155,6 +177,7 @@ public class SysInfoLinux extends SysInfo {
    * @param procfsCpuFile fake file for /proc/cpuinfo
    * @param procfsStatFile fake file for /proc/stat
    * @param procfsNetFile fake file for /proc/net/dev
+   * @param procfsDisksFile fake file for /proc/diskstats
    * @param jiffyLengthInMillis fake jiffy length value
    */
   @VisibleForTesting
@@ -162,13 +185,16 @@ public class SysInfoLinux extends SysInfo {
                                        String procfsCpuFile,
                                        String procfsStatFile,
                                        String procfsNetFile,
+                                       String procfsDisksFile,
                                        long jiffyLengthInMillis) {
     this.procfsMemFile = procfsMemFile;
     this.procfsCpuFile = procfsCpuFile;
     this.procfsStatFile = procfsStatFile;
     this.procfsNetFile = procfsNetFile;
+    this.procfsDisksFile = procfsDisksFile;
     this.jiffyLengthInMillis = jiffyLengthInMillis;
     this.cpuTimeTracker = new CpuTimeTracker(jiffyLengthInMillis);
+    this.perDiskSectorSize = new HashMap<String, Integer>();
   }
 
   /**
@@ -411,6 +437,119 @@ public class SysInfoLinux extends SysInfo {
     }
   }
 
+  /**
+   * Read the /proc/diskstats file, and compute the aggregate number of
+   * bytes read from and written to disks.
+   */
+  private void readProcDisksInfoFile() {
+
+    numDisksBytesRead = 0L;
+    numDisksBytesWritten = 0L;
+
+    // Read "/proc/diskstats" file
+    BufferedReader in;
+    try {
+      in = new BufferedReader(new InputStreamReader(
+            new FileInputStream(procfsDisksFile), Charset.forName("UTF-8")));
+    } catch (FileNotFoundException f) {
+      return;
+    }
+
+    Matcher mat;
+    try {
+      String str = in.readLine();
+      while (str != null) {
+        mat = PROCFS_DISKSFILE_FORMAT.matcher(str);
+        if (mat.find()) {
+          String diskName = mat.group(4);
+          assert diskName != null;
+          // ignore loop or ram partitions
+          if (diskName.contains("loop") || diskName.contains("ram")) {
+            str = in.readLine();
+            continue;
+          }
+
+          Integer sectorSize;
+          synchronized (perDiskSectorSize) {
+            sectorSize = perDiskSectorSize.get(diskName);
+            if (null == sectorSize) {
+              // retrieve sectorSize
+              // if unavailable or error, assume 512
+              sectorSize = readDiskBlockInformation(diskName, 512);
+              perDiskSectorSize.put(diskName, sectorSize);
+            }
+          }
+
+          String sectorsRead = mat.group(7);
+          String sectorsWritten = mat.group(11);
+          if (null == sectorsRead || null == sectorsWritten) {
+            return;
+          }
+          numDisksBytesRead += Long.parseLong(sectorsRead) * sectorSize;
+          numDisksBytesWritten += Long.parseLong(sectorsWritten) * sectorSize;
+        }
+        str = in.readLine();
+      }
+    } catch (IOException e) {
+      LOG.warn("Error reading the stream " + procfsDisksFile, e);
+    } finally {
+      // Close the streams
+      try {
+        in.close();
+      } catch (IOException e) {
+        LOG.warn("Error closing the stream " + procfsDisksFile, e);
+      }
+    }
+  }
+
+  /**
+   * Read the /sys/block/{diskName}/queue/hw_sector_size file and parse the
+   * sector size for a specific disk.
+   * @return the sector size of the specified disk, or defSector if it
+   *         cannot be determined
+   */
+  int readDiskBlockInformation(String diskName, int defSector) {
+
+    assert perDiskSectorSize != null && diskName != null;
+
+    String procfsDiskSectorFile =
+            "/sys/block/" + diskName + "/queue/hw_sector_size";
+
+    BufferedReader in;
+    try {
+      in = new BufferedReader(new InputStreamReader(
+            new FileInputStream(procfsDiskSectorFile),
+              Charset.forName("UTF-8")));
+    } catch (FileNotFoundException f) {
+      return defSector;
+    }
+
+    Matcher mat;
+    try {
+      String str = in.readLine();
+      while (str != null) {
+        mat = PROCFS_DISKSECTORFILE_FORMAT.matcher(str);
+        if (mat.find()) {
+          String secSize = mat.group(1);
+          if (secSize != null) {
+            return Integer.parseInt(secSize);
+          }
+        }
+        str = in.readLine();
+      }
+      return defSector;
+    } catch (IOException|NumberFormatException e) {
+      LOG.warn("Error reading the stream " + procfsDiskSectorFile, e);
+      return defSector;
+    } finally {
+      // Close the streams
+      try {
+        in.close();
+      } catch (IOException e) {
+        LOG.warn("Error closing the stream " + procfsDiskSectorFile, e);
+      }
+    }
+  }
+
   /** {@inheritDoc} */
   @Override
   public long getPhysicalMemorySize() {
@@ -492,6 +631,18 @@ public class SysInfoLinux extends SysInfo {
     return numNetBytesWritten;
   }
 
+  @Override
+  public long getStorageBytesRead() {
+    readProcDisksInfoFile();
+    return numDisksBytesRead;
+  }
+
+  @Override
+  public long getStorageBytesWritten() {
+    readProcDisksInfoFile();
+    return numDisksBytesWritten;
+  }
+
   /**
    * Test the {@link SysInfoLinux}.
    *
@@ -515,6 +666,10 @@ public class SysInfoLinux extends SysInfo {
             + plugin.getNetworkBytesRead());
     System.out.println("Total network written (bytes) : "
             + plugin.getNetworkBytesWritten());
+    System.out.println("Total storage read (bytes) : "
+            + plugin.getStorageBytesRead());
+    System.out.println("Total storage written (bytes) : "
+            + plugin.getStorageBytesWritten());
     try {
       // Sleep so we can compute the CPU usage
       Thread.sleep(500L);

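For context on what the new readProcDisksInfoFile() above is doing: each line
of /proc/diskstats carries per-device I/O counters, with the device name in
the third field, sectors read in the sixth and sectors written in the tenth
(see the kernel's Documentation/iostats.txt). Below is a minimal standalone
sketch of the same accumulation, not the patch's code: it approximates the
patch's disk-name regex (which skips loop/ram devices and per-partition
lines) with simple string checks, and assumes a flat 512-byte sector size
instead of reading /sys/block/<disk>/queue/hw_sector_size per disk as
readDiskBlockInformation() does.

  import java.io.BufferedReader;
  import java.io.IOException;
  import java.nio.charset.StandardCharsets;
  import java.nio.file.Files;
  import java.nio.file.Paths;

  public class DiskstatsSketch {
    public static void main(String[] args) throws IOException {
      long sectorsRead = 0L;
      long sectorsWritten = 0L;
      try (BufferedReader in = Files.newBufferedReader(
          Paths.get("/proc/diskstats"), StandardCharsets.UTF_8)) {
        String line;
        while ((line = in.readLine()) != null) {
          // Per Documentation/iostats.txt: field 3 is the device name,
          // field 6 the sectors read, field 10 the sectors written.
          String[] f = line.trim().split("\\s+");
          if (f.length < 10) {
            continue;
          }
          String dev = f[2];
          // Approximation of the patch's regex: skip loop/ram devices and
          // per-partition lines (names ending in a digit, e.g. sda1).
          if (dev.contains("loop") || dev.contains("ram")
              || Character.isDigit(dev.charAt(dev.length() - 1))) {
            continue;
          }
          sectorsRead += Long.parseLong(f[5]);
          sectorsWritten += Long.parseLong(f[9]);
        }
      }
      // Flat 512-byte assumption for brevity; the patch reads the real
      // per-disk size from /sys/block/<disk>/queue/hw_sector_size.
      System.out.println("read:    " + sectorsRead * 512L + " bytes");
      System.out.println("written: " + sectorsWritten * 512L + " bytes");
    }
  }

Skipping per-partition lines matters: counting sda and sda1 together would
double-count the same I/O.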
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a431ed90/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
index f8542a3..f3fb364 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
@@ -193,4 +193,16 @@ public class SysInfoWindows extends SysInfo {
     return 0L;
   }
 
+  @Override
+  public long getStorageBytesRead() {
+    // TODO unimplemented
+    return 0L;
+  }
+
+  @Override
+  public long getStorageBytesWritten() {
+    // TODO unimplemented
+    return 0L;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a431ed90/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
index 2a31f31..83f2e86 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
@@ -37,17 +37,18 @@ public class TestSysInfoLinux {
   /**
    * LinuxResourceCalculatorPlugin with a fake timer
    */
-  static class FakeLinuxResourceCalculatorPlugin extends
-      SysInfoLinux {
+  static class FakeLinuxResourceCalculatorPlugin extends SysInfoLinux {
+    static final int SECTORSIZE = 4096;
 
     long currentTime = 0;
     public FakeLinuxResourceCalculatorPlugin(String procfsMemFile,
                                              String procfsCpuFile,
                                              String procfsStatFile,
                                              String procfsNetFile,
+                                             String procfsDisksFile,
                                              long jiffyLengthInMillis) {
       super(procfsMemFile, procfsCpuFile, procfsStatFile, procfsNetFile,
-          jiffyLengthInMillis);
+          procfsDisksFile, jiffyLengthInMillis);
     }
     @Override
     long getCurrentTime() {
@@ -56,6 +57,10 @@ public class TestSysInfoLinux {
     public void advanceTime(long adv) {
       currentTime += adv * this.getJiffyLengthInMillis();
     }
+    @Override
+    int readDiskBlockInformation(String diskName, int defSector) {
+      return SECTORSIZE;
+    }
   }
   private static final FakeLinuxResourceCalculatorPlugin plugin;
   private static String TEST_ROOT_DIR = new Path(System.getProperty(
@@ -64,6 +69,7 @@ public class TestSysInfoLinux {
   private static final String FAKE_CPUFILE;
   private static final String FAKE_STATFILE;
   private static final String FAKE_NETFILE;
+  private static final String FAKE_DISKSFILE;
   private static final long FAKE_JIFFY_LENGTH = 10L;
   static {
     int randomNum = (new Random()).nextInt(1000000000);
@@ -71,9 +77,11 @@ public class TestSysInfoLinux {
     FAKE_CPUFILE = TEST_ROOT_DIR + File.separator + "CPUINFO_" + randomNum;
     FAKE_STATFILE = TEST_ROOT_DIR + File.separator + "STATINFO_" + randomNum;
     FAKE_NETFILE = TEST_ROOT_DIR + File.separator + "NETINFO_" + randomNum;
+    FAKE_DISKSFILE = TEST_ROOT_DIR + File.separator + "DISKSINFO_" + randomNum;
     plugin = new FakeLinuxResourceCalculatorPlugin(FAKE_MEMFILE, FAKE_CPUFILE,
                                                    FAKE_STATFILE,
                                                    FAKE_NETFILE,
+                                                   FAKE_DISKSFILE,
                                                    FAKE_JIFFY_LENGTH);
   }
   static final String MEMINFO_FORMAT =
@@ -157,6 +165,38 @@ public class TestSysInfoLinux {
     " eth1: %d 3152521    0    0    0     0          0    219781 %d 1866290    0    0    " +
     "0     0       0          0\n";
 
+  static final String DISKSINFO_FORMAT =
+      "1       0 ram0 0 0 0 0 0 0 0 0 0 0 0\n"+
+      "1       1 ram1 0 0 0 0 0 0 0 0 0 0 0\n"+
+      "1       2 ram2 0 0 0 0 0 0 0 0 0 0 0\n"+
+      "1       3 ram3 0 0 0 0 0 0 0 0 0 0 0\n"+
+      "1       4 ram4 0 0 0 0 0 0 0 0 0 0 0\n"+
+      "1       5 ram5 0 0 0 0 0 0 0 0 0 0 0\n"+
+      "1       6 ram6 0 0 0 0 0 0 0 0 0 0 0\n"+
+      "7       0 loop0 0 0 0 0 0 0 0 0 0 0 0\n"+
+      "7       1 loop1 0 0 0 0 0 0 0 0 0 0 0\n"+
+      "8       0 sda 82575678 2486518 %d 59876600 3225402 19761924 %d " +
+      "6407705 4 48803346 66227952\n"+
+      "8       1 sda1 732 289 21354 787 7 3 32 4 0 769 791"+
+      "8       2 sda2 744272 2206315 23605200 6742762 336830 2979630 " +
+      "26539520 1424776 4 1820130 8165444\n"+
+      "8       3 sda3 81830497 279914 17881852954 53132969 2888558 16782291 " +
+      "157367552 4982925 0 47077660 58061635\n"+
+      "8      32 sdc 10148118 693255 %d 122125461 6090515 401630172 %d 2696685590 " +
+      "0 26848216 2818793840\n"+
+      "8      33 sdc1 10147917 693230 2054138426 122125426 6090506 401630172 " +
+      "3261765880 2696685589 0 26848181 2818793804\n"+
+      "8      64 sde 9989771 553047 %d 93407551 5978572 391997273 %d 2388274325 " +
+      "0 24396646 2481664818\n"+
+      "8      65 sde1 9989570 553022 1943973346 93407489 5978563 391997273 3183807264 " +
+      "2388274325 0 24396584 2481666274\n"+
+      "8      80 sdf 10197163 693995 %d 144374395 6216644 408395438 %d 2669389056 0 " +
+      "26164759 2813746348\n"+
+      "8      81 sdf1 10196962 693970 2033452794 144374355 6216635 408395438 3316897064 " +
+      "2669389056 0 26164719 2813746308\n"+
+      "8     129 sdi1 10078602 657936 2056552626 108362198 6134036 403851153 3279882064 " +
+      "2639256086 0 26260432 2747601085\n";
+
   /**
    * Test parsing /proc/stat and /proc/cpuinfo
    * @throws IOException
@@ -358,4 +398,35 @@ public class TestSysInfoLinux {
     assertEquals(plugin.getNetworkBytesWritten(), numBytesWrittenIntf1 + numBytesWrittenIntf2);
   }
 
+  /**
+   * Test parsing /proc/diskstats
+   * @throws IOException
+   */
+  @Test
+  public void parsingProcDisksFile() throws IOException {
+    long numSectorsReadsda = 1790549L; long numSectorsWrittensda = 1839071L;
+    long numSectorsReadsdc = 20541402L; long numSectorsWrittensdc = 32617658L;
+    long numSectorsReadsde = 19439751L; long numSectorsWrittensde = 31838072L;
+    long numSectorsReadsdf = 20334546L; long numSectorsWrittensdf = 33168970L;
+    File tempFile = new File(FAKE_DISKSFILE);
+    tempFile.deleteOnExit();
+    FileWriter fWriter = new FileWriter(FAKE_DISKSFILE);
+    fWriter.write(String.format(DISKSINFO_FORMAT,
+             numSectorsReadsda, numSectorsWrittensda,
+             numSectorsReadsdc, numSectorsWrittensdc,
+             numSectorsReadsde, numSectorsWrittensde,
+             numSectorsReadsdf, numSectorsWrittensdf));
+
+    fWriter.close();
+    long expectedNumSectorsRead = numSectorsReadsda + numSectorsReadsdc +
+                                  numSectorsReadsde + numSectorsReadsdf;
+    long expectedNumSectorsWritten = numSectorsWrittensda + numSectorsWrittensdc +
+                                     numSectorsWrittensde + numSectorsWrittensdf;
+    // use non-default sector size
+    int diskSectorSize = FakeLinuxResourceCalculatorPlugin.SECTORSIZE;
+    assertEquals(expectedNumSectorsRead * diskSectorSize,
+        plugin.getStorageBytesRead());
+    assertEquals(expectedNumSectorsWritten * diskSectorSize,
+        plugin.getStorageBytesWritten());
+  }
 }

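As a quick sanity check of the assertions above, with the fake plugin's
4096-byte sector size and the %d values substituted into DISKSINFO_FORMAT:

  // Only the whole-disk lines (sda, sdc, sde, sdf) are counted; the
  // partition, loop and ram lines do not match the disk-name pattern.
  long sectors = 1790549L + 20541402L + 19439751L + 20334546L; // 62106248
  long bytes = sectors * 4096L;                             // 254387191808
  // plugin.getStorageBytesRead() is expected to return exactly this value.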
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a431ed90/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DummyResourceCalculatorPlugin.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DummyResourceCalculatorPlugin.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DummyResourceCalculatorPlugin.java
index b86303b..4999f14 100644
--- a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DummyResourceCalculatorPlugin.java
+++ b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DummyResourceCalculatorPlugin.java
@@ -54,6 +54,12 @@ public class DummyResourceCalculatorPlugin extends ResourceCalculatorPlugin {
   /** cumulative number of bytes written over the network */
   public static final String NETWORK_BYTES_WRITTEN =
       "mapred.tasktracker.networkwritten.testing";
+  /** cumulative number of bytes read from disks */
+  public static final String STORAGE_BYTES_READ =
+      "mapred.tasktracker.storageread.testing";
+  /** cumulative number of bytes written to disks */
+  public static final String STORAGE_BYTES_WRITTEN =
+      "mapred.tasktracker.storagewritten.testing";
   /** process cumulative CPU usage time for testing */
   public static final String PROC_CUMULATIVE_CPU_TIME =
       "mapred.tasktracker.proccumulativecputime.testing";
@@ -130,4 +136,15 @@ public class DummyResourceCalculatorPlugin extends ResourceCalculatorPlugin {
     return getConf().getLong(NETWORK_BYTES_WRITTEN, -1);
   }
 
+  /** {@inheritDoc} */
+  @Override
+  public long getStorageBytesRead() {
+    return getConf().getLong(STORAGE_BYTES_READ, -1);
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public long getStorageBytesWritten() {
+    return getConf().getLong(STORAGE_BYTES_WRITTEN, -1);
+  }
 }

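A hedged sketch of how a test might drive the new storage counters through
the dummy plugin, assuming the gridmix test classes are on the classpath;
the configured values are arbitrary:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapred.gridmix.DummyResourceCalculatorPlugin;

  public class DummyStorageSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Keys from the patch above; the values here are arbitrary.
      conf.setLong("mapred.tasktracker.storageread.testing", 1L << 30);
      conf.setLong("mapred.tasktracker.storagewritten.testing", 1L << 29);

      DummyResourceCalculatorPlugin plugin =
          new DummyResourceCalculatorPlugin();
      plugin.setConf(conf);

      // The dummy plugin just echoes back whatever the test configured.
      System.out.println(plugin.getStorageBytesRead());    // 1073741824
      System.out.println(plugin.getStorageBytesWritten()); // 536870912
    }
  }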
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a431ed90/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
index 3af4aee..691c4ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
@@ -145,6 +145,24 @@ public class ResourceCalculatorPlugin extends Configured {
   }
 
   /**
+   * Obtain the aggregated number of bytes read from disks.
+   *
+   * @return total number of bytes read.
+   */
+  public long getStorageBytesRead() {
+    return sys.getStorageBytesRead();
+  }
+
+  /**
+   * Obtain the aggregated number of bytes written to disks.
+   *
+   * @return total number of bytes written.
+   */
+  public long getStorageBytesWritten() {
+    return sys.getStorageBytesWritten();
+  }
+
+  /**
    * Create the ResourceCalculatorPlugin from the class name and configure it. If
    * class name is null, this method will try and return a memory calculator
    * plugin available for this system.

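Putting the new public API together, a minimal caller sketch: passing a null
class name to getResourceCalculatorPlugin picks the platform default, or
returns null when none is available, so callers must null-check.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;

  public class StorageCountersDemo {
    public static void main(String[] args) {
      ResourceCalculatorPlugin plugin =
          ResourceCalculatorPlugin.getResourceCalculatorPlugin(
              null, new Configuration());
      if (plugin == null) {
        System.err.println("No resource calculator for this platform.");
        return;
      }
      // Cumulative since boot on Linux; 0 on Windows until implemented.
      System.out.println("storage read (bytes): "
          + plugin.getStorageBytesRead());
      System.out.println("storage written (bytes): "
          + plugin.getStorageBytesWritten());
    }
  }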

[04/11] hadoop git commit: YARN-3069. Document missing properties in yarn-default.xml. Contributed by Ray Chiang.

Posted by ji...@apache.org.
YARN-3069. Document missing properties in yarn-default.xml. Contributed by Ray Chiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6675606
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6675606
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6675606

Branch: refs/heads/YARN-1197
Commit: d6675606dc5f141c9af4f76a37128f8de4cfedad
Parents: d7319de
Author: Akira Ajisaka <aa...@apache.org>
Authored: Mon Jul 13 12:57:01 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon Jul 13 12:57:01 2015 +0900

----------------------------------------------------------------------
 .../src/site/markdown/DeprecatedProperties.md   |   1 +
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../src/main/resources/yarn-default.xml         | 466 ++++++++++++++++++-
 .../yarn/conf/TestYarnConfigurationFields.java  |  44 +-
 4 files changed, 488 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6675606/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md b/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
index f964735..f8bc95b 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
@@ -276,6 +276,7 @@ The following table lists the configuration property names that are deprecated i
 | user.name | mapreduce.job.user.name |
 | webinterface.private.actions | mapreduce.jobtracker.webinterface.trusted |
 | yarn.app.mapreduce.yarn.app.mapreduce.client-am.ipc.max-retries-on-timeouts | yarn.app.mapreduce.client-am.ipc.max-retries-on-timeouts |
+| yarn.client.app-submission.poll-interval | yarn.client.application-client-protocol.poll-timeout-ms |
 
 The following table lists additional changes to some configuration properties:
 

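Entries reach this table when the old key is registered through
Configuration's deprecation mechanism; assuming that registration (it is not
shown in this patch), a value set under the deprecated name resolves under
the new one:

  import org.apache.hadoop.yarn.conf.YarnConfiguration;

  public class DeprecatedKeyDemo {
    public static void main(String[] args) {
      YarnConfiguration conf = new YarnConfiguration();
      conf.set("yarn.client.app-submission.poll-interval", "1000");
      // Resolves via the deprecation mapping, assuming it is registered.
      System.out.println(conf.get(
          "yarn.client.application-client-protocol.poll-timeout-ms"));
    }
  }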
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6675606/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 9881771..0415ae4 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -329,6 +329,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3800. Reduce storage footprint for ReservationAllocation. (Anubhav Dhoot
     via curino)
 
+    YARN-3069. Document missing properties in yarn-default.xml.
+    (Ray Chiang via aajisaka)
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6675606/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 4987084..2edeef0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -24,7 +24,8 @@
 
 <configuration>
 
-  <!-- IPC Configs -->
+  <!-- IPC Configuration -->
+
   <property>
     <description>Factory to create client IPC classes.</description>
     <name>yarn.ipc.client.factory.class</name>
@@ -46,7 +47,8 @@
     <value>org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC</value>
   </property>
   
-  <!-- Resource Manager Configs -->
+  <!-- Resource Manager Configuration -->
+
   <property>
     <description>The hostname of the RM.</description>
     <name>yarn.resourcemanager.hostname</name>
@@ -143,6 +145,32 @@
   </property>
 
   <property>
+    <description>
+    The Kerberos keytab file to be used for spnego filter for the RM web
+    interface.
+    </description>
+    <name>yarn.resourcemanager.webapp.spnego-keytab-file</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>
+    The Kerberos principal to be used for spnego filter for the RM web
+    interface.
+    </description>
+    <name>yarn.resourcemanager.webapp.spnego-principal</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>
+    Enable the button for killing applications in the RM Application view.
+    </description>
+    <name>yarn.resourcemanager.webapp.ui-actions.enabled</name>
+    <value>true</value>
+  </property>
+
+  <property>
     <name>yarn.resourcemanager.resource-tracker.address</name>
     <value>${yarn.resourcemanager.hostname}:8031</value>
   </property>
@@ -280,7 +308,16 @@
   </property>
 
   <property>
-    <description>Enable RM to recover state after starting. If true, then 
+    <description>
+    Used by node labels.  If set to true, the port is included in the
+    node name.  Only usable if your scheduler supports node labels.
+    </description>
+    <name>yarn.scheduler.include-port-in-node-name</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>Enable RM to recover state after starting. If true, then
       yarn.resourcemanager.store.class must be specified. </description>
     <name>yarn.resourcemanager.recovery.enabled</name>
     <value>false</value>
@@ -673,6 +710,16 @@
   </property>
 
   <property>
+    <description>
+    The value specifies the file system (e.g. HDFS) path where ResourceManager
+    loads configuration if yarn.resourcemanager.configuration.provider-class
+    is set to org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider.
+    </description>
+    <name>yarn.resourcemanager.configuration.file-system-based-store</name>
+    <value>/yarn/conf</value>
+  </property>
+
+  <property>
     <description>The setting that controls whether yarn system metrics is
     published on the timeline server or not by RM.</description>
     <name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
@@ -695,6 +742,119 @@
   </property>
 
   <!-- Node Manager Configs -->
+
+  <property>
+    <description>
+    RM DelegationTokenRenewer thread count
+    </description>
+    <name>yarn.resourcemanager.delegation-token-renewer.thread-count</name>
+    <value>50</value>
+  </property>
+
+  <property>
+    <description>
+    RM secret key update interval in ms
+    </description>
+    <name>yarn.resourcemanager.delegation.key.update-interval</name>
+    <value>86400000</value>
+  </property>
+
+  <property>
+    <description>
+    RM delegation token maximum lifetime in ms
+    </description>
+    <name>yarn.resourcemanager.delegation.token.max-lifetime</name>
+    <value>604800000</value>
+  </property>
+
+  <property>
+    <description>
+    RM delegation token update interval in ms
+    </description>
+    <name>yarn.resourcemanager.delegation.token.renew-interval</name>
+    <value>86400000</value>
+  </property>
+
+  <property>
+    <description>
+    Thread pool size for RMApplicationHistoryWriter.
+    </description>
+    <name>yarn.resourcemanager.history-writer.multi-threaded-dispatcher.pool-size</name>
+    <value>10</value>
+  </property>
+
+  <property>
+    <description>
+    Comma-separated list of values (in minutes) for schedule queue related
+    metrics.
+    </description>
+    <name>yarn.resourcemanager.metrics.runtime.buckets</name>
+    <value>60,300,1440</value>
+  </property>
+
+  <property>
+    <description>
+    Interval for rolling over the master key used to generate
+    NodeManager tokens.  It is expected to be set to a value much larger
+    than yarn.nm.liveness-monitor.expiry-interval-ms.
+    </description>
+    <name>yarn.resourcemanager.nm-tokens.master-key-rolling-interval-secs</name>
+    <value>86400</value>
+  </property>
+
+  <property>
+    <description>
+    Flag to enable the ResourceManager reservation system.
+    </description>
+    <name>yarn.resourcemanager.reservation-system.enable</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
+    The Java class to use as the ResourceManager reservation system.
+    By default, it is set to
+    org.apache.hadoop.yarn.server.resourcemanager.reservation.CapacityReservationSystem
+    when using CapacityScheduler and is set to
+    org.apache.hadoop.yarn.server.resourcemanager.reservation.FairReservationSystem
+    when using FairScheduler.
+    </description>
+    <name>yarn.resourcemanager.reservation-system.class</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>
+    The plan follower policy class name to use for the ResourceManager
+    reservation system.
+    By default, it is set to
+    org.apache.hadoop.yarn.server.resourcemanager.reservation.CapacitySchedulerPlanFollower
+    when using CapacityScheduler, and to
+    org.apache.hadoop.yarn.server.resourcemanager.reservation.FairSchedulerPlanFollower
+    when using FairScheduler.
+    </description>
+    <name>yarn.resourcemanager.reservation-system.plan.follower</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>
+    Step size of the reservation system in ms
+    </description>
+    <name>yarn.resourcemanager.reservation-system.planfollower.time-step</name>
+    <value>1000</value>
+  </property>
+
+  <property>
+    <description>
+    The expiry interval for a container
+    </description>
+    <name>yarn.resourcemanager.rm.container-allocation.expiry-interval-ms</name>
+    <value>600000</value>
+  </property>
+
+  <!-- Node Manager Configuration -->
+
   <property>
     <description>The hostname of the NM.</description>
     <name>yarn.nodemanager.hostname</name>
@@ -727,7 +887,7 @@
   <property>
     <description>Environment variables that containers may override rather than use NodeManager's default.</description>
     <name>yarn.nodemanager.env-whitelist</name>
-    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,HADOOP_YARN_HOME</value>
+    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME</value>
   </property>
 
   <property>
@@ -840,7 +1000,14 @@
 
   <property>
     <description>
-      Where to store container logs. An application's localized log directory 
+    Java opts for the container localizer JVM started by the NodeManager.
+    </description>
+    <name>yarn.nodemanager.container-localizer.java.opts</name>
+    <value>-Xmx256m</value>
+  </property>
+
+  <property>
+    <description>
+      Where to store container logs. An application's localized log directory
       will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
       Individual containers' log directories will be below this, in directories 
       named container_{$contid}. Each container directory will contain the files
@@ -880,6 +1047,17 @@
   </property>
 
   <property>
+    <description>
+    How long the ResourceManager waits for a NodeManager to report its
+    log aggregation status. If a NodeManager does not report its log
+    aggregation status within the configured value, the RM will report
+    the log aggregation status for that NodeManager as TIME_OUT.
+    </description>
+    <name>yarn.log-aggregation-status.time-out.ms</name>
+    <value>600000</value>
+  </property>
+
+  <property>
     <description>Time in seconds to retain user logs. Only applicable if
     log aggregation is disabled
     </description>
@@ -1006,6 +1184,32 @@
   </property>
 
   <property>
+    <description>
+    The https address of the NM web application.
+    </description>
+    <name>yarn.nodemanager.webapp.https.address</name>
+    <value>0.0.0.0:8044</value>
+  </property>
+
+  <property>
+    <description>
+    The Kerberos keytab file to be used for spnego filter for the NM web
+    interface.
+    </description>
+    <name>yarn.nodemanager.webapp.spnego-keytab-file</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>
+    The Kerberos principal to be used for spnego filter for the NM web
+    interface.
+    </description>
+    <name>yarn.nodemanager.webapp.spnego-principal</name>
+    <value></value>
+  </property>
+
+  <property>
     <description>How often to monitor containers.</description>
     <name>yarn.nodemanager.container-monitor.interval-ms</name>
     <value>3000</value>
@@ -1113,6 +1317,12 @@
   </property>
 
   <property>
+    <description>Delay in ms between attempts to remove linux cgroup</description>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.delete-delay-ms</name>
+    <value>20</value>
+  </property>
+
+  <property>
     <description>This determines which of the two modes that LCE should use on
       a non-secure cluster.  If this value is set to true, then all containers
       will be launched as the user specified in
@@ -1166,6 +1376,23 @@
   </property>
 
   <property>
+    <description>
+    How long (in ms) the linux container executor keeps trying to clean
+    up a container's cgroups entry when cleaning up the container.
+    </description>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.delete-timeout-ms</name>
+    <value>1000</value>
+  </property>
+
+  <property>
+    <description>
+    The UNIX group that the linux-container-executor should run as.
+    </description>
+    <name>yarn.nodemanager.linux-container-executor.group</name>
+    <value></value>
+  </property>
+
+  <property>
     <description>T-file compression types used to compress aggregated logs.</description>
     <name>yarn.nodemanager.log-aggregation.compression-type</name>
     <value>none</value>
@@ -1273,6 +1500,66 @@
   <!--Docker configuration-->
 
   <property>
+    <description>
+    Adjustment to the container OS scheduling priority.  In Linux, passed
+    directly to the nice command.
+    </description>
+    <name>yarn.nodemanager.container-executor.os.sched.priority.adjustment</name>
+    <value>0</value>
+  </property>
+
+  <property>
+    <description>
+    Flag to enable container metrics
+    </description>
+    <name>yarn.nodemanager.container-metrics.enable</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <description>
+    Container metrics flush period in ms.  Set to -1 for flush on completion.
+    </description>
+    <name>yarn.nodemanager.container-metrics.period-ms</name>
+    <value>-1</value>
+  </property>
+
+  <property>
+    <description>
+    Class used to calculate current container resource utilization.
+    </description>
+    <name>yarn.nodemanager.container-monitor.process-tree.class</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>
+    Flag to enable NodeManager disk health checker
+    </description>
+    <name>yarn.nodemanager.disk-health-checker.enable</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <description>
+    Number of threads to use in NM log cleanup.  Used when log aggregation
+    is disabled.
+    </description>
+    <name>yarn.nodemanager.log.deletion-threads-count</name>
+    <value>4</value>
+  </property>
+
+  <property>
+    <description>
+    The Windows group that the windows-container-executor should run as.
+    </description>
+    <name>yarn.nodemanager.windows-secure-container-executor.group</name>
+    <value></value>
+  </property>
+
+  <!-- Docker Configuration -->
+
+  <property>
     <name>yarn.nodemanager.docker-container-executor.exec-name</name>
     <value>/usr/bin/docker</value>
     <description>
@@ -1280,24 +1567,28 @@
     </description>
   </property>
 
-  <!--Map Reduce configuration-->
   <property>
-    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
-    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+    <description>
+    The Docker image name to use for DockerContainerExecutor
+    </description>
+    <name>yarn.nodemanager.docker-container-executor.image-name</name>
+    <value></value>
   </property>
 
-  <property>
-    <name>mapreduce.job.jar</name>
-    <value/>
-  </property>
+  <!-- Map Reduce Configuration -->
 
   <property>
     <name>mapreduce.job.hdfs-servers</name>
     <value>${fs.defaultFS}</value>
   </property>
 
-  <!-- WebAppProxy Configuration-->
-  
+  <property>
+    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
+    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+  </property>
+
+  <!-- WebAppProxy Configuration -->
+
   <property>
     <description>The kerberos principal for the proxy, if the proxy is not
     running as part of the RM.</description>
@@ -1318,8 +1609,8 @@
      <value/>
   </property>
 
-  <!-- Applications' Configuration-->
-  
+  <!-- Applications' Configuration -->
+
   <property>
     <description>
       CLASSPATH for YARN applications. A comma-separated list
@@ -1346,7 +1637,7 @@
     <value></value>
   </property>
 
-  <!-- Timeline Service's Configuration-->
+  <!-- Timeline Service Configuration -->
 
   <property>
     <description>Indicate to clients whether timeline service is enabled or not.
@@ -1530,6 +1821,7 @@
   </property>
 
   <!--  Shared Cache Configuration -->
+
   <property>
     <description>Whether the shared cache is enabled</description>
     <name>yarn.sharedcache.enabled</name>
@@ -1671,7 +1963,99 @@
     <value>20</value>
   </property>
 
-  <!-- Other configuration -->
+  <property>
+    <description>
+    Service-level ACL for the application history protocol, used by the
+    Timeline server.
+    </description>
+    <name>security.applicationhistory.protocol.acl</name>
+    <value></value>
+  </property>
+
+  <!-- Minicluster Configuration (for testing only!) -->
+
+  <property>
+    <description>
+    Set to true for MiniYARNCluster unit tests
+    </description>
+    <name>yarn.is.minicluster</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
+    Set for MiniYARNCluster unit tests to control resource monitoring
+    </description>
+    <name>yarn.minicluster.control-resource-monitoring</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
+    Set to false in order to allow MiniYARNCluster to run tests without
+    port conflicts.
+    </description>
+    <name>yarn.minicluster.fixed.ports</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
+    Set to false in order to allow the NodeManager in MiniYARNCluster to
+    use RPC to talk to the RM.
+    </description>
+    <name>yarn.minicluster.use-rpc</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
+    Equivalent of yarn.nodemanager.resource.memory-mb for the NodeManager
+    in a MiniYARNCluster.
+    </description>
+    <name>yarn.minicluster.yarn.nodemanager.resource.memory-mb</name>
+    <value>4096</value>
+  </property>
+
+  <!-- Node Labels Configuration -->
+
+  <property>
+    <description>
+    Enable node labels feature
+    </description>
+    <name>yarn.node-labels.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
+    Retry policy used for FileSystem node label store. The policy is
+    specified by N pairs of sleep-time in milliseconds and number-of-retries
+    "s1,n1,s2,n2,...".
+    </description>
+    <name>yarn.node-labels.fs-store.retry-policy-spec</name>
+    <value>2000, 500</value>
+  </property>
+
+  <property>
+    <description>
+    URI for NodeLabelManager.  The default value is
+    /tmp/hadoop-yarn-${user}/node-labels/ in the local filesystem.
+    </description>
+    <name>yarn.node-labels.fs-store.root-dir</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>
+    Set configuration type for node labels. Administrators can specify
+    "centralized" or "distributed".
+    </description>
+    <name>yarn.node-labels.configuration-type</name>
+    <value>centralized</value>
+  </property>
+
+  <!-- Other Configuration -->
+
   <property>
     <description>The interval that the yarn client library uses to poll the
     completion status of the asynchronous API of application client protocol.
@@ -1681,7 +2065,16 @@
   </property>
 
   <property>
-    <description>RSS usage of a process computed via 
+    <description>
+    The duration (in ms) the YARN client waits for an expected state change
+    to occur.  -1 means unlimited wait time.
+    </description>
+    <name>yarn.client.application-client-protocol.poll-timeout-ms</name>
+    <value>-1</value>
+  </property>
+
+  <property>
+    <description>RSS usage of a process computed via
     /proc/pid/stat is not very accurate as it includes shared pages of a
     process. /proc/pid/smaps provides useful information like
     Private_Dirty, Private_Clean, Shared_Dirty, Shared_Clean which can be used
@@ -1694,6 +2087,30 @@
   </property>
 
   <property>
+    <description>
+    URL for log aggregation server
+    </description>
+    <name>yarn.log.server.url</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>
+    RM Application Tracking URL
+    </description>
+    <name>yarn.tracking.url.generator</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>
+    Class to be used for YarnAuthorizationProvider
+    </description>
+    <name>yarn.authorization-provider</name>
+    <value></value>
+  </property>
+
+  <property>
     <description>Defines how often NMs wake up to upload log files.
     The default value is -1. By default, the logs will be uploaded when
     the application is finished. By setting this configure, logs can be uploaded
@@ -1703,4 +2120,15 @@
     <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
     <value>-1</value>
   </property>
+
+  <property>
+    <description>
+    Enable/disable intermediate-data encryption at YARN level. For now,
+    this is only used by the FileSystemRMStateStore to set up the right
+    file-system security attributes.
+    </description>
+    <name>yarn.intermediate-data-encryption.enable</name>
+    <value>false</value>
+  </property>
+
 </configuration>

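A minimal sketch of reading a couple of the newly documented properties.
YarnConfiguration layers yarn-site.xml over the yarn-default.xml values
above; the fallback defaults below simply mirror the XML.

  import org.apache.hadoop.yarn.conf.YarnConfiguration;

  public class YarnDefaultsDemo {
    public static void main(String[] args) {
      YarnConfiguration conf = new YarnConfiguration();

      long tokenMaxLifetimeMs = conf.getLong(
          "yarn.resourcemanager.delegation.token.max-lifetime", 604800000L);
      boolean nodeLabelsEnabled = conf.getBoolean(
          "yarn.node-labels.enabled", false);

      System.out.println("delegation token max lifetime (ms): "
          + tokenMaxLifetimeMs);
      System.out.println("node labels enabled: " + nodeLabelsEnabled);
    }
  }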
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6675606/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 9075d9f..10e1ca6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -33,18 +33,19 @@ import org.apache.hadoop.conf.TestConfigurationFieldsBase;
  */
 public class TestYarnConfigurationFields extends TestConfigurationFieldsBase {
 
+  @SuppressWarnings("deprecation")
   @Override
   public void initializeMemberVariables() {
     xmlFilename = new String("yarn-default.xml");
     configurationClasses = new Class[] { YarnConfiguration.class };
-        
 
     // Allocate for usage
     configurationPropsToSkipCompare = new HashSet<String>();
+    configurationPrefixToSkipCompare = new HashSet<String>();
 
     // Set error modes
     errorIfMissingConfigProps = true;
-    errorIfMissingXmlProps = false;
+    errorIfMissingXmlProps = true;
 
     // Specific properties to skip
     configurationPropsToSkipCompare
@@ -79,6 +80,37 @@ public class TestYarnConfigurationFields extends TestConfigurationFieldsBase {
     configurationPropsToSkipCompare
         .add(YarnConfiguration
 	    .YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCETRACKER_PROTOCOL);
+    configurationPropsToSkipCompare
+        .add(YarnConfiguration.DEFAULT_SCM_STORE_CLASS);
+    configurationPropsToSkipCompare
+        .add(YarnConfiguration.DEFAULT_SCM_APP_CHECKER_CLASS);
+    configurationPropsToSkipCompare
+        .add(YarnConfiguration.DEFAULT_SHARED_CACHE_CHECKSUM_ALGO_IMPL);
+
+    // Ignore all YARN Application Timeline Service (version 1) properties
+    configurationPrefixToSkipCompare.add("yarn.timeline-service.");
+
+    // Used as Java command line properties, not XML
+    configurationPrefixToSkipCompare.add("yarn.app.container");
+
+    // Ignore NodeManager "work in progress" variables
+    configurationPrefixToSkipCompare
+        .add(YarnConfiguration.NM_NETWORK_RESOURCE_ENABLED);
+    configurationPrefixToSkipCompare
+        .add(YarnConfiguration.NM_NETWORK_RESOURCE_INTERFACE);
+    configurationPrefixToSkipCompare
+        .add(YarnConfiguration.NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT);
+    configurationPrefixToSkipCompare
+        .add(YarnConfiguration.NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_YARN_MBIT);
+    configurationPrefixToSkipCompare
+        .add(YarnConfiguration.NM_DISK_RESOURCE_ENABLED);
+
+    // Set by container-executor.cfg
+    configurationPrefixToSkipCompare.add(YarnConfiguration.NM_USER_HOME_DIR);
+
+    // Ignore deprecated properties
+    configurationPrefixToSkipCompare
+        .add(YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS);
 
     // Allocate for usage
     xmlPropsToSkipCompare = new HashSet<String>();
@@ -94,13 +126,11 @@ public class TestYarnConfigurationFields extends TestConfigurationFieldsBase {
 
     // Used in the XML file as a variable reference internal to the XML file
     xmlPropsToSkipCompare.add("yarn.nodemanager.hostname");
-    xmlPropsToSkipCompare.add("yarn.timeline-service.hostname");
 
-    // Currently defined in TimelineAuthenticationFilterInitializer
-    xmlPrefixToSkipCompare.add("yarn.timeline-service.http-authentication");
+    // Ignore all YARN Application Timeline Service (version 1) properties
+    xmlPrefixToSkipCompare.add("yarn.timeline-service");
 
-    // Currently defined in RegistryConstants
+    // Currently defined in RegistryConstants/core-site.xml
     xmlPrefixToSkipCompare.add("hadoop.registry");
   }
-
 }

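In spirit, the base test loads the property names declared in
yarn-default.xml, reflects over the String constants of YarnConfiguration,
and fails on any difference not covered by the skip sets configured above.
A rough self-contained sketch of that comparison (not the actual
TestConfigurationFieldsBase code; the "yarn." filter is a heuristic):

  import java.lang.reflect.Field;
  import java.lang.reflect.Modifier;
  import java.util.HashSet;
  import java.util.Set;

  public class ConfigFieldsSketch {
    /** Constants present in code but documented nowhere in the XML. */
    static Set<String> missingFromXml(Class<?> confClass,
        Set<String> xmlProps, Set<String> skipProps,
        Set<String> skipPrefixes) {
      Set<String> missing = new HashSet<String>();
      for (Field f : confClass.getFields()) {
        if (!Modifier.isStatic(f.getModifiers())
            || f.getType() != String.class) {
          continue;
        }
        String prop;
        try {
          prop = (String) f.get(null);
        } catch (IllegalAccessException e) {
          continue;
        }
        // Heuristic: only values that look like property names qualify.
        if (prop == null || !prop.startsWith("yarn.")
            || xmlProps.contains(prop) || skipProps.contains(prop)) {
          continue;
        }
        boolean skip = false;
        for (String prefix : skipPrefixes) {
          if (prop.startsWith(prefix)) {
            skip = true;
            break;
          }
        }
        if (!skip) {
          missing.add(prop);
        }
      }
      return missing;
    }
  }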

[03/11] hadoop git commit: YARN-3917. getResourceCalculatorPlugin for the default should intercept all exceptions. (gera)

Posted by ji...@apache.org.
YARN-3917. getResourceCalculatorPlugin for the default should intercept all exceptions. (gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d7319dee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d7319dee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d7319dee

Branch: refs/heads/YARN-1197
Commit: d7319dee37ea93f1a1ba4153ea63ea8010ba2441
Parents: 1df39c1
Author: Gera Shegalov <ge...@apache.org>
Authored: Fri Jul 10 17:43:53 2015 -0700
Committer: Gera Shegalov <ge...@apache.org>
Committed: Sat Jul 11 22:29:36 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                             | 3 +++
 .../apache/hadoop/yarn/util/ResourceCalculatorPlugin.java   | 9 +++++++--
 2 files changed, 10 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7319dee/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1365747..9881771 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -616,6 +616,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3849. Too much of preemption activity causing continuos killing of
     containers across queues. (Sunil G via wangda)
 
+    YARN-3917. getResourceCalculatorPlugin for the default should intercept all
+    exceptions. (gera)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7319dee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
index 21724a9..3af4aee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.yarn.util;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -30,6 +32,8 @@ import org.apache.hadoop.util.SysInfo;
 @InterfaceAudience.LimitedPrivate({"YARN", "MAPREDUCE"})
 @InterfaceStability.Unstable
 public class ResourceCalculatorPlugin extends Configured {
+  private static final Log LOG =
+      LogFactory.getLog(ResourceCalculatorPlugin.class);
 
   private final SysInfo sys;
 
@@ -158,9 +162,10 @@ public class ResourceCalculatorPlugin extends Configured {
     }
     try {
       return new ResourceCalculatorPlugin();
-    } catch (SecurityException e) {
-      return null;
+    } catch (Throwable t) {
+      LOG.warn(t + ": Failed to instantiate default resource calculator.", t);
     }
+    return null;
   }
 
 }

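The point of widening the catch: default construction can fail with Errors
as well as Exceptions (for instance an ExceptionInInitializerError or
UnsatisfiedLinkError out of a static initializer), and those previously
escaped to the caller. A hedged sketch of the pattern, with an artificial
failure wired in for illustration:

  public class SafeDefaultSketch {
    static class ProblemChild {
      static {
        // Artificial failure: triggers ExceptionInInitializerError on
        // first use when the demo flag is set.
        if (Boolean.getBoolean("demo.fail")) {
          throw new IllegalStateException("static init failed");
        }
      }
    }

    static ProblemChild createQuietly() {
      try {
        return new ProblemChild();
      } catch (Throwable t) { // catches Errors too, unlike SecurityException
        System.err.println("Default implementation unavailable: " + t);
        return null;
      }
    }

    public static void main(String[] args) {
      System.setProperty("demo.fail", "true");
      System.out.println(createQuietly()); // prints null, does not crash
    }
  }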

[06/11] hadoop git commit: MAPREDUCE-6421. Fix findbugs warning in RMContainerAllocator.reduceNodeLabelExpression. Contributed by Brahma Reddy Battula.

Posted by ji...@apache.org.
MAPREDUCE-6421. Fix findbugs warning in RMContainerAllocator.reduceNodeLabelExpression. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e04faf8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e04faf8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e04faf8e

Branch: refs/heads/YARN-1197
Commit: e04faf8e462a24a3ddfd8f22050e74eda4aa19ff
Parents: 5ed1fea
Author: Akira Ajisaka <aa...@apache.org>
Authored: Mon Jul 13 14:28:14 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon Jul 13 14:29:14 2015 +0900

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt                      | 4 ++++
 hadoop-mapreduce-project/dev-support/findbugs-exclude.xml | 1 +
 2 files changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e04faf8e/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 81b202b..95eec1c 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -533,6 +533,10 @@ Release 2.8.0 - UNRELEASED
     MAPREDUCE-6038. A boolean may be set error in the Word Count v2.0 in
     MapReduce Tutorial. (Tsuyoshi Ozawa via cdouglas)
 
+    MAPREDUCE-6421. Fix findbugs warning in
+    RMContainerAllocator.reduceNodeLabelExpression.
+    (Brahma Reddy Battula via aajisaka)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e04faf8e/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml b/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml
index dd4892b..f1ef2b8 100644
--- a/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml
+++ b/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml
@@ -479,6 +479,7 @@
       <Field name="reduceResourceRequest" />
       <Field name="maxReduceRampupLimit" />
       <Field name="reduceSlowStart" />
+      <Field name="reduceNodeLabelExpression" />
      </Or>
      <Bug pattern="IS2_INCONSISTENT_SYNC" />
    </Match>


[07/11] hadoop git commit: YARN-3381. Fix typo InvalidStateTransitonException. Contributed by Brahma Reddy Battula.

Posted by ji...@apache.org.
YARN-3381. Fix typo InvalidStateTransitonException. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19295b36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19295b36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19295b36

Branch: refs/heads/YARN-1197
Commit: 19295b36d90e26616accee73b1f7743aab5df692
Parents: e04faf8
Author: Akira Ajisaka <aa...@apache.org>
Authored: Mon Jul 13 17:52:13 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon Jul 13 17:52:13 2015 +0900

----------------------------------------------------------------------
 .../mapreduce/v2/app/job/impl/JobImpl.java      |  4 +-
 .../v2/app/job/impl/TaskAttemptImpl.java        |  4 +-
 .../mapreduce/v2/app/job/impl/TaskImpl.java     |  4 +-
 hadoop-yarn-project/CHANGES.txt                 |  3 ++
 .../api/async/impl/NMClientAsyncImpl.java       |  4 +-
 .../state/InvalidStateTransitionException.java  | 51 ++++++++++++++++++++
 .../state/InvalidStateTransitonException.java   | 21 +++-----
 .../apache/hadoop/yarn/state/StateMachine.java  |  2 +-
 .../hadoop/yarn/state/StateMachineFactory.java  | 10 ++--
 .../application/ApplicationImpl.java            |  4 +-
 .../container/ContainerImpl.java                |  4 +-
 .../localizer/LocalizedResource.java            |  4 +-
 .../resourcemanager/recovery/RMStateStore.java  |  4 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |  4 +-
 .../rmapp/attempt/RMAppAttemptImpl.java         |  4 +-
 .../rmcontainer/RMContainerImpl.java            |  4 +-
 .../resourcemanager/rmnode/RMNodeImpl.java      |  4 +-
 17 files changed, 90 insertions(+), 45 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19295b36/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
index 2c48019..731bcba 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
@@ -122,7 +122,7 @@ import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
 import org.apache.hadoop.yarn.state.MultipleArcTransition;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
 import org.apache.hadoop.yarn.state.StateMachine;
@@ -994,7 +994,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
       JobStateInternal oldState = getInternalState();
       try {
          getStateMachine().doTransition(event.getType(), event);
-      } catch (InvalidStateTransitonException e) {
+      } catch (InvalidStateTransitionException e) {
         LOG.error("Can't handle this event at current state", e);
         addDiagnostic("Invalid event " + event.getType() + 
             " on Job " + this.jobId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19295b36/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
index 3fa42fe..77a7555 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
@@ -128,7 +128,7 @@ import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
-import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
 import org.apache.hadoop.yarn.state.MultipleArcTransition;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
 import org.apache.hadoop.yarn.state.StateMachine;
@@ -1188,7 +1188,7 @@ public abstract class TaskAttemptImpl implements
       final TaskAttemptStateInternal oldState = getInternalState()  ;
       try {
         stateMachine.doTransition(event.getType(), event);
-      } catch (InvalidStateTransitonException e) {
+      } catch (InvalidStateTransitionException e) {
         LOG.error("Can't handle this event at current state for "
             + this.attemptId, e);
         eventHandler.handle(new JobDiagnosticsUpdateEvent(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19295b36/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
index ca81059..5f5f300 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
@@ -85,7 +85,7 @@ import org.apache.hadoop.util.StringInterner;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
-import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
 import org.apache.hadoop.yarn.state.MultipleArcTransition;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
 import org.apache.hadoop.yarn.state.StateMachine;
@@ -646,7 +646,7 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
       TaskStateInternal oldState = getInternalState();
       try {
         stateMachine.doTransition(event.getType(), event);
-      } catch (InvalidStateTransitonException e) {
+      } catch (InvalidStateTransitionException e) {
         LOG.error("Can't handle this event at current state for "
             + this.taskId, e);
         internalError(event.getType());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19295b36/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f23effa..98f0e8d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -625,6 +625,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3894. RM startup should fail for wrong CS xml NodeLabel capacity
     configuration. (Bibin A Chundatt via wangda)
 
+    YARN-3381. Fix typo InvalidStateTransitonException.
+    (Brahma Reddy Battula via aajisaka)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19295b36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
index 700a509..39682df 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
@@ -55,7 +55,7 @@ import org.apache.hadoop.yarn.event.AbstractEvent;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.ipc.RPCUtil;
-import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
 import org.apache.hadoop.yarn.state.MultipleArcTransition;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
 import org.apache.hadoop.yarn.state.StateMachine;
@@ -496,7 +496,7 @@ public class NMClientAsyncImpl extends NMClientAsync {
       try {
         try {
           this.stateMachine.doTransition(event.getType(), event);
-        } catch (InvalidStateTransitonException e) {
+        } catch (InvalidStateTransitionException e) {
           LOG.error("Can't handle this event at current state", e);
         }
       } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19295b36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitionException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitionException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitionException.java
new file mode 100644
index 0000000..d10902a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitionException.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.state;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+
+/**
+ * The exception thrown when an invalid state transition is attempted.
+ */
+@Public
+@Evolving
+public class InvalidStateTransitionException extends YarnRuntimeException {
+
+  private static final long serialVersionUID = -6188669113571351684L;
+  private Enum<?> currentState;
+  private Enum<?> event;
+
+  public InvalidStateTransitionException(Enum<?> currentState, Enum<?> event) {
+    super("Invalid event: " + event + " at " + currentState);
+    this.currentState = currentState;
+    this.event = event;
+  }
+
+  public Enum<?> getCurrentState() {
+    return currentState;
+  }
+
+  public Enum<?> getEvent() {
+    return event;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19295b36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitonException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitonException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitonException.java
index a9a8b11..eeb1b97 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitonException.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/InvalidStateTransitonException.java
@@ -20,29 +20,20 @@ package org.apache.hadoop.yarn.state;
 
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
-import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+
+/** @deprecated Use {@link InvalidStateTransitionException} instead. */
 
 @Public
 @Evolving
-public class InvalidStateTransitonException extends YarnRuntimeException {
+@Deprecated
+public class InvalidStateTransitonException extends
+    InvalidStateTransitionException {
 
   private static final long serialVersionUID = 8610511635996283691L;
 
-  private Enum<?> currentState;
-  private Enum<?> event;
-
   public InvalidStateTransitonException(Enum<?> currentState, Enum<?> event) {
-    super("Invalid event: " + event + " at " + currentState);
-    this.currentState = currentState;
-    this.event = event;
+    super(currentState, event);
   }
 
-  public Enum<?> getCurrentState() {
-    return currentState;
-  }
-  
-  public Enum<?> getEvent() {
-    return event;
-  }
 
 }
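
Note the direction of this refactoring: the misspelled class survives as a deprecated subclass of the corrected one, so code that throws or references the old name still compiles. The state machine itself, however, now throws the corrected parent type, which a catch clause naming the old subclass no longer matches; that is why every catch site in this commit is migrated. A minimal sketch of the migrated pattern (stateMachine, event and LOG as in the surrounding diffs):

    try {
      this.stateMachine.doTransition(event.getType(), event);
    } catch (InvalidStateTransitionException e) {
      // Corrected spelling. A catch of the deprecated
      // InvalidStateTransitonException would no longer match what
      // StateMachineFactory throws after this change.
      LOG.error("Can't handle event " + e.getEvent()
          + " at state " + e.getCurrentState(), e);
    }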

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19295b36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachine.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachine.java
index b5ce6a3..5151559 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachine.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachine.java
@@ -28,5 +28,5 @@ public interface StateMachine
                   EVENTTYPE extends Enum<EVENTTYPE>, EVENT> {
   public STATE getCurrentState();
   public STATE doTransition(EVENTTYPE eventType, EVENT event)
-        throws InvalidStateTransitonException;
+        throws InvalidStateTransitionException;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19295b36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachineFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachineFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachineFactory.java
index 55ac4cf..5b76ce8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachineFactory.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/StateMachineFactory.java
@@ -289,7 +289,7 @@ final public class StateMachineFactory
    */
   private STATE doTransition
            (OPERAND operand, STATE oldState, EVENTTYPE eventType, EVENT event)
-      throws InvalidStateTransitonException {
+      throws InvalidStateTransitionException {
     // We can assume that stateMachineTable is non-null because we call
     //  maybeMakeStateMachineTable() when we build an InnerStateMachine ,
     //  and this code only gets called from inside a working InnerStateMachine .
@@ -302,7 +302,7 @@ final public class StateMachineFactory
         return transition.doTransition(operand, oldState, event, eventType);
       }
     }
-    throw new InvalidStateTransitonException(oldState, eventType);
+    throw new InvalidStateTransitionException(oldState, eventType);
   }
 
   private synchronized void maybeMakeStateMachineTable() {
@@ -381,11 +381,11 @@ final public class StateMachineFactory
     @Override
     public STATE doTransition(OPERAND operand, STATE oldState,
                               EVENT event, EVENTTYPE eventType)
-        throws InvalidStateTransitonException {
+        throws InvalidStateTransitionException {
       STATE postState = hook.transition(operand, event);
 
       if (!validPostStates.contains(postState)) {
-        throw new InvalidStateTransitonException(oldState, eventType);
+        throw new InvalidStateTransitionException(oldState, eventType);
       }
       return postState;
     }
@@ -444,7 +444,7 @@ final public class StateMachineFactory
 
     @Override
     public synchronized STATE doTransition(EVENTTYPE eventType, EVENT event)
-         throws InvalidStateTransitonException  {
+         throws InvalidStateTransitionException  {
       currentState = StateMachineFactory.this.doTransition
           (operand, currentState, eventType, event);
       return currentState;
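
For readers new to this package, here is a self-contained sketch of driving a StateMachineFactory-built machine and handling the renamed exception. The Door example is hypothetical; only the org.apache.hadoop.yarn.state types come from this module:

    import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
    import org.apache.hadoop.yarn.state.StateMachine;
    import org.apache.hadoop.yarn.state.StateMachineFactory;

    public class Door {
      enum State { CLOSED, OPEN }
      enum EventType { OPEN_DOOR, CLOSE_DOOR }

      // Declarative transition table; installTopology() freezes it.
      private static final StateMachineFactory<Door, State, EventType, EventType>
          FACTORY =
            new StateMachineFactory<Door, State, EventType, EventType>(State.CLOSED)
              .addTransition(State.CLOSED, State.OPEN, EventType.OPEN_DOOR)
              .addTransition(State.OPEN, State.CLOSED, EventType.CLOSE_DOOR)
              .installTopology();

      private final StateMachine<State, EventType, EventType> stateMachine =
          FACTORY.make(this);

      public void handle(EventType event) {
        try {
          stateMachine.doTransition(event, event);
        } catch (InvalidStateTransitionException e) {
          // e.g. OPEN_DOOR while already OPEN lands here.
          System.err.println("Invalid event " + e.getEvent()
              + " at " + e.getCurrentState());
        }
      }

      public static void main(String[] args) {
        Door door = new Door();
        door.handle(EventType.OPEN_DOOR);  // CLOSED -> OPEN
        door.handle(EventType.OPEN_DOOR);  // invalid: error is printed
      }
    }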

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19295b36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
index a73b113..e880c31 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
@@ -49,7 +49,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
-import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
 import org.apache.hadoop.yarn.state.MultipleArcTransition;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
 import org.apache.hadoop.yarn.state.StateMachine;
@@ -457,7 +457,7 @@ public class ApplicationImpl implements Application {
       try {
         // queue event requesting init of the same app
         newState = stateMachine.doTransition(event.getType(), event);
-      } catch (InvalidStateTransitonException e) {
+      } catch (InvalidStateTransitionException e) {
         LOG.warn("Can't handle this event at current state", e);
       }
       if (oldState != newState) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19295b36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 289d6d0..3c76596 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -71,7 +71,7 @@ import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerStatus;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
-import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
 import org.apache.hadoop.yarn.state.MultipleArcTransition;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
 import org.apache.hadoop.yarn.state.StateMachine;
@@ -1121,7 +1121,7 @@ public class ContainerImpl implements Container {
       try {
         newState =
             stateMachine.doTransition(event.getType(), event);
-      } catch (InvalidStateTransitonException e) {
+      } catch (InvalidStateTransitionException e) {
         LOG.warn("Can't handle this event at current state: Current: ["
             + oldState + "], eventType: [" + event.getType() + "]", e);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19295b36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
index e2d0fe1..04d95f1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.even
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceRecoveredEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceReleaseEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceRequestEvent;
-import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
 import org.apache.hadoop.yarn.state.StateMachine;
 import org.apache.hadoop.yarn.state.StateMachineFactory;
@@ -196,7 +196,7 @@ public class LocalizedResource implements EventHandler<ResourceEvent> {
       ResourceState newState = null;
       try {
         newState = this.stateMachine.doTransition(event.getType(), event);
-      } catch (InvalidStateTransitonException e) {
+      } catch (InvalidStateTransitionException e) {
         LOG.warn("Can't handle this event at current state", e);
       }
       if (oldState != newState) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19295b36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
index cc4edd7..46c2954 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
@@ -62,7 +62,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AggregateAppR
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
-import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
 import org.apache.hadoop.yarn.state.StateMachine;
 import org.apache.hadoop.yarn.state.StateMachineFactory;
@@ -841,7 +841,7 @@ public abstract class RMStateStore extends AbstractService {
             + getRMStateStoreState());
       }
 
-    } catch (InvalidStateTransitonException e) {
+    } catch (InvalidStateTransitionException e) {
       LOG.error("Can't handle this event at current state", e);
     } finally {
       this.writeLock.unlock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19295b36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 90e63c1..62d5555 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -93,7 +93,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSch
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils;
-import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
 import org.apache.hadoop.yarn.state.MultipleArcTransition;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
 import org.apache.hadoop.yarn.state.StateMachine;
@@ -759,7 +759,7 @@ public class RMAppImpl implements RMApp, Recoverable {
       try {
         /* keep the master in sync with the state machine */
         this.stateMachine.doTransition(event.getType(), event);
-      } catch (InvalidStateTransitonException e) {
+      } catch (InvalidStateTransitionException e) {
         LOG.error("Can't handle this event at current state", e);
         /* TODO fail the application on the failed transition */
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19295b36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 5171bba..0914022 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -96,7 +96,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptA
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils;
-import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
 import org.apache.hadoop.yarn.state.MultipleArcTransition;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
 import org.apache.hadoop.yarn.state.StateMachine;
@@ -784,7 +784,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
       try {
         /* keep the master in sync with the state machine */
         this.stateMachine.doTransition(event.getType(), event);
-      } catch (InvalidStateTransitonException e) {
+      } catch (InvalidStateTransitionException e) {
         LOG.error("Can't handle this event at current state", e);
         /* TODO fail the application on the failed transition */
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19295b36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index 316a450..0ad63b4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAllocatedEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent;
-import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
 import org.apache.hadoop.yarn.state.MultipleArcTransition;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
 import org.apache.hadoop.yarn.state.StateMachine;
@@ -385,7 +385,7 @@ public class RMContainerImpl implements RMContainer, Comparable<RMContainer> {
       RMContainerState oldState = getState();
       try {
          stateMachine.doTransition(event.getType(), event);
-      } catch (InvalidStateTransitonException e) {
+      } catch (InvalidStateTransitionException e) {
         LOG.error("Can't handle this event at current state", e);
         LOG.error("Invalid event " + event.getType() + 
             " on container " + this.containerId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19295b36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index 9bc91c7..09b9278 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -64,7 +64,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemoved
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourceUpdateSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils.ContainerIdComparator;
-import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
+import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
 import org.apache.hadoop.yarn.state.MultipleArcTransition;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
 import org.apache.hadoop.yarn.state.StateMachine;
@@ -446,7 +446,7 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
       NodeState oldState = getState();
       try {
          stateMachine.doTransition(event.getType(), event);
-      } catch (InvalidStateTransitonException e) {
+      } catch (InvalidStateTransitionException e) {
         LOG.error("Can't handle this event at current state", e);
         LOG.error("Invalid event " + event.getType() + 
             " on Node  " + this.nodeId);


[05/11] hadoop git commit: YARN-3894. RM startup should fail for wrong CS xml NodeLabel capacity configuration. (Bibin A Chundatt via wangda)

Posted by ji...@apache.org.
YARN-3894. RM startup should fail for wrong CS xml NodeLabel capacity configuration. (Bibin A Chundatt via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ed1fead
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ed1fead
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ed1fead

Branch: refs/heads/YARN-1197
Commit: 5ed1fead6b5ec24bb0ce1a3db033c2ee1ede4bb4
Parents: d667560
Author: Wangda Tan <wa...@apache.org>
Authored: Sun Jul 12 21:51:34 2015 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Sun Jul 12 21:52:11 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 ++
 .../scheduler/capacity/ParentQueue.java         |  2 +-
 .../scheduler/capacity/TestQueueParsing.java    | 38 ++++++++++++++++++++
 3 files changed, 42 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ed1fead/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0415ae4..f23effa 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -622,6 +622,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3917. getResourceCalculatorPlugin for the default should intercept all
     exceptions. (gera)
 
+    YARN-3894. RM startup should fail for wrong CS xml NodeLabel capacity
+    configuration. (Bibin A Chundatt via wangda)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ed1fead/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index 97a5d27..5807dd1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -147,7 +147,7 @@ public class ParentQueue extends AbstractCSQueue {
       		" for children of queue " + queueName);
     }
     // check label capacities
-    for (String nodeLabel : labelManager.getClusterNodeLabelNames()) {
+    for (String nodeLabel : queueCapacities.getExistingNodeLabels()) {
       float capacityByLabel = queueCapacities.getCapacity(nodeLabel);
       // check children's labels
       float sum = 0;
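
The one-line change above narrows the validation loop from every label known to the cluster to the labels this queue hierarchy actually configures, so the existing sum check now fires at RM startup for a misconfigured hierarchy. Paraphrased, the check works roughly as follows (a sketch of the surrounding validation, not the verbatim source; PRECISION stands for the scheduler's small float epsilon):

    for (String nodeLabel : queueCapacities.getExistingNodeLabels()) {
      float capacityByLabel = queueCapacities.getCapacity(nodeLabel);
      // Children's capacities for this label must sum to 100% of the
      // parent's label capacity (or to 0% if the parent configures none).
      float sum = 0;
      for (CSQueue child : childQueues) {
        sum += child.getQueueCapacities().getCapacity(nodeLabel);
      }
      if ((capacityByLabel > 0 && Math.abs(1 - sum) > PRECISION)
          || (capacityByLabel == 0 && sum > 0)) {
        throw new IllegalArgumentException("Illegal capacity of " + sum
            + " for children of queue " + queueName
            + " for label=" + nodeLabel);
      }
    }

In the new test below, root configures label z at 100 while its only child gets 70, so the sum is 0.7 and MockRM construction fails with the expected IllegalArgumentException.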

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ed1fead/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java
index 198bd4a..92baa85 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java
@@ -22,12 +22,15 @@ import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.service.ServiceOperations;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
@@ -898,4 +901,39 @@ public class TestQueueParsing {
     
     capacityScheduler.reinitialize(csConf, rmContext);
   }
+
+  @Test(timeout = 60000, expected = java.lang.IllegalArgumentException.class)
+  public void testRMStartWrongNodeCapacity() throws Exception {
+    YarnConfiguration config = new YarnConfiguration();
+    nodeLabelManager = new NullRMNodeLabelsManager();
+    nodeLabelManager.init(config);
+    config.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+    CapacitySchedulerConfiguration conf =
+        new CapacitySchedulerConfiguration(config);
+
+    // Define top-level queues
+    conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] { "a" });
+    conf.setCapacityByLabel(CapacitySchedulerConfiguration.ROOT, "x", 100);
+    conf.setCapacityByLabel(CapacitySchedulerConfiguration.ROOT, "y", 100);
+    conf.setCapacityByLabel(CapacitySchedulerConfiguration.ROOT, "z", 100);
+
+    final String A = CapacitySchedulerConfiguration.ROOT + ".a";
+    conf.setCapacity(A, 100);
+    conf.setAccessibleNodeLabels(A, ImmutableSet.of("x", "y", "z"));
+    conf.setCapacityByLabel(A, "x", 100);
+    conf.setCapacityByLabel(A, "y", 100);
+    conf.setCapacityByLabel(A, "z", 70);
+    MockRM rm = null;
+    try {
+      rm = new MockRM(conf) {
+        @Override
+        public RMNodeLabelsManager createNodeLabelManager() {
+          return nodeLabelManager;
+        }
+      };
+    } finally {
+      IOUtils.closeStream(rm);
+    }
+  }
 }


[09/11] hadoop git commit: Add HDFS-8143 to CHANGES.txt.

Posted by ji...@apache.org.
Add HDFS-8143 to CHANGES.txt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7c8311e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7c8311e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7c8311e

Branch: refs/heads/YARN-1197
Commit: f7c8311e9836ad1a1a2ef6eca8b42fd61a688164
Parents: 2466460
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Mon Jul 13 14:59:45 2015 -0700
Committer: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Committed: Mon Jul 13 14:59:45 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7c8311e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4fa566d..1491990 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1081,6 +1081,9 @@ Release 2.7.1 - 2015-07-06
 
     HDFS-7164. Feature documentation for HDFS-6581. (Arpit Agarwal)
 
+    HDFS-8143. Mover should exit after some retry when failed to move blocks.
+    (Surendra Singh Lilhore via szetszwo)
+
   OPTIMIZATIONS
     HDFS-8480. Fix performance and timeout issues in HDFS-7929 by using
     hard-links to preserve old edit logs, instead of copying them. (Zhe Zhang


[08/11] hadoop git commit: Revert "YARN-3878. AsyncDispatcher can hang while stopping if it is configured for draining events on stop. (Varun Saxena via kasha)"

Posted by ji...@apache.org.
Revert "YARN-3878. AsyncDispatcher can hang while stopping if it is configured for draining events on stop. (Varun Saxena via kasha)"

This reverts commit aa067c6aa47b4c79577096817acc00ad6421180c.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2466460d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2466460d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2466460d

Branch: refs/heads/YARN-1197
Commit: 2466460d4cd13ad5837c044476b26e63082c1d37
Parents: 19295b3
Author: Jian He <ji...@apache.org>
Authored: Mon Jul 13 14:30:35 2015 -0700
Committer: Jian He <ji...@apache.org>
Committed: Mon Jul 13 14:30:35 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 -
 .../hadoop/yarn/event/AsyncDispatcher.java      | 24 ++++----
 .../hadoop/yarn/event/DrainDispatcher.java      | 13 +----
 .../hadoop/yarn/event/TestAsyncDispatcher.java  | 61 --------------------
 4 files changed, 14 insertions(+), 87 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2466460d/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 98f0e8d..5c17f04 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -648,9 +648,6 @@ Release 2.7.2 - UNRELEASED
 
     YARN-3690. [JDK8] 'mvn site' fails. (Brahma Reddy Battula via aajisaka)
 
-    YARN-3878. AsyncDispatcher can hang while stopping if it is configured for 
-    draining events on stop. (Varun Saxena via kasha)
-
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2466460d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
index 646611f..c54b9c7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
@@ -55,6 +55,9 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
   // stop functionality.
   private volatile boolean drainEventsOnStop = false;
 
+  // Indicates all the remaining dispatcher's events on stop have been drained
+  // and processed.
+  private volatile boolean drained = true;
   private Object waitForDrained = new Object();
 
   // For drainEventsOnStop enabled only, block newly coming events into the
@@ -81,12 +84,13 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
       @Override
       public void run() {
         while (!stopped && !Thread.currentThread().isInterrupted()) {
+          drained = eventQueue.isEmpty();
           // blockNewEvents is only set when dispatcher is draining to stop,
           // adding this check is to avoid the overhead of acquiring the lock
           // and calling notify every time in the normal run of the loop.
           if (blockNewEvents) {
             synchronized (waitForDrained) {
-              if (eventQueue.isEmpty()) {
+              if (drained) {
                 waitForDrained.notify();
               }
             }
@@ -135,7 +139,7 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
       blockNewEvents = true;
       LOG.info("AsyncDispatcher is draining to stop, igonring any new events.");
       synchronized (waitForDrained) {
-        while (!eventQueue.isEmpty() && eventHandlingThread.isAlive()) {
+        while (!drained && eventHandlingThread.isAlive()) {
           waitForDrained.wait(1000);
           LOG.info("Waiting for AsyncDispatcher to drain. Thread state is :" +
               eventHandlingThread.getState());
@@ -219,21 +223,12 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
     return handlerInstance;
   }
 
-  @VisibleForTesting
-  protected boolean hasPendingEvents() {
-    return !eventQueue.isEmpty();
-  }
-
-  @VisibleForTesting
-  protected boolean isEventThreadWaiting() {
-    return eventHandlingThread.getState() == Thread.State.WAITING;
-  }
-
   class GenericEventHandler implements EventHandler<Event> {
     public void handle(Event event) {
       if (blockNewEvents) {
         return;
       }
+      drained = false;
 
       /* all this method does is enqueue all the events onto the queue */
       int qSize = eventQueue.size();
@@ -290,4 +285,9 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
       }
     };
   }
+
+  @VisibleForTesting
+  protected boolean isDrained() {
+    return this.drained;
+  }
 }
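
Condensed, the drain-on-stop handshake that this revert restores looks as follows. This is a self-contained sketch, not the Hadoop source: the producer clears `drained` before enqueueing, the consumer re-derives it at the top of every loop, and stop() waits on the monitor until the flag flips:

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    public class MiniDispatcher {
      private final BlockingQueue<Runnable> eventQueue =
          new LinkedBlockingQueue<>();
      private volatile boolean drained = true;
      private volatile boolean blockNewEvents = false;
      private volatile boolean stopped = false;
      private final Object waitForDrained = new Object();

      private final Thread eventHandlingThread = new Thread(() -> {
        while (!stopped && !Thread.currentThread().isInterrupted()) {
          drained = eventQueue.isEmpty();
          if (blockNewEvents) {
            synchronized (waitForDrained) {
              if (drained) {
                waitForDrained.notify();   // wake a stop() in progress
              }
            }
          }
          try {
            eventQueue.take().run();
          } catch (InterruptedException e) {
            return;
          }
        }
      });

      public void start() { eventHandlingThread.start(); }

      public void handle(Runnable event) {
        if (blockNewEvents) {
          return;                          // stopping: drop new events
        }
        drained = false;                   // mark work outstanding first
        eventQueue.add(event);
      }

      public void stop() throws InterruptedException {
        blockNewEvents = true;
        synchronized (waitForDrained) {
          while (!drained && eventHandlingThread.isAlive()) {
            waitForDrained.wait(1000);     // timed wait avoids a lost notify
          }
        }
        stopped = true;
        eventHandlingThread.interrupt();
        eventHandlingThread.join();
      }
    }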

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2466460d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
index d1f4fe9..da5ae44 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
@@ -27,24 +27,15 @@ public class DrainDispatcher extends AsyncDispatcher {
     this(new LinkedBlockingQueue<Event>());
   }
 
-  public DrainDispatcher(BlockingQueue<Event> eventQueue) {
+  private DrainDispatcher(BlockingQueue<Event> eventQueue) {
     super(eventQueue);
   }
 
   /**
-   *  Wait till event thread enters WAITING state (i.e. waiting for new events).
-   */
-  public void waitForEventThreadToWait() {
-    while (!isEventThreadWaiting()) {
-      Thread.yield();
-    }
-  }
-
-  /**
    * Busy loop waiting for all queued events to drain.
    */
   public void await() {
-    while (hasPendingEvents()) {
+    while (!isDrained()) {
       Thread.yield();
     }
   }
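
With the revert, test code goes back to the pre-YARN-3878 idiom: await() spins on the dispatcher-maintained flag rather than on queue emptiness. Typical test usage (a sketch; SomeEventType, handler and event are hypothetical placeholders):

    DrainDispatcher dispatcher = new DrainDispatcher();
    dispatcher.init(new Configuration());
    dispatcher.start();
    dispatcher.register(SomeEventType.class, handler);
    dispatcher.getEventHandler().handle(event);
    dispatcher.await();   // busy-wait until isDrained() reports true
    dispatcher.stop();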

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2466460d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java
deleted file mode 100644
index ee17ddd..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.event;
-
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
-
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import org.junit.Assert;
-import org.junit.Test;
-
-public class TestAsyncDispatcher {
-
-  /* This test checks whether dispatcher hangs on close if following two things
-   * happen :
-   * 1. A thread which was putting event to event queue is interrupted.
-   * 2. Event queue is empty on close.
-   */
-  @SuppressWarnings({ "unchecked", "rawtypes" })
-  @Test(timeout=10000)
-  public void testDispatcherOnCloseIfQueueEmpty() throws Exception {
-    BlockingQueue<Event> eventQueue = spy(new LinkedBlockingQueue<Event>());
-    Event event = mock(Event.class);
-    doThrow(new InterruptedException()).when(eventQueue).put(event);
-    DrainDispatcher disp = new DrainDispatcher(eventQueue);
-    disp.init(new Configuration());
-    disp.setDrainEventsOnStop();
-    disp.start();
-    // Wait for event handler thread to start and begin waiting for events.
-    disp.waitForEventThreadToWait();
-    try {
-      disp.getEventHandler().handle(event);
-    } catch (YarnRuntimeException e) {
-    }
-    // Queue should be empty and dispatcher should not hang on close
-    Assert.assertTrue("Event Queue should have been empty",
-        eventQueue.isEmpty());
-    disp.close();
-  }
-}


[10/11] hadoop git commit: HDFS-8541. Mover should exit with NO_MOVE_PROGRESS if there is no move progress. Contributed by Surendra Singh Lilhore

Posted by ji...@apache.org.
HDFS-8541. Mover should exit with NO_MOVE_PROGRESS if there is no move progress.  Contributed by Surendra Singh Lilhore


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ef03a4c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ef03a4c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ef03a4c

Branch: refs/heads/YARN-1197
Commit: 9ef03a4c5bb5573eadc7d04e371c4af2dc6bae37
Parents: f7c8311
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Mon Jul 13 15:12:26 2015 -0700
Committer: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Committed: Mon Jul 13 15:12:26 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../hadoop/hdfs/server/balancer/Dispatcher.java | 18 +++++++++++++
 .../apache/hadoop/hdfs/server/mover/Mover.java  | 27 +++++++++++++++-----
 .../hadoop/hdfs/server/mover/TestMover.java     |  2 +-
 4 files changed, 43 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ef03a4c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1491990..e843dcc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -716,6 +716,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8751. Remove setBlocks API from INodeFile and misc code cleanup. (Zhe
     Zhang via jing9)
 
+    HDFS-8541. Mover should exit with NO_MOVE_PROGRESS if there is no move
+    progress.  (Surendra Singh Lilhore via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ef03a4c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index 4a8f40f..298b86d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -317,6 +317,7 @@ public class Dispatcher {
         sendRequest(out, eb, accessToken);
         receiveResponse(in);
         nnc.getBytesMoved().addAndGet(block.getNumBytes());
+        target.getDDatanode().setHasSuccess();
         LOG.info("Successfully moved " + this);
       } catch (IOException e) {
         LOG.warn("Failed to move " + this + ": " + e.getMessage());
@@ -500,6 +501,7 @@ public class Dispatcher {
     /** blocks being moved but not confirmed yet */
     private final List<PendingMove> pendings;
     private volatile boolean hasFailure = false;
+    private volatile boolean hasSuccess = false;
     private final int maxConcurrentMoves;
 
     @Override
@@ -573,6 +575,10 @@ public class Dispatcher {
     void setHasFailure() {
       this.hasFailure = true;
     }
+
+    void setHasSuccess() {
+      this.hasSuccess = true;
+    }
   }
 
   /** A node that can be the sources of a block move */
@@ -965,6 +971,18 @@ public class Dispatcher {
   }
 
   /**
+   * @return true if some moves are success.
+   */
+  public static boolean checkForSuccess(
+      Iterable<? extends StorageGroup> targets) {
+    boolean hasSuccess = false;
+    for (StorageGroup t : targets) {
+      hasSuccess |= t.getDDatanode().hasSuccess;
+    }
+    return hasSuccess;
+  }
+
+  /**
    * Decide if the block is a good candidate to be moved from source to target.
    * A block is a good candidate if
    * 1. the block is not in the process of being moved/has not been moved;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ef03a4c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index 344b9fc..afacebb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -269,10 +269,14 @@ public class Mover {
       // wait for pending move to finish and retry the failed migration
       boolean hasFailed = Dispatcher.waitForMoveCompletion(storages.targets
           .values());
-      if (hasFailed) {
+      boolean hasSuccess = Dispatcher.checkForSuccess(storages.targets
+          .values());
+      if (hasFailed && !hasSuccess) {
         if (retryCount.get() == retryMaxAttempts) {
-          throw new IOException("Failed to move some block's after "
+          result.setRetryFailed();
+          LOG.error("Failed to move some block's after "
               + retryMaxAttempts + " retries.");
+          return result;
         } else {
           retryCount.incrementAndGet();
         }
@@ -713,10 +717,12 @@ public class Mover {
 
     private boolean hasRemaining;
     private boolean noBlockMoved;
+    private boolean retryFailed;
 
     Result() {
       hasRemaining = false;
       noBlockMoved = true;
+      retryFailed = false;
     }
 
     boolean isHasRemaining() {
@@ -735,16 +741,25 @@ public class Mover {
       this.noBlockMoved = noBlockMoved;
     }
 
+    void setRetryFailed() {
+      this.retryFailed = true;
+    }
+
     /**
-     * @return SUCCESS if all moves are success and there is no remaining move.
+     * @return NO_MOVE_PROGRESS if no progress in move after some retry. Return
+     *         SUCCESS if all moves are success and there is no remaining move.
      *         Return NO_MOVE_BLOCK if there moves available but all the moves
      *         cannot be scheduled. Otherwise, return IN_PROGRESS since there
      *         must be some remaining moves.
      */
     ExitStatus getExitStatus() {
-      return !isHasRemaining() ? ExitStatus.SUCCESS
-          : isNoBlockMoved() ? ExitStatus.NO_MOVE_BLOCK
-              : ExitStatus.IN_PROGRESS;
+      if (retryFailed) {
+        return ExitStatus.NO_MOVE_PROGRESS;
+      } else {
+        return !isHasRemaining() ? ExitStatus.SUCCESS
+            : isNoBlockMoved() ? ExitStatus.NO_MOVE_BLOCK
+                : ExitStatus.IN_PROGRESS;
+      }
     }
 
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ef03a4c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
index 899b5c0..d3d814c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
@@ -404,7 +404,7 @@ public class TestMover {
       int rc = ToolRunner.run(conf, new Mover.Cli(),
           new String[] {"-p", file.toString()});
       Assert.assertEquals("Movement should fail after some retry",
-          ExitStatus.IO_EXCEPTION.getExitCode(), rc);
+          ExitStatus.NO_MOVE_PROGRESS.getExitCode(), rc);
     } finally {
       cluster.shutdown();
     }
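
From a caller's perspective the change surfaces only through the exit code: instead of failing with an IOException after the retries are exhausted, the Mover now returns NO_MOVE_PROGRESS. A sketch of checking for it, mirroring the updated test above (conf and file set up as in TestMover):

    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] {"-p", file.toString()});
    if (rc == ExitStatus.NO_MOVE_PROGRESS.getExitCode()) {
      // Every scheduled move kept failing and none succeeded across the
      // whole retry budget; treat this as a hard failure.
    }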


[02/11] hadoop git commit: YARN-3849. Too much of preemption activity causing continuos killing of containers across queues. (Sunil G via wangda)

Posted by ji...@apache.org.
YARN-3849. Too much of preemption activity causing continuos killing of containers across queues. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1df39c1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1df39c1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1df39c1e

Branch: refs/heads/YARN-1197
Commit: 1df39c1efc9ed26d3f1a5887c31c38c873e0b784
Parents: 1ea3629
Author: Wangda Tan <wa...@apache.org>
Authored: Sat Jul 11 10:26:46 2015 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Sat Jul 11 10:26:46 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../ProportionalCapacityPreemptionPolicy.java   |   4 +-
 ...estProportionalCapacityPreemptionPolicy.java | 253 ++++++++++++++-----
 ...pacityPreemptionPolicyForNodePartitions.java | 114 +++++++--
 4 files changed, 289 insertions(+), 85 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1df39c1e/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f78bbfa..1365747 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -613,6 +613,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3888. ApplicationMaster link is broken in RM WebUI when appstate is NEW.
     (Bibin A Chundatt via xgong)
 
+    YARN-3849. Too much of preemption activity causing continuos killing of
+    containers across queues. (Sunil G via wangda)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1df39c1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 5a20a6f..6e661d4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -840,12 +840,12 @@ public class ProportionalCapacityPreemptionPolicy implements SchedulingEditPolic
     synchronized (curQueue) {
       String queueName = curQueue.getQueueName();
       QueueCapacities qc = curQueue.getQueueCapacities();
-      float absUsed = qc.getAbsoluteUsedCapacity(partitionToLookAt);
       float absCap = qc.getAbsoluteCapacity(partitionToLookAt);
       float absMaxCap = qc.getAbsoluteMaximumCapacity(partitionToLookAt);
       boolean preemptionDisabled = curQueue.getPreemptionDisabled();
 
-      Resource current = Resources.multiply(partitionResource, absUsed);
+      Resource current = curQueue.getQueueResourceUsage().getUsed(
+          partitionToLookAt);
       Resource guaranteed = Resources.multiply(partitionResource, absCap);
       Resource maxCapacity = Resources.multiply(partitionResource, absMaxCap);
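
The core of the fix: read the queue's current usage straight from the scheduler's own ResourceUsage bookkeeping instead of re-deriving it by multiplying the partition resource by a float capacity. The multiplication coerces the product back to integer resources, and the resulting drift inflated preemption targets (see the "rounding error in Resources.multiplyTo" note retained in the test below). A small illustration with made-up values:

    // Resource/Resources from org.apache.hadoop.yarn.util.resource.
    Resource partitionResource = Resource.newInstance(16384, 16);
    float absUsed = 0.3f;
    Resource current = Resources.multiply(partitionResource, absUsed);
    // 16384 * 0.3 = 4915.2, but `current` holds whole units, so it disagrees
    // with the scheduler's exact accounting; summed across queues and policy
    // iterations, that drift kept re-triggering container preemption.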
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1df39c1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
index 2c0c6d7..3057360 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
@@ -81,7 +81,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEv
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.OrderingPolicy;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
+import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
+import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -372,7 +374,7 @@ public class TestProportionalCapacityPreemptionPolicy {
             appA.getApplicationId(), appA.getAttemptId());
     assertTrue("appA should be running on queueB",
         mCS.getAppsInQueue("queueB").contains(expectedAttemptOnQueueB));
-    verify(mDisp, times(9)).handle(argThat(new IsPreemptionRequestFor(appA)));
+    verify(mDisp, times(10)).handle(argThat(new IsPreemptionRequestFor(appA)));
 
     // Need to call setup() again to reset mDisp
     setup();
@@ -395,7 +397,7 @@ public class TestProportionalCapacityPreemptionPolicy {
     // Resources should have come from queueE (appC) and neither of queueA's
     // children.
     verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA)));
-    verify(mDisp, times(9)).handle(argThat(new IsPreemptionRequestFor(appC)));
+    verify(mDisp, times(10)).handle(argThat(new IsPreemptionRequestFor(appC)));
   }
 
   @Test
@@ -470,8 +472,8 @@ public class TestProportionalCapacityPreemptionPolicy {
     // With all queues preemptable, resources should be taken from queueC(appA)
     // and queueD(appB). More resources are taken from queueD(appB) than
     // queueC(appA) because it is over its capacity by a larger percentage.
-    verify(mDisp, times(16)).handle(argThat(new IsPreemptionRequestFor(appA)));
-    verify(mDisp, times(182)).handle(argThat(new IsPreemptionRequestFor(appB)));
+    verify(mDisp, times(17)).handle(argThat(new IsPreemptionRequestFor(appA)));
+    verify(mDisp, times(183)).handle(argThat(new IsPreemptionRequestFor(appB)));
 
     // Turn off preemption for queueA and its children. queueF(appC)'s request
     // should starve.
@@ -635,7 +637,7 @@ public class TestProportionalCapacityPreemptionPolicy {
     policy.editSchedule();
     // verify capacity taken from A1, not B1 despite B1 being far over
     // its absolute guaranteed capacity
-    verify(mDisp, times(9)).handle(argThat(new IsPreemptionRequestFor(appA)));
+    verify(mDisp, times(10)).handle(argThat(new IsPreemptionRequestFor(appA)));
   }
 
   @Test
@@ -676,7 +678,7 @@ public class TestProportionalCapacityPreemptionPolicy {
     // we verify both that C has priority over B and D (as it has >0 guarantees)
     // and that B and D are forced to share their over-capacity fairly (as they
     // are both zero-guarantee), hence D sees some of its containers preempted
-    verify(mDisp, times(14)).handle(argThat(new IsPreemptionRequestFor(appC)));
+    verify(mDisp, times(15)).handle(argThat(new IsPreemptionRequestFor(appC)));
   }
   
   
@@ -703,8 +705,8 @@ public class TestProportionalCapacityPreemptionPolicy {
 
     // XXX note: compensating for rounding error in Resources.multiplyTo
     // which is likely triggered since we use small numbers for readability
-    verify(mDisp, times(7)).handle(argThat(new IsPreemptionRequestFor(appA)));
-    verify(mDisp, times(5)).handle(argThat(new IsPreemptionRequestFor(appE)));
+    verify(mDisp, times(9)).handle(argThat(new IsPreemptionRequestFor(appA)));
+    verify(mDisp, times(6)).handle(argThat(new IsPreemptionRequestFor(appE)));
   }
 
   @Test
@@ -868,6 +870,34 @@ public class TestProportionalCapacityPreemptionPolicy {
     setAMContainer = false;
   }
 
+  @Test
+  public void testPreemptionWithVCoreResource() {
+    int[][] qData = new int[][]{
+        // / A B
+        {100, 100, 100}, // maxcap
+        {5, 1, 1}, // apps
+        {2, 0, 0}, // subqueues
+    };
+
+    // Resources are specified as "memory:vcores" (a lone value means memory only)
+    String[][] resData = new String[][]{
+        // / A B
+        {"100:100", "50:50", "50:50"}, // abs
+        {"10:100", "10:100", "0"}, // used
+        {"70:20", "70:20", "10:100"}, // pending
+        {"0", "0", "0"}, // reserved
+        {"-1", "1:10", "1:10"}, // req granularity
+    };
+
+    // Pass the last param as true to use the DominantResourceCalculator
+    ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData, resData,
+        true);
+    policy.editSchedule();
+
+    // 5 containers will be preempted here
+    verify(mDisp, times(5)).handle(argThat(new IsPreemptionRequestFor(appA)));
+  }
+
   static class IsPreemptionRequestFor
       extends ArgumentMatcher<ContainerPreemptEvent> {
     private final ApplicationAttemptId appAttId;
@@ -892,13 +922,40 @@ public class TestProportionalCapacityPreemptionPolicy {
   }
 
   ProportionalCapacityPreemptionPolicy buildPolicy(int[][] qData) {
-    ProportionalCapacityPreemptionPolicy policy =
-      new ProportionalCapacityPreemptionPolicy(conf, rmContext, mCS, mClock);
+    ProportionalCapacityPreemptionPolicy policy = new ProportionalCapacityPreemptionPolicy(
+        conf, rmContext, mCS, mClock);
+    clusterResources = Resource.newInstance(
+        leafAbsCapacities(qData[0], qData[7]), 0);
     ParentQueue mRoot = buildMockRootQueue(rand, qData);
     when(mCS.getRootQueue()).thenReturn(mRoot);
 
-    clusterResources =
-      Resource.newInstance(leafAbsCapacities(qData[0], qData[7]), 0);
+    setResourceAndNodeDetails();
+    return policy;
+  }
+
+  ProportionalCapacityPreemptionPolicy buildPolicy(int[][] qData,
+      String[][] resData) {
+    return buildPolicy(qData, resData, false);
+  }
+
+  ProportionalCapacityPreemptionPolicy buildPolicy(int[][] qData,
+      String[][] resData, boolean useDominantResourceCalculator) {
+    if (useDominantResourceCalculator) {
+      when(mCS.getResourceCalculator()).thenReturn(
+          new DominantResourceCalculator());
+    }
+    ProportionalCapacityPreemptionPolicy policy =
+        new ProportionalCapacityPreemptionPolicy(conf, rmContext, mCS, mClock);
+    clusterResources = leafAbsCapacities(parseResourceDetails(resData[0]),
+        qData[2]);
+    ParentQueue mRoot = buildMockRootQueue(rand, resData, qData);
+    when(mCS.getRootQueue()).thenReturn(mRoot);
+
+    setResourceAndNodeDetails();
+    return policy;
+  }
+
+  private void setResourceAndNodeDetails() {
     when(mCS.getClusterResource()).thenReturn(clusterResources);
     when(lm.getResourceByLabel(anyString(), any(Resource.class))).thenReturn(
         clusterResources);
@@ -906,35 +963,78 @@ public class TestProportionalCapacityPreemptionPolicy {
     SchedulerNode mNode = mock(SchedulerNode.class);
     when(mNode.getPartition()).thenReturn(RMNodeLabelsManager.NO_LABEL);
     when(mCS.getSchedulerNode(any(NodeId.class))).thenReturn(mNode);
-    return policy;
   }
 
   ParentQueue buildMockRootQueue(Random r, int[]... queueData) {
-    int[] abs      = queueData[0];
-    int[] maxCap   = queueData[1];
-    int[] used     = queueData[2];
-    int[] pending  = queueData[3];
-    int[] reserved = queueData[4];
-    int[] apps     = queueData[5];
-    int[] gran     = queueData[6];
-    int[] queues   = queueData[7];
-
-    return mockNested(abs, maxCap, used, pending,  reserved, apps, gran, queues);
+    Resource[] abs = generateResourceList(queueData[0]);
+    Resource[] used = generateResourceList(queueData[2]);
+    Resource[] pending = generateResourceList(queueData[3]);
+    Resource[] reserved = generateResourceList(queueData[4]);
+    Resource[] gran = generateResourceList(queueData[6]);
+    int[] maxCap = queueData[1];
+    int[] apps = queueData[5];
+    int[] queues = queueData[7];
+
+    return mockNested(abs, maxCap, used, pending, reserved, apps, gran, queues);
   }
 
-  ParentQueue mockNested(int[] abs, int[] maxCap, int[] used,
-      int[] pending, int[] reserved, int[] apps, int[] gran, int[] queues) {
-    float tot = leafAbsCapacities(abs, queues);
+  ParentQueue buildMockRootQueue(Random r, String[][] resData,
+      int[]... queueData) {
+    Resource[] abs = parseResourceDetails(resData[0]);
+    Resource[] used = parseResourceDetails(resData[1]);
+    Resource[] pending = parseResourceDetails(resData[2]);
+    Resource[] reserved = parseResourceDetails(resData[3]);
+    Resource[] gran = parseResourceDetails(resData[4]);
+    int[] maxCap = queueData[0];
+    int[] apps = queueData[1];
+    int[] queues = queueData[2];
+
+    return mockNested(abs, maxCap, used, pending, reserved, apps, gran, queues);
+  }
+
+  Resource[] parseResourceDetails(String[] resData) {
+    List<Resource> resourceList = new ArrayList<Resource>();
+    for (int i = 0; i < resData.length; i++) {
+      String[] resource = resData[i].split(":");
+      if (resource.length == 1) {
+        resourceList.add(Resource.newInstance(Integer.valueOf(resource[0]), 0));
+      } else {
+        resourceList.add(Resource.newInstance(Integer.valueOf(resource[0]),
+            Integer.valueOf(resource[1])));
+      }
+    }
+    return resourceList.toArray(new Resource[resourceList.size()]);
+  }
+
+  Resource[] generateResourceList(int[] qData) {
+    List<Resource> resourceList = new ArrayList<Resource>();
+    for (int i = 0; i < qData.length; i++) {
+      resourceList.add(Resource.newInstance(qData[i], 0));
+    }
+    return resourceList.toArray(new Resource[resourceList.size()]);
+  }
+
+  ParentQueue mockNested(Resource[] abs, int[] maxCap, Resource[] used,
+      Resource[] pending, Resource[] reserved, int[] apps, Resource[] gran,
+      int[] queues) {
+    ResourceCalculator rc = mCS.getResourceCalculator();
+    Resource tot = leafAbsCapacities(abs, queues);
     Deque<ParentQueue> pqs = new LinkedList<ParentQueue>();
     ParentQueue root = mockParentQueue(null, queues[0], pqs);
+    ResourceUsage resUsage = new ResourceUsage();
+    resUsage.setUsed(used[0]);
     when(root.getQueueName()).thenReturn(CapacitySchedulerConfiguration.ROOT);
-    when(root.getAbsoluteUsedCapacity()).thenReturn(used[0] / tot);
-    when(root.getAbsoluteCapacity()).thenReturn(abs[0] / tot);
-    when(root.getAbsoluteMaximumCapacity()).thenReturn(maxCap[0] / tot);
+    when(root.getAbsoluteUsedCapacity()).thenReturn(
+        Resources.divide(rc, tot, used[0], tot));
+    when(root.getAbsoluteCapacity()).thenReturn(
+        Resources.divide(rc, tot, abs[0], tot));
+    when(root.getAbsoluteMaximumCapacity()).thenReturn(
+        maxCap[0] / (float) tot.getMemory());
+    when(root.getQueueResourceUsage()).thenReturn(resUsage);
     QueueCapacities rootQc = new QueueCapacities(true);
-    rootQc.setAbsoluteUsedCapacity(used[0] / tot);
-    rootQc.setAbsoluteCapacity(abs[0] / tot);
-    rootQc.setAbsoluteMaximumCapacity(maxCap[0] / tot);
+    rootQc.setAbsoluteUsedCapacity(Resources.divide(rc, tot, used[0], tot));
+    rootQc.setAbsoluteCapacity(Resources.divide(rc, tot, abs[0], tot));
+    rootQc.setAbsoluteMaximumCapacity(maxCap[0] / (float) tot.getMemory());
     when(root.getQueueCapacities()).thenReturn(rootQc);
     when(root.getQueuePath()).thenReturn(CapacitySchedulerConfiguration.ROOT);
     boolean preemptionDisabled = mockPreemptionStatus("root");
@@ -943,28 +1043,35 @@ public class TestProportionalCapacityPreemptionPolicy {
     for (int i = 1; i < queues.length; ++i) {
       final CSQueue q;
       final ParentQueue p = pqs.removeLast();
-      final String queueName = "queue" + ((char)('A' + i - 1));
+      final String queueName = "queue" + ((char) ('A' + i - 1));
       if (queues[i] > 0) {
         q = mockParentQueue(p, queues[i], pqs);
+        ResourceUsage resUsagePerQueue = new ResourceUsage();
+        resUsagePerQueue.setUsed(used[i]);
+        when(q.getQueueResourceUsage()).thenReturn(resUsagePerQueue);
       } else {
         q = mockLeafQueue(p, tot, i, abs, used, pending, reserved, apps, gran);
       }
       when(q.getParent()).thenReturn(p);
       when(q.getQueueName()).thenReturn(queueName);
-      when(q.getAbsoluteUsedCapacity()).thenReturn(used[i] / tot);
-      when(q.getAbsoluteCapacity()).thenReturn(abs[i] / tot);
-      when(q.getAbsoluteMaximumCapacity()).thenReturn(maxCap[i] / tot);
+      when(q.getAbsoluteUsedCapacity()).thenReturn(
+          Resources.divide(rc, tot, used[i], tot));
+      when(q.getAbsoluteCapacity()).thenReturn(
+          Resources.divide(rc, tot, abs[i], tot));
+      when(q.getAbsoluteMaximumCapacity()).thenReturn(
+          maxCap[i] / (float) tot.getMemory());
 
       // We need to set these fields in QueueCapacities as well
       QueueCapacities qc = new QueueCapacities(false);
-      qc.setAbsoluteUsedCapacity(used[i] / tot);
-      qc.setAbsoluteCapacity(abs[i] / tot);
-      qc.setAbsoluteMaximumCapacity(maxCap[i] / tot);
+      qc.setAbsoluteUsedCapacity(Resources.divide(rc, tot, used[i], tot));
+      qc.setAbsoluteCapacity(Resources.divide(rc, tot, abs[i], tot));
+      qc.setAbsoluteMaximumCapacity(maxCap[i] / (float) tot.getMemory());
       when(q.getQueueCapacities()).thenReturn(qc);
 
       String parentPathName = p.getQueuePath();
       parentPathName = (parentPathName == null) ? "root" : parentPathName;
-      String queuePathName = (parentPathName+"."+queueName).replace("/","root");
+      String queuePathName = (parentPathName + "." + queueName).replace("/",
+          "root");
       when(q.getQueuePath()).thenReturn(queuePathName);
       preemptionDisabled = mockPreemptionStatus(queuePathName);
       when(q.getPreemptionDisabled()).thenReturn(preemptionDisabled);
@@ -1004,16 +1111,18 @@ public class TestProportionalCapacityPreemptionPolicy {
   }
 
   @SuppressWarnings("rawtypes")
-  LeafQueue mockLeafQueue(ParentQueue p, float tot, int i, int[] abs, 
-      int[] used, int[] pending, int[] reserved, int[] apps, int[] gran) {
+  LeafQueue mockLeafQueue(ParentQueue p, Resource tot, int i, Resource[] abs,
+      Resource[] used, Resource[] pending, Resource[] reserved, int[] apps,
+      Resource[] gran) {
     LeafQueue lq = mock(LeafQueue.class);
+    ResourceCalculator rc = mCS.getResourceCalculator();
     List<ApplicationAttemptId> appAttemptIdList = 
         new ArrayList<ApplicationAttemptId>();
-    when(lq.getTotalResourcePending()).thenReturn(
-        Resource.newInstance(pending[i], 0));
+    when(lq.getTotalResourcePending()).thenReturn(pending[i]);
     // need to set pending resource in resource usage as well
     ResourceUsage ru = new ResourceUsage();
-    ru.setPending(Resource.newInstance(pending[i], 0));
+    ru.setPending(pending[i]);
+    ru.setUsed(used[i]);
     when(lq.getQueueResourceUsage()).thenReturn(ru);
     // consider moving to where CapacityScheduler::comparator is accessible
     final NavigableSet<FiCaSchedulerApp> qApps = new TreeSet<FiCaSchedulerApp>(
@@ -1026,9 +1135,9 @@ public class TestProportionalCapacityPreemptionPolicy {
       });
     // applications are added in global L->R order in queues
     if (apps[i] != 0) {
-      int aUsed    = used[i] / apps[i];
-      int aPending = pending[i] / apps[i];
-      int aReserve = reserved[i] / apps[i];
+      Resource aUsed = Resources.divideAndCeil(rc, used[i], apps[i]);
+      Resource aPending = Resources.divideAndCeil(rc, pending[i], apps[i]);
+      Resource aReserve = Resources.divideAndCeil(rc, reserved[i], apps[i]);
       for (int a = 0; a < apps[i]; ++a) {
         FiCaSchedulerApp mockFiCaApp =
             mockApp(i, appAlloc, aUsed, aPending, aReserve, gran[i]);
@@ -1055,9 +1164,10 @@ public class TestProportionalCapacityPreemptionPolicy {
     return lq;
   }
 
-  FiCaSchedulerApp mockApp(int qid, int id, int used, int pending, int reserved,
-      int gran) {
+  FiCaSchedulerApp mockApp(int qid, int id, Resource used, Resource pending,
+      Resource reserved, Resource gran) {
     FiCaSchedulerApp app = mock(FiCaSchedulerApp.class);
+    ResourceCalculator rc = mCS.getResourceCalculator();
 
     ApplicationId appId = ApplicationId.newInstance(TS, id);
     ApplicationAttemptId appAttId = ApplicationAttemptId.newInstance(appId, 0);
@@ -1065,30 +1175,35 @@ public class TestProportionalCapacityPreemptionPolicy {
     when(app.getApplicationAttemptId()).thenReturn(appAttId);
 
     int cAlloc = 0;
-    Resource unit = Resource.newInstance(gran, 0);
+    Resource unit = gran;
     List<RMContainer> cReserved = new ArrayList<RMContainer>();
-    for (int i = 0; i < reserved; i += gran) {
-      cReserved.add(mockContainer(appAttId, cAlloc, unit, priority.CONTAINER
-          .getValue()));
+    Resource resIter = Resource.newInstance(0, 0);
+    for (; Resources.lessThan(rc, clusterResources, resIter, reserved);
+        Resources.addTo(resIter, gran)) {
+      cReserved.add(mockContainer(appAttId, cAlloc, unit,
+          priority.CONTAINER.getValue()));
       ++cAlloc;
     }
     when(app.getReservedContainers()).thenReturn(cReserved);
 
     List<RMContainer> cLive = new ArrayList<RMContainer>();
-    for (int i = 0; i < used; i += gran) {
-      if(setAMContainer && i == 0){
-        cLive.add(mockContainer(appAttId, cAlloc, unit, priority.AMCONTAINER
-            .getValue()));
-      }else if(setLabeledContainer && i ==1){
+    Resource usedIter = Resource.newInstance(0, 0);
+    int i = 0;
+    for (; Resources.lessThan(rc, clusterResources, usedIter, used);
+        Resources.addTo(usedIter, gran)) {
+      if (setAMContainer && i == 0) {
+        cLive.add(mockContainer(appAttId, cAlloc, unit,
+            priority.AMCONTAINER.getValue()));
+      } else if (setLabeledContainer && i == 1) {
         cLive.add(mockContainer(appAttId, cAlloc, unit,
             priority.LABELEDCONTAINER.getValue()));
-        ++used;
-      }
-      else{
-        cLive.add(mockContainer(appAttId, cAlloc, unit, priority.CONTAINER
-            .getValue()));
+        Resources.addTo(used, Resource.newInstance(1, 1));
+      } else {
+        cLive.add(mockContainer(appAttId, cAlloc, unit,
+            priority.CONTAINER.getValue()));
       }
       ++cAlloc;
+      ++i;
     }
     when(app.getLiveContainers()).thenReturn(cLive);
     return app;
@@ -1124,6 +1239,16 @@ public class TestProportionalCapacityPreemptionPolicy {
     return ret;
   }
 
+  static Resource leafAbsCapacities(Resource[] abs, int[] subqueues) {
+    Resource ret = Resource.newInstance(0, 0);
+    for (int i = 0; i < abs.length; ++i) {
+      if (0 == subqueues[i]) {
+        Resources.addTo(ret, abs[i]);
+      }
+    }
+    return ret;
+  }
+
   void printString(CSQueue nq, String indent) {
     if (nq instanceof ParentQueue) {
       System.out.println(indent + nq.getQueueName()

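The VCore-aware tests above feed queue resources through a small
"memory[:vcores]" string grammar (see parseResourceDetails). A standalone
sketch of the same parsing idea, assuming only hadoop-yarn-api on the
classpath (the ResourceSpecParser class name is illustrative, not from the
patch):

    import org.apache.hadoop.yarn.api.records.Resource;

    public class ResourceSpecParser {
      /** Parses "mem" or "mem:vcores"; vcores default to 0 when omitted. */
      static Resource parse(String spec) {
        String[] parts = spec.split(":");
        int memory = Integer.parseInt(parts[0].trim());
        int vcores = parts.length > 1 ? Integer.parseInt(parts[1].trim()) : 0;
        return Resource.newInstance(memory, vcores);
      }

      public static void main(String[] args) {
        System.out.println(parse("100:100")); // <memory:100, vCores:100>
        System.out.println(parse("50"));      // <memory:50, vCores:0>
      }
    }

A lone value (like the "-1" used for the root's request granularity in the
resData tables) is read as memory-only with zero vcores, which keeps the old
single-number test inputs meaningful.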
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1df39c1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
index 114769c..b3ac79b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
@@ -70,6 +70,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEv
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.OrderingPolicy;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
+import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.Assert;
@@ -771,6 +772,60 @@ public class TestProportionalCapacityPreemptionPolicyForNodePartitions {
         argThat(new IsPreemptionRequestFor(getAppAttemptId(2))));
   }
 
+  @Test
+  public void testNodePartitionPreemptionWithVCoreResource() throws IOException {
+    /**
+     * Queue structure is:
+     *
+     * <pre>
+     *       root
+     *       /  \
+     *      a    b
+     * </pre>
+     *
+     * Both a and b can access x, and their guaranteed capacities are 50:50.
+     * There are two nodes: n1 has 100 of x, n2 has 100 of NO_LABEL. Four
+     * applications run in the cluster: app1/app2 in a, and app3/app4 in b.
+     * app1 uses 80 x, app2 uses 20 NO_LABEL, app3 uses 20 x, and app4 uses 80
+     * NO_LABEL. Both a and b have 50 pending resource for x and NO_LABEL.
+     *
+     * After preemption, it should preempt 30 from app1, and 30 from app4.
+     */
+    String labelsConfig = "=100:200,true;" + // default partition
+        "x=100:200,true"; // partition=x
+    String nodesConfig = "n1=x;" + // n1 has partition=x
+        "n2="; // n2 is default partition
+    String queuesConfig =
+    // guaranteed,max,used,pending
+    "root(=[100:200 100:200 100:200 100:200],x=[100:200 100:200 100:200 100:200]);"
+        + // root
+        "-a(=[50:100 100:200 20:40 50:100],x=[50:100 100:200 80:160 50:100]);" + // a
+        "-b(=[50:100 100:200 80:160 50:100],x=[50:100 100:200 20:40 50:100])"; // b
+    String appsConfig =
+    // queueName\t(priority,resource,host,expression,#repeat,reserved)
+    "a\t" // app1 in a
+        + "(1,1:2,n1,x,80,false);" + // 80 * x in n1
+        "a\t" // app2 in a
+        + "(1,1:2,n2,,20,false);" + // 20 default in n2
+        "b\t" // app3 in b
+        + "(1,1:2,n1,x,20,false);" + // 20 * x in n1
+        "b\t" // app4 in b
+        + "(1,1:2,n2,,80,false)"; // 80 default in n2
+
+    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig, true);
+    policy.editSchedule();
+
+    // 30 preempted from app1, 30 preempted from app4, and nothing preempted
+    // from app2/app3
+    verify(mDisp, times(30)).handle(
+        argThat(new IsPreemptionRequestFor(getAppAttemptId(1))));
+    verify(mDisp, times(30)).handle(
+        argThat(new IsPreemptionRequestFor(getAppAttemptId(4))));
+    verify(mDisp, never()).handle(
+        argThat(new IsPreemptionRequestFor(getAppAttemptId(2))));
+    verify(mDisp, never()).handle(
+        argThat(new IsPreemptionRequestFor(getAppAttemptId(3))));
+  }
 
   private ApplicationAttemptId getAppAttemptId(int id) {
     ApplicationId appId = ApplicationId.newInstance(0L, id);
@@ -821,6 +876,16 @@ public class TestProportionalCapacityPreemptionPolicyForNodePartitions {
 
   private void buildEnv(String labelsConfig, String nodesConfig,
       String queuesConfig, String appsConfig) throws IOException {
+    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig, false);
+  }
+
+  private void buildEnv(String labelsConfig, String nodesConfig,
+      String queuesConfig, String appsConfig,
+      boolean useDominantResourceCalculator) throws IOException {
+    if (useDominantResourceCalculator) {
+      when(cs.getResourceCalculator()).thenReturn(
+          new DominantResourceCalculator());
+    }
     mockNodeLabelsManager(labelsConfig);
     mockSchedulerNodes(nodesConfig);
     for (NodeId nodeId : nodeIdToSchedulerNodes.keySet()) {
@@ -832,7 +897,8 @@ public class TestProportionalCapacityPreemptionPolicyForNodePartitions {
     when(cs.getClusterResource()).thenReturn(clusterResource);
     mockApplications(appsConfig);
 
-    policy = new ProportionalCapacityPreemptionPolicy(conf, rmContext, cs, mClock);
+    policy = new ProportionalCapacityPreemptionPolicy(conf, rmContext, cs,
+        mClock);
   }
 
   private void mockContainers(String containersConfig, ApplicationAttemptId attemptId,
@@ -868,7 +934,7 @@ public class TestProportionalCapacityPreemptionPolicyForNodePartitions {
             + "(priority,resource,host,expression,repeat,reserved)");
       }
       Priority pri = Priority.newInstance(Integer.valueOf(values[0]));
-      Resource res = Resources.createResource(Integer.valueOf(values[1]));
+      Resource res = parseResourceFromString(values[1]);
       NodeId host = NodeId.newInstance(values[2], 1);
       String exp = values[3];
       int repeat = Integer.valueOf(values[4]);
@@ -1002,11 +1068,10 @@ public class TestProportionalCapacityPreemptionPolicyForNodePartitions {
     clusterResource = Resources.createResource(0);
     for (String p : partitionConfigArr) {
       String partitionName = p.substring(0, p.indexOf("="));
-      int totalResource =
-          Integer.valueOf(p.substring(p.indexOf("=") + 1, p.indexOf(",")));
-      boolean exclusivity =
+      Resource res = parseResourceFromString(p.substring(p.indexOf("=") + 1,
+          p.indexOf(",")));
+      boolean exclusivity =
           Boolean.valueOf(p.substring(p.indexOf(",") + 1, p.length()));
-      Resource res = Resources.createResource(totalResource);
       when(nlm.getResourceByLabel(eq(partitionName), any(Resource.class)))
           .thenReturn(res);
       when(nlm.isExclusiveNodeLabel(eq(partitionName))).thenReturn(exclusivity);
@@ -1022,6 +1087,18 @@ public class TestProportionalCapacityPreemptionPolicyForNodePartitions {
         partitionToResource.keySet());
   }
 
+  private Resource parseResourceFromString(String p) {
+    String[] resource = p.split(":");
+    Resource res;
+    if (resource.length == 1) {
+      res = Resources.createResource(Integer.valueOf(resource[0]));
+    } else {
+      res = Resources.createResource(Integer.valueOf(resource[0]),
+          Integer.valueOf(resource[1]));
+    }
+    return res;
+  }
+
   /**
    * Format is:
    * <pre>
@@ -1120,23 +1197,22 @@ public class TestProportionalCapacityPreemptionPolicyForNodePartitions {
       // Add a small epsilon to capacities to avoid truncation when doing
       // Resources.multiply
       float epsilon = 1e-6f;
-      float absGuaranteed =
-          Integer.valueOf(values[0].trim())
-              / (float) (partitionToResource.get(partitionName).getMemory())
-              + epsilon;
-      float absMax =
-          Integer.valueOf(values[1].trim())
-              / (float) (partitionToResource.get(partitionName).getMemory())
-              + epsilon;
-      float absUsed =
-          Integer.valueOf(values[2].trim())
-              / (float) (partitionToResource.get(partitionName).getMemory())
-              + epsilon;
-      Resource pending = Resources.createResource(Integer.valueOf(values[3].trim()));
+      Resource totResourcePerPartition = partitionToResource.get(partitionName);
+      float absGuaranteed = Resources.divide(rc, totResourcePerPartition,
+          parseResourceFromString(values[0].trim()), totResourcePerPartition)
+          + epsilon;
+      float absMax = Resources.divide(rc, totResourcePerPartition,
+          parseResourceFromString(values[1].trim()), totResourcePerPartition)
+          + epsilon;
+      float absUsed = Resources.divide(rc, totResourcePerPartition,
+          parseResourceFromString(values[2].trim()), totResourcePerPartition)
+          + epsilon;
+      Resource pending = parseResourceFromString(values[3].trim());
       qc.setAbsoluteCapacity(partitionName, absGuaranteed);
       qc.setAbsoluteMaximumCapacity(partitionName, absMax);
       qc.setAbsoluteUsedCapacity(partitionName, absUsed);
       ru.setPending(partitionName, pending);
+      ru.setUsed(partitionName, parseResourceFromString(values[2].trim()));
       LOG.debug("Setup queue=" + queueName + " partition=" + partitionName
           + " [abs_guaranteed=" + absGuaranteed + ",abs_max=" + absMax
           + ",abs_used" + absUsed + ",pending_resource=" + pending + "]");