Posted to common-commits@hadoop.apache.org by aw...@apache.org on 2016/08/31 15:05:21 UTC

[01/19] hadoop git commit: HDFS-10760. DataXceiver#run() should not log InvalidToken exception as an error. Contributed by Pan Yuxuan. [Forced Update!]

Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13341 af9f0e7bf -> 536ad247b (forced update)


HDFS-10760. DataXceiver#run() should not log InvalidToken exception as an error. Contributed by Pan Yuxuan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4ee6915
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4ee6915
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4ee6915

Branch: refs/heads/HADOOP-13341
Commit: c4ee6915a14e00342755d7cdcbf2d61518f306aa
Parents: af50860
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Tue Aug 30 10:43:20 2016 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Tue Aug 30 10:43:20 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/server/datanode/DataXceiver.java    | 6 ++++++
 1 file changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4ee6915/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index c2cf76e..fee16b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -312,6 +312,12 @@ class DataXceiver extends Receiver implements Runnable {
         } else {
           LOG.info(s1 + "; " + t);          
         }
+      } else if (t instanceof InvalidToken) {
+        // The InvalidToken exception has already been logged in
+        // checkAccess() method and this is not a server error.
+        if (LOG.isTraceEnabled()) {
+          LOG.trace(s, t);
+        }
       } else {
         LOG.error(s, t);
       }
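
The fix demotes a client-side token failure from ERROR to TRACE. Stripped of the DataXceiver context, the pattern reduces to the following minimal sketch; the handleFault helper and logger wiring are illustrative assumptions, not part of the commit:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.security.token.SecretManager.InvalidToken;

    public class FaultLogging {
      private static final Log LOG = LogFactory.getLog(FaultLogging.class);

      // Hypothetical helper mirroring the change above: an InvalidToken is
      // a client-side problem that was already logged by the access check,
      // so it is demoted to TRACE; anything else is a genuine server error.
      static void handleFault(String s, Throwable t) {
        if (t instanceof InvalidToken) {
          if (LOG.isTraceEnabled()) {
            LOG.trace(s, t);
          }
        } else {
          LOG.error(s, t);
        }
      }
    }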




[05/19] hadoop git commit: YARN-5221. Expose UpdateResourceRequest API to allow AM to request for change in container properties. (asuresh)

Posted by aw...@apache.org.
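
At a high level, YARN-5221 replaces the separate increase/decrease request lists on AllocateRequest with a single list of UpdateContainerRequest records, each carrying a container version that must match the RM's current view of the container. A minimal AM-side sketch follows; it assumes the record's setters mirror the getters used in the diffs below (setContainerId, setContainerVersion, setCapability, and setUpdateRequests are assumptions):

    import java.util.Collections;
    import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
    import org.apache.hadoop.yarn.factories.RecordFactory;
    import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

    public class UpdateRequestSketch {
      private static final RecordFactory RECORDS =
          RecordFactoryProvider.getRecordFactory(null);

      // Ask the RM to grow one container to 4 GB / 2 vcores. The version
      // must match the container's current version on the RM; otherwise
      // the request is returned in the response's update-error list
      // (setUpdateErrors in the diffs below) instead of being applied.
      static AllocateRequest buildUpdate(ContainerId id, int currentVersion) {
        UpdateContainerRequest update =
            RECORDS.newRecordInstance(UpdateContainerRequest.class);
        update.setContainerId(id);
        update.setContainerVersion(currentVersion);
        update.setCapability(Resource.newInstance(4096, 2));
        AllocateRequest req = RECORDS.newRecordInstance(AllocateRequest.class);
        req.setUpdateRequests(Collections.singletonList(update));
        return req;
      }
    }

Note that no exception reaches the AM for a bad update; errors are aggregated and returned, as the ApplicationMasterService diff below shows.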
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
index 112095e..4bcdf5c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
@@ -70,7 +70,7 @@ public class NMNullStateStoreService extends NMStateStoreService {
   }
 
   @Override
-  public void storeContainer(ContainerId containerId,
+  public void storeContainer(ContainerId containerId, int version,
       StartContainerRequest startRequest) throws IOException {
   }
 
@@ -90,7 +90,7 @@ public class NMNullStateStoreService extends NMStateStoreService {
 
   @Override
   public void storeContainerResourceChanged(ContainerId containerId,
-      Resource capability) throws IOException {
+      int version, Resource capability) throws IOException {
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
index 57f35a4..9f9ee75 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
@@ -77,6 +77,7 @@ public abstract class NMStateStoreService extends AbstractService {
     private int remainingRetryAttempts = ContainerRetryContext.RETRY_INVALID;
     private String workDir;
     private String logDir;
+    int version;
 
     public RecoveredContainerStatus getStatus() {
       return status;
@@ -94,6 +95,10 @@ public abstract class NMStateStoreService extends AbstractService {
       return diagnostics;
     }
 
+    public int getVersion() {
+      return version;
+    }
+
     public StartContainerRequest getStartRequest() {
       return startRequest;
     }
@@ -130,6 +135,7 @@ public abstract class NMStateStoreService extends AbstractService {
     public String toString() {
       return new StringBuffer("Status: ").append(getStatus())
           .append(", Exit code: ").append(exitCode)
+          .append(", Version: ").append(version)
           .append(", Killed: ").append(getKilled())
           .append(", Diagnostics: ").append(getDiagnostics())
           .append(", Capability: ").append(getCapability())
@@ -306,11 +312,13 @@ public abstract class NMStateStoreService extends AbstractService {
   /**
    * Record a container start request
    * @param containerId the container ID
+   * @param containerVersion the container Version
    * @param startRequest the container start request
    * @throws IOException
    */
   public abstract void storeContainer(ContainerId containerId,
-      StartContainerRequest startRequest) throws IOException;
+      int containerVersion, StartContainerRequest startRequest)
+      throws IOException;
 
   /**
    * Record that a container has been queued at the NM
@@ -331,11 +339,12 @@ public abstract class NMStateStoreService extends AbstractService {
   /**
    * Record that a container resource has been changed
    * @param containerId the container ID
+   * @param containerVersion the container version
    * @param capability the container resource capability
    * @throws IOException
    */
   public abstract void storeContainerResourceChanged(ContainerId containerId,
-      Resource capability) throws IOException;
+      int containerVersion, Resource capability) throws IOException;
 
   /**
    * Record that a container has completed

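A small sketch of how a caller might exercise the two signatures above; the method names and parameters are taken directly from this hunk, while the helper itself and its version numbers are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;

    public class VersionRecoverySketch {
      // Persist the start request together with the container version so
      // that recovery after an NM restart can report, via
      // RecoveredContainerState#getVersion(), which resource profile the
      // container last had.
      static void recordStartAndResize(NMStateStoreService store,
          ContainerId id, StartContainerRequest startReq) throws IOException {
        store.storeContainer(id, 0, startReq);        // version 0 at start
        store.storeContainerResourceChanged(id, 1,    // bump version on update
            Resource.newInstance(2048, 2));
      }
    }
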
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
index ee2677c..f6593f9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
@@ -875,7 +875,7 @@ public class TestNodeManagerResync {
         ApplicationAttemptId.newInstance(applicationId, 1);
     ContainerId containerId = ContainerId.newContainerId(applicationAttemptId, id);
     NMContainerStatus containerReport =
-        NMContainerStatus.newInstance(containerId, containerState,
+        NMContainerStatus.newInstance(containerId, 0, containerState,
           Resource.newInstance(1024, 1), "recover container", 0,
           Priority.newInstance(10), 0);
     return containerReport;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
index c71b1e6..977cb76 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
@@ -251,7 +251,7 @@ public class TestNodeStatusUpdater {
         String user = "testUser";
         ContainerTokenIdentifier containerToken = BuilderUtils
             .newContainerTokenIdentifier(BuilderUtils.newContainerToken(
-                firstContainerID, InetAddress.getByName("localhost")
+                firstContainerID, 0, InetAddress.getByName("localhost")
                     .getCanonicalHostName(), 1234, user, resource,
                 currentTime + 10000, 123, "password".getBytes(), currentTime));
         Context context = mock(Context.class);
@@ -292,7 +292,7 @@ public class TestNodeStatusUpdater {
         Resource resource = BuilderUtils.newResource(3, 1);
         ContainerTokenIdentifier containerToken = BuilderUtils
             .newContainerTokenIdentifier(BuilderUtils.newContainerToken(
-                secondContainerID, InetAddress.getByName("localhost")
+                secondContainerID, 0, InetAddress.getByName("localhost")
                     .getCanonicalHostName(), 1234, user, resource,
                 currentTime + 10000, 123, "password".getBytes(), currentTime));
         Context context = mock(Context.class);
@@ -1013,7 +1013,7 @@ public class TestNodeStatusUpdater {
   
     ContainerId cId = ContainerId.newContainerId(appAttemptId, 1);
     Token containerToken =
-        BuilderUtils.newContainerToken(cId, "anyHost", 1234, "anyUser",
+        BuilderUtils.newContainerToken(cId, 0, "anyHost", 1234, "anyUser",
             BuilderUtils.newResource(1024, 1), 0, 123,
             "password".getBytes(), 0);
     Container anyCompletedContainer = new ContainerImpl(conf, null,
@@ -1035,7 +1035,7 @@ public class TestNodeStatusUpdater {
     ContainerId runningContainerId =
         ContainerId.newContainerId(appAttemptId, 3);
     Token runningContainerToken =
-        BuilderUtils.newContainerToken(runningContainerId, "anyHost",
+        BuilderUtils.newContainerToken(runningContainerId, 0, "anyHost",
           1234, "anyUser", BuilderUtils.newResource(1024, 1), 0, 123,
           "password".getBytes(), 0);
     Container runningContainer =
@@ -1103,7 +1103,7 @@ public class TestNodeStatusUpdater {
     ContainerId runningContainerId =
         ContainerId.newContainerId(appAttemptId, 1);
     Token runningContainerToken =
-        BuilderUtils.newContainerToken(runningContainerId, "anyHost",
+        BuilderUtils.newContainerToken(runningContainerId, 0, "anyHost",
           1234, "anyUser", BuilderUtils.newResource(1024, 1), 0, 123,
           "password".getBytes(), 0);
     Container runningContainer =
@@ -1131,14 +1131,16 @@ public class TestNodeStatusUpdater {
         appAttemptId, 2);
     ContainerTokenIdentifier killedQueuedContainerTokenId1 = BuilderUtils
         .newContainerTokenIdentifier(BuilderUtils.newContainerToken(
-            killedQueuedContainerId1, "anyHost", 1234, "anyUser", BuilderUtils
-                .newResource(1024, 1), 0, 123, "password".getBytes(), 0));
+            killedQueuedContainerId1, 0, "anyHost", 1234, "anyUser",
+            BuilderUtils.newResource(1024, 1), 0, 123,
+            "password".getBytes(), 0));
     ContainerId killedQueuedContainerId2 = ContainerId.newContainerId(
         appAttemptId, 3);
     ContainerTokenIdentifier killedQueuedContainerTokenId2 = BuilderUtils
         .newContainerTokenIdentifier(BuilderUtils.newContainerToken(
-            killedQueuedContainerId2, "anyHost", 1234, "anyUser", BuilderUtils
-                .newResource(1024, 1), 0, 123, "password".getBytes(), 0));
+            killedQueuedContainerId2, 0, "anyHost", 1234, "anyUser",
+            BuilderUtils.newResource(1024, 1), 0, 123,
+            "password".getBytes(), 0));
 
     nm.getNMContext().getQueuingContext().getKilledQueuedContainers().put(
         killedQueuedContainerTokenId1, "Queued container killed.");
@@ -1214,7 +1216,7 @@ public class TestNodeStatusUpdater {
         ApplicationAttemptId.newInstance(appId, 0);
     ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
     Token containerToken =
-        BuilderUtils.newContainerToken(containerId, "host", 1234, "user",
+        BuilderUtils.newContainerToken(containerId, 0, "host", 1234, "user",
             BuilderUtils.newResource(1024, 1), 0, 123,
             "password".getBytes(), 0);
 
@@ -1253,7 +1255,7 @@ public class TestNodeStatusUpdater {
 
     ContainerId cId = ContainerId.newContainerId(appAttemptId, 1);
     Token containerToken =
-        BuilderUtils.newContainerToken(cId, "anyHost", 1234, "anyUser",
+        BuilderUtils.newContainerToken(cId, 0, "anyHost", 1234, "anyUser",
             BuilderUtils.newResource(1024, 1), 0, 123,
             "password".getBytes(), 0);
     Container anyCompletedContainer = new ContainerImpl(conf, null,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/MockResourceManagerFacade.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/MockResourceManagerFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/MockResourceManagerFacade.java
index 0652e96..2ccf827 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/MockResourceManagerFacade.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/MockResourceManagerFacade.java
@@ -106,6 +106,7 @@ import org.apache.hadoop.yarn.api.records.NMToken;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -297,8 +298,7 @@ public class MockResourceManagerFacade implements
         new ArrayList<ContainerStatus>(), containerList,
         new ArrayList<NodeReport>(), null, AMCommand.AM_RESYNC, 1, null,
         new ArrayList<NMToken>(),
-        new ArrayList<Container>(),
-        new ArrayList<Container>());
+        new ArrayList<UpdatedContainer>());
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
index 726b353..ec38501 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
@@ -403,7 +403,7 @@ public abstract class BaseContainerManagerTest {
       LogAggregationContext logAggregationContext, ExecutionType executionType)
       throws IOException {
     ContainerTokenIdentifier containerTokenIdentifier =
-        new ContainerTokenIdentifier(cId, nodeId.toString(), user, resource,
+        new ContainerTokenIdentifier(cId, 0, nodeId.toString(), user, resource,
             System.currentTimeMillis() + 100000L, 123, rmIdentifier,
             Priority.newInstance(0), 0, logAggregationContext, null,
             ContainerType.TASK, executionType);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java
index 3c5edc0..15c0e84 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java
@@ -125,9 +125,10 @@ public class NMMemoryStateStoreService extends NMStateStoreService {
 
   @Override
   public synchronized void storeContainer(ContainerId containerId,
-      StartContainerRequest startRequest) throws IOException {
+      int version, StartContainerRequest startRequest) throws IOException {
     RecoveredContainerState rcs = new RecoveredContainerState();
     rcs.startRequest = startRequest;
+    rcs.version = version;
     containerStates.put(containerId, rcs);
   }
 
@@ -156,9 +157,11 @@ public class NMMemoryStateStoreService extends NMStateStoreService {
 
   @Override
   public synchronized void storeContainerResourceChanged(
-      ContainerId containerId, Resource capability) throws IOException {
+      ContainerId containerId, int version, Resource capability)
+      throws IOException {
     RecoveredContainerState rcs = getRecoveredContainerState(containerId);
     rcs.capability = capability;
+    rcs.version = version;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
index d254e4b..1b21628 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
@@ -262,11 +262,12 @@ public class TestNMLeveldbStateStoreService {
         StartContainerRequest.newInstance(clc, containerToken);
 
     // store a container and verify recovered
-    stateStore.storeContainer(containerId, containerReq);
+    stateStore.storeContainer(containerId, 1, containerReq);
     restartStateStore();
     recoveredContainers = stateStore.loadContainersState();
     assertEquals(1, recoveredContainers.size());
     RecoveredContainerState rcs = recoveredContainers.get(0);
+    assertEquals(1, rcs.getVersion());
     assertEquals(RecoveredContainerStatus.REQUESTED, rcs.getStatus());
     assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
     assertEquals(false, rcs.getKilled());
@@ -308,11 +309,13 @@ public class TestNMLeveldbStateStoreService {
     assertEquals(diags.toString(), rcs.getDiagnostics());
 
     // increase the container size, and verify recovered
-    stateStore.storeContainerResourceChanged(containerId, Resource.newInstance(2468, 4));
+    stateStore.storeContainerResourceChanged(containerId, 2,
+        Resource.newInstance(2468, 4));
     restartStateStore();
     recoveredContainers = stateStore.loadContainersState();
     assertEquals(1, recoveredContainers.size());
     rcs = recoveredContainers.get(0);
+    assertEquals(2, rcs.getVersion());
     assertEquals(RecoveredContainerStatus.LAUNCHED, rcs.getStatus());
     assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
     assertEquals(false, rcs.getKilled());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java
index 7513fdf..8332b2a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java
@@ -67,7 +67,7 @@ public class MockContainer implements Container {
     long currentTime = System.currentTimeMillis();
     this.containerTokenIdentifier =
         BuilderUtils.newContainerTokenIdentifier(BuilderUtils
-          .newContainerToken(id, "127.0.0.1", 1234, user,
+          .newContainerToken(id, 0, "127.0.0.1", 1234, user,
             BuilderUtils.newResource(1024, 1), currentTime + 10000, 123,
             "password".getBytes(), currentTime));
     this.state = ContainerState.NEW;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java
index 41037f7..be1dae1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java
@@ -212,9 +212,9 @@ public class TestNMWebServer {
           recordFactory.newRecordInstance(ContainerLaunchContext.class);
       long currentTime = System.currentTimeMillis();
       Token containerToken =
-          BuilderUtils.newContainerToken(containerId, "127.0.0.1", 1234, user,
-            BuilderUtils.newResource(1024, 1), currentTime + 10000L, 123,
-            "password".getBytes(), currentTime);
+          BuilderUtils.newContainerToken(containerId, 0, "127.0.0.1", 1234,
+              user, BuilderUtils.newResource(1024, 1), currentTime + 10000L,
+              123, "password".getBytes(), currentTime);
       Context context = mock(Context.class);
       Container container =
           new ContainerImpl(conf, dispatcher, launchContext,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
index f575961..4d73ba2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
 import org.apache.hadoop.yarn.api.records.NMToken;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeReport;
@@ -66,6 +67,9 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.StrictPreemptionContract;
+import org.apache.hadoop.yarn.api.records.UpdateContainerError;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
+import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
@@ -87,6 +91,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptS
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptRegistrationEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptStatusupdateEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptUnregistrationEvent;
+
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
@@ -489,15 +494,6 @@ public class ApplicationMasterService extends AbstractService implements
         throw e;
       }
 
-      try {
-        RMServerUtils.increaseDecreaseRequestSanityCheck(rmContext,
-            request.getIncreaseRequests(), request.getDecreaseRequests(),
-            maximumCapacity);
-      } catch (InvalidResourceRequestException e) {
-        LOG.warn(e);
-        throw e;
-      }
-
       // In the case of work-preserving AM restart, it's possible for the
       // AM to release containers from the earlier attempt.
       if (!app.getApplicationSubmissionContext()
@@ -505,11 +501,22 @@ public class ApplicationMasterService extends AbstractService implements
         try {
           RMServerUtils.validateContainerReleaseRequest(release, appAttemptId);
         } catch (InvalidContainerReleaseException e) {
-          LOG.warn("Invalid container release by application " + appAttemptId, e);
+          LOG.warn("Invalid container release by application " + appAttemptId,
+              e);
           throw e;
         }
       }
 
+      // Split Update Resource Requests into increase and decrease.
+      // No Exceptions are thrown here. All update errors are aggregated
+      // and returned to the AM.
+      List<UpdateContainerRequest> increaseResourceReqs = new ArrayList<>();
+      List<UpdateContainerRequest> decreaseResourceReqs = new ArrayList<>();
+      List<UpdateContainerError> updateContainerErrors =
+          RMServerUtils.validateAndSplitUpdateResourceRequests(rmContext,
+              request, maximumCapacity, increaseResourceReqs,
+              decreaseResourceReqs);
+
       // Send new requests to appAttempt.
       Allocation allocation;
       RMAppAttemptState state =
@@ -524,7 +531,7 @@ public class ApplicationMasterService extends AbstractService implements
         allocation =
             this.rScheduler.allocate(appAttemptId, ask, release,
                 blacklistAdditions, blacklistRemovals,
-                request.getIncreaseRequests(), request.getDecreaseRequests());
+                increaseResourceReqs, decreaseResourceReqs);
       }
 
       if (!blacklistAdditions.isEmpty() || !blacklistRemovals.isEmpty()) {
@@ -539,6 +546,10 @@ public class ApplicationMasterService extends AbstractService implements
         allocateResponse.setNMTokens(allocation.getNMTokens());
       }
 
+      // Notify the AM of container update errors
+      if (!updateContainerErrors.isEmpty()) {
+        allocateResponse.setUpdateErrors(updateContainerErrors);
+      }
       // update the response with the deltas of node status changes
       List<RMNode> updatedNodes = new ArrayList<RMNode>();
       if(app.pullRMNodeUpdates(updatedNodes) > 0) {
@@ -572,8 +583,23 @@ public class ApplicationMasterService extends AbstractService implements
       allocateResponse.setAvailableResources(allocation.getResourceLimit());
       
       // Handling increased/decreased containers
-      allocateResponse.setIncreasedContainers(allocation.getIncreasedContainers());
-      allocateResponse.setDecreasedContainers(allocation.getDecreasedContainers());
+      List<UpdatedContainer> updatedContainers = new ArrayList<>();
+      if (allocation.getIncreasedContainers() != null) {
+        for (Container c : allocation.getIncreasedContainers()) {
+          updatedContainers.add(
+              UpdatedContainer.newInstance(
+                  ContainerUpdateType.INCREASE_RESOURCE, c));
+        }
+      }
+      if (allocation.getDecreasedContainers() != null) {
+        for (Container c : allocation.getDecreasedContainers()) {
+          updatedContainers.add(
+              UpdatedContainer.newInstance(
+                  ContainerUpdateType.DECREASE_RESOURCE, c));
+        }
+      }
+
+      allocateResponse.setUpdatedContainers(updatedContainers);
 
       allocateResponse.setNumClusterNodes(this.rScheduler.getNumClusterNodes());
 
@@ -623,7 +649,7 @@ public class ApplicationMasterService extends AbstractService implements
       return allocateResponse;
     }    
   }
-  
+
   private PreemptionMessage generatePreemptionMessage(Allocation allocation){
     PreemptionMessage pMsg = null;
     // assemble strict preemption request

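The increase/decrease merge at the end of allocate() is self-contained enough to restate as a sketch; every type and factory method here appears verbatim in the hunk above, only the standalone class and method are illustrative:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.yarn.api.records.Container;
    import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
    import org.apache.hadoop.yarn.api.records.UpdatedContainer;

    public class UpdatedContainerSketch {
      // Merge the scheduler's separate increase/decrease lists into the
      // single UpdatedContainer list the AM now receives. The null checks
      // matter because an Allocation may carry only one kind of change.
      static List<UpdatedContainer> merge(List<Container> increased,
          List<Container> decreased) {
        List<UpdatedContainer> updated = new ArrayList<>();
        if (increased != null) {
          for (Container c : increased) {
            updated.add(UpdatedContainer.newInstance(
                ContainerUpdateType.INCREASE_RESOURCE, c));
          }
        }
        if (decreased != null) {
          for (Container c : decreased) {
            updated.add(UpdatedContainer.newInstance(
                ContainerUpdateType.DECREASE_RESOURCE, c));
          }
        }
        return updated;
      }
    }
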
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
index 5e9827a..7fcabab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -33,28 +33,35 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.UpdateContainerError;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.InvalidContainerReleaseException;
-import org.apache.hadoop.yarn.exceptions.InvalidResourceBlacklistRequestException;
+import org.apache.hadoop.yarn.exceptions
+    .InvalidResourceBlacklistRequestException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt
+    .RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler
+    .ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler
     .SchedContainerChangeRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
@@ -68,6 +75,18 @@ import org.apache.hadoop.yarn.util.resource.Resources;
  */
 public class RMServerUtils {
 
+  private static final String UPDATE_OUTSTANDING_ERROR =
+      "UPDATE_OUTSTANDING_ERROR";
+  private static final String INCORRECT_CONTAINER_VERSION_ERROR =
+      "INCORRECT_CONTAINER_VERSION_ERROR";
+  private static final String INVALID_CONTAINER_ID =
+      "INVALID_CONTAINER_ID";
+  private static final String RESOURCE_OUTSIDE_ALLOWED_RANGE =
+      "RESOURCE_OUTSIDE_ALLOWED_RANGE";
+
+  protected static final RecordFactory RECORD_FACTORY =
+      RecordFactoryProvider.getRecordFactory(null);
+
   public static List<RMNode> queryRMNodes(RMContext context,
       EnumSet<NodeState> acceptedStates) {
     // nodes contains nodes that are NEW, RUNNING, UNHEALTHY or DECOMMISSIONING.
@@ -97,6 +116,78 @@ public class RMServerUtils {
   }
 
   /**
+   * Check if we have:
+   * - Request for same containerId and different target resource
+   * - If targetResources violates maximum/minimumAllocation
+   * @param rmContext RM context
+   * @param request Allocate Request
+   * @param maximumAllocation Maximum Allocation
+   * @param increaseResourceReqs Increase Resource Request
+   * @param decreaseResourceReqs Decrease Resource Request
+   * @return List of container Errors
+   */
+  public static List<UpdateContainerError>
+      validateAndSplitUpdateResourceRequests(RMContext rmContext,
+      AllocateRequest request, Resource maximumAllocation,
+      List<UpdateContainerRequest> increaseResourceReqs,
+      List<UpdateContainerRequest> decreaseResourceReqs) {
+    List<UpdateContainerError> errors = new ArrayList<>();
+    Set<ContainerId> outstandingUpdate = new HashSet<>();
+    for (UpdateContainerRequest updateReq : request.getUpdateRequests()) {
+      RMContainer rmContainer = rmContext.getScheduler().getRMContainer(
+          updateReq.getContainerId());
+      String msg = null;
+      if (rmContainer == null) {
+        msg = INVALID_CONTAINER_ID;
+      }
+      // Only allow updates if the requested version matches the current
+      // version
+      if (msg == null && updateReq.getContainerVersion() !=
+          rmContainer.getContainer().getVersion()) {
+        msg = INCORRECT_CONTAINER_VERSION_ERROR + "|"
+            + updateReq.getContainerVersion() + "|"
+            + rmContainer.getContainer().getVersion();
+      }
+      // No more than 1 container update per request.
+      if (msg == null &&
+          outstandingUpdate.contains(updateReq.getContainerId())) {
+        msg = UPDATE_OUTSTANDING_ERROR;
+      }
+      if (msg == null) {
+        Resource original = rmContainer.getContainer().getResource();
+        Resource target = updateReq.getCapability();
+        if (Resources.fitsIn(target, original)) {
+          // This is a decrease request
+          if (validateIncreaseDecreaseRequest(rmContext, updateReq,
+              maximumAllocation, false)) {
+            decreaseResourceReqs.add(updateReq);
+            outstandingUpdate.add(updateReq.getContainerId());
+          } else {
+            msg = RESOURCE_OUTSIDE_ALLOWED_RANGE;
+          }
+        } else {
+          // This is an increase request
+          if (validateIncreaseDecreaseRequest(rmContext, updateReq,
+              maximumAllocation, true)) {
+            increaseResourceReqs.add(updateReq);
+            outstandingUpdate.add(updateReq.getContainerId());
+          } else {
+            msg = RESOURCE_OUTSIDE_ALLOWED_RANGE;
+          }
+        }
+      }
+      if (msg != null) {
+        UpdateContainerError updateError = RECORD_FACTORY
+            .newRecordInstance(UpdateContainerError.class);
+        updateError.setReason(msg);
+        updateError.setUpdateContainerRequest(updateReq);
+        errors.add(updateError);
+      }
+    }
+    return errors;
+  }
+
+  /**
    * Utility method to validate a list resource requests, by insuring that the
    * requested memory/vcore is non-negative and not greater than max
    */
@@ -122,8 +213,6 @@ public class RMServerUtils {
    * the queue lock to make sure that the access to container resource is
    * atomic. Refer to LeafQueue.decreaseContainer() and
    * CapacityScheduelr.updateIncreaseRequests()
-   *
-   * 
    * <pre>
    * - Throw exception when any other error happens
    * </pre>
@@ -145,7 +234,7 @@ public class RMServerUtils {
     if (increase) {
       if (originalResource.getMemorySize() > targetResource.getMemorySize()
           || originalResource.getVirtualCores() > targetResource
-              .getVirtualCores()) {
+          .getVirtualCores()) {
         String msg =
             "Trying to increase a container, but target resource has some"
                 + " resource < original resource, target=" + targetResource
@@ -156,7 +245,7 @@ public class RMServerUtils {
     } else {
       if (originalResource.getMemorySize() < targetResource.getMemorySize()
           || originalResource.getVirtualCores() < targetResource
-              .getVirtualCores()) {
+          .getVirtualCores()) {
         String msg =
             "Trying to decrease a container, but target resource has "
                 + "some resource > original resource, target=" + targetResource
@@ -194,112 +283,46 @@ public class RMServerUtils {
       }
     }
   }
-  
-  /**
-   * Check if we have:
-   * - Request for same containerId and different target resource
-   * - If targetResources violates maximum/minimumAllocation
-   */
-  public static void increaseDecreaseRequestSanityCheck(RMContext rmContext,
-      List<ContainerResourceChangeRequest> incRequests,
-      List<ContainerResourceChangeRequest> decRequests,
-      Resource maximumAllocation) throws InvalidResourceRequestException {
-    checkDuplicatedIncreaseDecreaseRequest(incRequests, decRequests);
-    validateIncreaseDecreaseRequest(rmContext, incRequests, maximumAllocation,
-        true);
-    validateIncreaseDecreaseRequest(rmContext, decRequests, maximumAllocation,
-        false);
-  }
-  
-  private static void checkDuplicatedIncreaseDecreaseRequest(
-      List<ContainerResourceChangeRequest> incRequests,
-      List<ContainerResourceChangeRequest> decRequests)
-          throws InvalidResourceRequestException {
-    String msg = "There're multiple increase or decrease container requests "
-        + "for same containerId=";
-    Set<ContainerId> existedContainerIds = new HashSet<ContainerId>();
-    if (incRequests != null) {
-      for (ContainerResourceChangeRequest r : incRequests) {
-        if (!existedContainerIds.add(r.getContainerId())) {
-          throw new InvalidResourceRequestException(msg + r.getContainerId());
-        }
-      }
-    }
-    
-    if (decRequests != null) {
-      for (ContainerResourceChangeRequest r : decRequests) {
-        if (!existedContainerIds.add(r.getContainerId())) {
-          throw new InvalidResourceRequestException(msg + r.getContainerId());
-        }
-      }
-    }
-  }
 
   // Sanity check and normalize target resource
-  private static void validateIncreaseDecreaseRequest(RMContext rmContext,
-      List<ContainerResourceChangeRequest> requests, Resource maximumAllocation,
-      boolean increase)
-      throws InvalidResourceRequestException {
-    if (requests == null) {
-      return;
+  private static boolean validateIncreaseDecreaseRequest(RMContext rmContext,
+      UpdateContainerRequest request, Resource maximumAllocation,
+      boolean increase) {
+    if (request.getCapability().getMemorySize() < 0
+        || request.getCapability().getMemorySize() > maximumAllocation
+        .getMemorySize()) {
+      return false;
     }
-    for (ContainerResourceChangeRequest request : requests) {
-      if (request.getCapability().getMemorySize() < 0
-          || request.getCapability().getMemorySize() > maximumAllocation
-              .getMemorySize()) {
-        throw new InvalidResourceRequestException("Invalid "
-            + (increase ? "increase" : "decrease") + " request"
-            + ", requested memory < 0"
-            + ", or requested memory > max configured" + ", requestedMemory="
-            + request.getCapability().getMemorySize() + ", maxMemory="
-            + maximumAllocation.getMemorySize());
-      }
-      if (request.getCapability().getVirtualCores() < 0
-          || request.getCapability().getVirtualCores() > maximumAllocation
-              .getVirtualCores()) {
-        throw new InvalidResourceRequestException("Invalid "
-            + (increase ? "increase" : "decrease") + " request"
-            + ", requested virtual cores < 0"
-            + ", or requested virtual cores > max configured"
-            + ", requestedVirtualCores="
-            + request.getCapability().getVirtualCores() + ", maxVirtualCores="
-            + maximumAllocation.getVirtualCores());
-      }
-      ContainerId containerId = request.getContainerId();
-      ResourceScheduler scheduler = rmContext.getScheduler();
-      RMContainer rmContainer = scheduler.getRMContainer(containerId);
-      if (null == rmContainer) {
-        String msg =
-            "Failed to get rmContainer for "
-                + (increase ? "increase" : "decrease")
-                + " request, with container-id=" + containerId;
-        throw new InvalidResourceRequestException(msg);
-      }
-      ResourceCalculator rc = scheduler.getResourceCalculator();
-      Resource targetResource = Resources.normalize(rc, request.getCapability(),
-          scheduler.getMinimumResourceCapability(),
-          scheduler.getMaximumResourceCapability(),
-          scheduler.getMinimumResourceCapability());
-      // Update normalized target resource
-      request.setCapability(targetResource);
+    if (request.getCapability().getVirtualCores() < 0
+        || request.getCapability().getVirtualCores() > maximumAllocation
+        .getVirtualCores()) {
+      return false;
     }
+    ResourceScheduler scheduler = rmContext.getScheduler();
+    ResourceCalculator rc = scheduler.getResourceCalculator();
+    Resource targetResource = Resources.normalize(rc, request.getCapability(),
+        scheduler.getMinimumResourceCapability(),
+        scheduler.getMaximumResourceCapability(),
+        scheduler.getMinimumResourceCapability());
+    // Update normalized target resource
+    request.setCapability(targetResource);
+    return true;
   }
 
   /**
    * It will validate to make sure all the containers belong to correct
    * application attempt id. If not then it will throw
    * {@link InvalidContainerReleaseException}
-   * 
-   * @param containerReleaseList
-   *          containers to be released as requested by application master.
-   * @param appAttemptId
-   *          Application attempt Id
+   *
+   * @param containerReleaseList containers to be released as requested by
+   *                             application master.
+   * @param appAttemptId         Application attempt Id
    * @throws InvalidContainerReleaseException
    */
   public static void
       validateContainerReleaseRequest(List<ContainerId> containerReleaseList,
-          ApplicationAttemptId appAttemptId)
-          throws InvalidContainerReleaseException {
+      ApplicationAttemptId appAttemptId)
+      throws InvalidContainerReleaseException {
     for (ContainerId cId : containerReleaseList) {
       if (!appAttemptId.equals(cId.getApplicationAttemptId())) {
         throw new InvalidContainerReleaseException(
@@ -321,10 +344,11 @@ public class RMServerUtils {
   /**
    * Utility method to verify if the current user has access based on the
    * passed {@link AccessControlList}
+   *
    * @param authorizer the {@link AccessControlList} to check against
-   * @param method the method name to be logged
-   * @param module like AdminService or NodeLabelManager
-   * @param LOG the logger to use
+   * @param method     the method name to be logged
+   * @param module     like AdminService or NodeLabelManager
+   * @param LOG        the logger to use
    * @return {@link UserGroupInformation} of the current user
    * @throws IOException
    */
@@ -347,11 +371,11 @@ public class RMServerUtils {
           " to call '" + method + "'");
 
       RMAuditLogger.logFailure(user.getShortUserName(), method, "", module,
-        RMAuditLogger.AuditConstants.UNAUTHORIZED_USER);
+          RMAuditLogger.AuditConstants.UNAUTHORIZED_USER);
 
       throw new AccessControlException("User " + user.getShortUserName() +
-              " doesn't have permission" +
-              " to call '" + method + "'");
+          " doesn't have permission" +
+          " to call '" + method + "'");
     }
     if (LOG.isTraceEnabled()) {
       LOG.trace(method + " invoked by user " + user.getShortUserName());
@@ -362,56 +386,56 @@ public class RMServerUtils {
   public static YarnApplicationState createApplicationState(
       RMAppState rmAppState) {
     switch (rmAppState) {
-      case NEW:
-        return YarnApplicationState.NEW;
-      case NEW_SAVING:
-        return YarnApplicationState.NEW_SAVING;
-      case SUBMITTED:
-        return YarnApplicationState.SUBMITTED;
-      case ACCEPTED:
-        return YarnApplicationState.ACCEPTED;
-      case RUNNING:
-        return YarnApplicationState.RUNNING;
-      case FINISHING:
-      case FINISHED:
-        return YarnApplicationState.FINISHED;
-      case KILLED:
-        return YarnApplicationState.KILLED;
-      case FAILED:
-        return YarnApplicationState.FAILED;
-      default:
-        throw new YarnRuntimeException("Unknown state passed!");
-      }
+    case NEW:
+      return YarnApplicationState.NEW;
+    case NEW_SAVING:
+      return YarnApplicationState.NEW_SAVING;
+    case SUBMITTED:
+      return YarnApplicationState.SUBMITTED;
+    case ACCEPTED:
+      return YarnApplicationState.ACCEPTED;
+    case RUNNING:
+      return YarnApplicationState.RUNNING;
+    case FINISHING:
+    case FINISHED:
+      return YarnApplicationState.FINISHED;
+    case KILLED:
+      return YarnApplicationState.KILLED;
+    case FAILED:
+      return YarnApplicationState.FAILED;
+    default:
+      throw new YarnRuntimeException("Unknown state passed!");
+    }
   }
 
   public static YarnApplicationAttemptState createApplicationAttemptState(
       RMAppAttemptState rmAppAttemptState) {
     switch (rmAppAttemptState) {
-      case NEW:
-        return YarnApplicationAttemptState.NEW;
-      case SUBMITTED:
-        return YarnApplicationAttemptState.SUBMITTED;
-      case SCHEDULED:
-        return YarnApplicationAttemptState.SCHEDULED;
-      case ALLOCATED:
-        return YarnApplicationAttemptState.ALLOCATED;
-      case LAUNCHED:
-        return YarnApplicationAttemptState.LAUNCHED;
-      case ALLOCATED_SAVING:
-      case LAUNCHED_UNMANAGED_SAVING:
-        return YarnApplicationAttemptState.ALLOCATED_SAVING;
-      case RUNNING:
-        return YarnApplicationAttemptState.RUNNING;
-      case FINISHING:
-        return YarnApplicationAttemptState.FINISHING;
-      case FINISHED:
-        return YarnApplicationAttemptState.FINISHED;
-      case KILLED:
-        return YarnApplicationAttemptState.KILLED;
-      case FAILED:
-        return YarnApplicationAttemptState.FAILED;
-      default:
-        throw new YarnRuntimeException("Unknown state passed!");
+    case NEW:
+      return YarnApplicationAttemptState.NEW;
+    case SUBMITTED:
+      return YarnApplicationAttemptState.SUBMITTED;
+    case SCHEDULED:
+      return YarnApplicationAttemptState.SCHEDULED;
+    case ALLOCATED:
+      return YarnApplicationAttemptState.ALLOCATED;
+    case LAUNCHED:
+      return YarnApplicationAttemptState.LAUNCHED;
+    case ALLOCATED_SAVING:
+    case LAUNCHED_UNMANAGED_SAVING:
+      return YarnApplicationAttemptState.ALLOCATED_SAVING;
+    case RUNNING:
+      return YarnApplicationAttemptState.RUNNING;
+    case FINISHING:
+      return YarnApplicationAttemptState.FINISHING;
+    case FINISHED:
+      return YarnApplicationAttemptState.FINISHED;
+    case KILLED:
+      return YarnApplicationAttemptState.KILLED;
+    case FAILED:
+      return YarnApplicationAttemptState.FAILED;
+    default:
+      throw new YarnRuntimeException("Unknown state passed!");
     }
   }
 
@@ -420,13 +444,12 @@ public class RMServerUtils {
    * a return value when a valid report cannot be found.
    */
   public static final ApplicationResourceUsageReport
-    DUMMY_APPLICATION_RESOURCE_USAGE_REPORT =
+      DUMMY_APPLICATION_RESOURCE_USAGE_REPORT =
       BuilderUtils.newApplicationResourceUsageReport(-1, -1,
           Resources.createResource(-1, -1), Resources.createResource(-1, -1),
           Resources.createResource(-1, -1), 0, 0);
 
 
-
   /**
    * Find all configs whose name starts with
    * YarnConfiguration.RM_PROXY_USER_PREFIX, and add a record for each one by
@@ -438,7 +461,8 @@ public class RMServerUtils {
       String propName = entry.getKey();
       if (propName.startsWith(YarnConfiguration.RM_PROXY_USER_PREFIX)) {
         rmProxyUsers.put(ProxyUsers.CONF_HADOOP_PROXYUSER + "." +
-            propName.substring(YarnConfiguration.RM_PROXY_USER_PREFIX.length()),
+                propName.substring(YarnConfiguration.RM_PROXY_USER_PREFIX
+                    .length()),
             entry.getValue());
       }
     }

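Seen from a caller such as ApplicationMasterService#allocate(), the new helper takes two empty output lists, fills them with the validated increase and decrease requests, and reserves its return value for the rejects (unknown container, stale version, duplicate update, or resource outside the allowed range). A minimal sketch against the signature above, with the wrapper class and method being assumptions:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.api.records.UpdateContainerError;
    import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
    import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
    import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;

    public class SplitUpdatesSketch {
      // Returns every rejected update; the two output lists come back
      // holding the requests that passed validation, already split into
      // increases and decreases by comparing target vs. current resource.
      static List<UpdateContainerError> split(RMContext rmContext,
          AllocateRequest request, Resource maxAlloc) {
        List<UpdateContainerRequest> increases = new ArrayList<>();
        List<UpdateContainerRequest> decreases = new ArrayList<>();
        return RMServerUtils.validateAndSplitUpdateResourceRequests(
            rmContext, request, maxAlloc, increases, decreases);
      }
    }
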
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index 755defd..45415de 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -40,7 +40,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -48,6 +47,7 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -437,6 +437,7 @@ public abstract class AbstractYarnScheduler
         Container.newInstance(status.getContainerId(), node.getNodeID(),
           node.getHttpAddress(), status.getAllocatedResource(),
           status.getPriority(), null);
+    container.setVersion(status.getVersion());
     ApplicationAttemptId attemptId =
         container.getId().getApplicationAttemptId();
     RMContainer rmContainer =
@@ -575,7 +576,7 @@ public abstract class AbstractYarnScheduler
   }
 
   protected void decreaseContainers(
-      List<ContainerResourceChangeRequest> decreaseRequests,
+      List<UpdateContainerRequest> decreaseRequests,
       SchedulerApplicationAttempt attempt) {
     if (null == decreaseRequests || decreaseRequests.isEmpty()) {
       return;
@@ -748,7 +749,7 @@ public abstract class AbstractYarnScheduler
   /**
    * Sanity check increase/decrease request, and return
   * SchedContainerChangeRequest according to the given
-   * ContainerResourceChangeRequest.
+   * UpdateContainerRequest.
    * 
    * <pre>
    * - Returns non-null value means validation succeeded
@@ -756,7 +757,7 @@ public abstract class AbstractYarnScheduler
    * </pre>
    */
   private SchedContainerChangeRequest createSchedContainerChangeRequest(
-      ContainerResourceChangeRequest request, boolean increase)
+      UpdateContainerRequest request, boolean increase)
       throws YarnException {
     ContainerId containerId = request.getContainerId();
     RMContainer rmContainer = getRMContainer(containerId);
@@ -775,11 +776,11 @@ public abstract class AbstractYarnScheduler
 
   protected List<SchedContainerChangeRequest>
       createSchedContainerChangeRequests(
-          List<ContainerResourceChangeRequest> changeRequests,
+          List<UpdateContainerRequest> changeRequests,
           boolean increase) {
     List<SchedContainerChangeRequest> schedulerChangeRequests =
         new ArrayList<SchedContainerChangeRequest>();
-    for (ContainerResourceChangeRequest r : changeRequests) {
+    for (UpdateContainerRequest r : changeRequests) {
       SchedContainerChangeRequest sr = null;
       try {
         sr = createSchedContainerChangeRequest(r, increase);
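
To make the API change concrete: increase and decrease requests are now both expressed as UpdateContainerRequest, tagged with a ContainerUpdateType, instead of living in two separate lists. A minimal sketch, with the argument order mirrored from the test usage later in this patch (the version 0 and the trailing null ExecutionType are taken from there):

  import java.util.Arrays;
  import java.util.List;
  import org.apache.hadoop.yarn.api.records.ContainerId;
  import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
  import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
  import org.apache.hadoop.yarn.util.resource.Resources;

  // Sketch: one request type for both directions of a resize.
  static List<UpdateContainerRequest> buildUpdateRequests(ContainerId containerId) {
    return Arrays.asList(
        UpdateContainerRequest.newInstance(
            0,                                 // container version the AM last saw
            containerId,
            ContainerUpdateType.INCREASE_RESOURCE,
            Resources.createResource(2048, 2), // target capability
            null));                            // execution type left unchanged
  }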

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedContainerChangeRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedContainerChangeRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedContainerChangeRequest.java
index e4ab3a2..94b006c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedContainerChangeRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedContainerChangeRequest.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
 /**
- * This is ContainerResourceChangeRequest in scheduler side, it contains some
+ * This is UpdateContainerRequest on the scheduler side; it contains some
  * pointers to runtime objects like RMContainer, SchedulerNode, etc., which
  * makes it easier for the scheduler to make decisions.
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index c4b32a8..97d29cf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -530,6 +530,9 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
       boolean newContainer, boolean increasedContainer) {
     Container container = rmContainer.getContainer();
     ContainerType containerType = ContainerType.TASK;
+    if (!newContainer) {
+      container.setVersion(container.getVersion() + 1);
+    }
     // The working knowledge is that masterContainer for AM is null as it
     // itself is the master container.
     if (isWaitingForAMContainer()) {
@@ -538,10 +541,11 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
     try {
       // create container token and NMToken altogether.
       container.setContainerToken(rmContext.getContainerTokenSecretManager()
-          .createContainerToken(container.getId(), container.getNodeId(),
-              getUser(), container.getResource(), container.getPriority(),
-              rmContainer.getCreationTime(), this.logAggregationContext,
-              rmContainer.getNodeLabelExpression(), containerType));
+          .createContainerToken(container.getId(), container.getVersion(),
+              container.getNodeId(), getUser(), container.getResource(),
+              container.getPriority(), rmContainer.getCreationTime(),
+              this.logAggregationContext, rmContainer.getNodeLabelExpression(),
+              containerType));
       NMToken nmToken =
           rmContext.getNMTokenSecretManager().createAndGetNMToken(getUser(),
               getApplicationAttemptId(), container);
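
A minimal sketch of the versioning rule this hunk introduces (a distillation, not the actual RM code path): a token re-issued for an existing container carries version + 1, which lets the NM tell an updated container token apart from a stale one.

  import org.apache.hadoop.yarn.api.records.Container;

  // Sketch: brand-new containers keep their initial version (0 by default);
  // every token re-issue for an existing container bumps the version first.
  static int versionForNewToken(Container container, boolean newContainer) {
    if (!newContainer) {
      container.setVersion(container.getVersion() + 1);
    }
    return container.getVersion();
  }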

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java
index 0aff669..c4f575f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/YarnScheduler.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueACL;
@@ -43,6 +42,7 @@ import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -143,8 +143,8 @@ public interface YarnScheduler extends EventHandler<SchedulerEvent> {
   Allocation allocate(ApplicationAttemptId appAttemptId,
       List<ResourceRequest> ask, List<ContainerId> release,
       List<String> blacklistAdditions, List<String> blacklistRemovals,
-      List<ContainerResourceChangeRequest> increaseRequests,
-      List<ContainerResourceChangeRequest> decreaseRequests);
+      List<UpdateContainerRequest> increaseRequests,
+      List<UpdateContainerRequest> decreaseRequests);
 
   /**
    * Get node resource usage report.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 35e1147..33fe9ad 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
@@ -63,6 +62,7 @@ import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -925,7 +925,7 @@ public class CapacityScheduler extends
   //    SchedContainerChangeRequest
   // 2. Deadlock with the scheduling thread.
   private LeafQueue updateIncreaseRequests(
-      List<ContainerResourceChangeRequest> increaseRequests,
+      List<UpdateContainerRequest> increaseRequests,
       FiCaSchedulerApp app) {
     if (null == increaseRequests || increaseRequests.isEmpty()) {
       return null;
@@ -953,8 +953,8 @@ public class CapacityScheduler extends
   public Allocation allocate(ApplicationAttemptId applicationAttemptId,
       List<ResourceRequest> ask, List<ContainerId> release,
       List<String> blacklistAdditions, List<String> blacklistRemovals,
-      List<ContainerResourceChangeRequest> increaseRequests,
-      List<ContainerResourceChangeRequest> decreaseRequests) {
+      List<UpdateContainerRequest> increaseRequests,
+      List<UpdateContainerRequest> decreaseRequests) {
 
     FiCaSchedulerApp application = getApplicationAttempt(applicationAttemptId);
     if (application == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 73d56d7..140b4f7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
@@ -50,6 +49,7 @@ import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -942,8 +942,8 @@ public class FairScheduler extends
   public Allocation allocate(ApplicationAttemptId appAttemptId,
       List<ResourceRequest> ask, List<ContainerId> release,
       List<String> blacklistAdditions, List<String> blacklistRemovals,
-      List<ContainerResourceChangeRequest> increaseRequests,
-      List<ContainerResourceChangeRequest> decreaseRequests) {
+      List<UpdateContainerRequest> increaseRequests,
+      List<UpdateContainerRequest> decreaseRequests) {
 
     // Make sure this application exists
     FSAppAttempt application = getSchedulerApp(appAttemptId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
index 2863a97..e9ffd09 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
@@ -40,7 +40,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
@@ -52,6 +51,7 @@ import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -330,8 +330,8 @@ public class FifoScheduler extends
   public Allocation allocate(ApplicationAttemptId applicationAttemptId,
       List<ResourceRequest> ask, List<ContainerId> release,
       List<String> blacklistAdditions, List<String> blacklistRemovals,
-      List<ContainerResourceChangeRequest> increaseRequests,
-      List<ContainerResourceChangeRequest> decreaseRequests) {
+      List<UpdateContainerRequest> increaseRequests,
+      List<UpdateContainerRequest> decreaseRequests) {
     FiCaSchedulerApp application = getApplicationAttempt(applicationAttemptId);
     if (application == null) {
       LOG.error("Calling allocate on removed " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java
index 6f00615..8c42255 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java
@@ -26,6 +26,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.LogAggregationContext;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
@@ -168,39 +169,43 @@ public class RMContainerTokenSecretManager extends
   /**
    * Helper function for creating ContainerTokens
    *
-   * @param containerId
-   * @param nodeId
-   * @param appSubmitter
-   * @param capability
-   * @param priority
-   * @param createTime
+   * @param containerId Container Id
+   * @param containerVersion Container Version
+   * @param nodeId Node Id
+   * @param appSubmitter App Submitter
+   * @param capability Capability
+   * @param priority Priority
+   * @param createTime Create Time
    * @return the container-token
    */
-  public Token createContainerToken(ContainerId containerId, NodeId nodeId,
-      String appSubmitter, Resource capability, Priority priority,
-      long createTime) {
-    return createContainerToken(containerId, nodeId, appSubmitter, capability,
-      priority, createTime, null, null, ContainerType.TASK);
+  public Token createContainerToken(ContainerId containerId,
+      int containerVersion, NodeId nodeId, String appSubmitter,
+      Resource capability, Priority priority, long createTime) {
+    return createContainerToken(containerId, containerVersion, nodeId,
+        appSubmitter, capability, priority, createTime,
+        null, null, ContainerType.TASK);
   }
 
   /**
    * Helper function for creating ContainerTokens
    *
-   * @param containerId
-   * @param nodeId
-   * @param appSubmitter
-   * @param capability
-   * @param priority
-   * @param createTime
-   * @param logAggregationContext
-   * @param nodeLabelExpression
-   * @param containerType
+   * @param containerId Container Id
+   * @param containerVersion Container version
+   * @param nodeId Node Id
+   * @param appSubmitter App Submitter
+   * @param capability Capability
+   * @param priority Priority
+   * @param createTime Create Time
+   * @param logAggregationContext Log Aggregation Context
+   * @param nodeLabelExpression Node Label Expression
+   * @param containerType Container Type
    * @return the container-token
    */
-  public Token createContainerToken(ContainerId containerId, NodeId nodeId,
-      String appSubmitter, Resource capability, Priority priority,
-      long createTime, LogAggregationContext logAggregationContext,
-      String nodeLabelExpression, ContainerType containerType) {
+  public Token createContainerToken(ContainerId containerId,
+      int containerVersion, NodeId nodeId, String appSubmitter,
+      Resource capability, Priority priority, long createTime,
+      LogAggregationContext logAggregationContext, String nodeLabelExpression,
+      ContainerType containerType) {
     byte[] password;
     ContainerTokenIdentifier tokenIdentifier;
     long expiryTimeStamp =
@@ -210,11 +215,12 @@ public class RMContainerTokenSecretManager extends
     this.readLock.lock();
     try {
       tokenIdentifier =
-          new ContainerTokenIdentifier(containerId, nodeId.toString(),
-            appSubmitter, capability, expiryTimeStamp, this.currentMasterKey
-              .getMasterKey().getKeyId(),
-            ResourceManager.getClusterTimeStamp(), priority, createTime,
-            logAggregationContext, nodeLabelExpression, containerType);
+          new ContainerTokenIdentifier(containerId, containerVersion,
+              nodeId.toString(), appSubmitter, capability, expiryTimeStamp,
+              this.currentMasterKey.getMasterKey().getKeyId(),
+              ResourceManager.getClusterTimeStamp(), priority, createTime,
+              logAggregationContext, nodeLabelExpression, containerType,
+              ExecutionType.GUARANTEED);
       password = this.createPassword(tokenIdentifier);
 
     } finally {
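
A hedged sketch of how a caller would use the reshaped short overload; containerId, nodeId and containerTokenSecretManager are placeholders for objects the RM already holds:

  // Sketch: the version now comes right after the container id; the short
  // overload fills in ContainerType.TASK and null log-aggregation/label args.
  Token token = containerTokenSecretManager.createContainerToken(
      containerId,
      container.getVersion(),            // 0 for fresh containers, >0 after updates
      nodeId,
      "alice",                           // app submitter
      Resources.createResource(1024, 1), // capability
      Priority.newInstance(0),
      System.currentTimeMillis());       // create time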

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
index 1b11472..593de08 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
@@ -34,12 +34,12 @@ import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRespo
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
@@ -245,10 +245,9 @@ public class MockAM {
   }
   
   public AllocateResponse sendContainerResizingRequest(
-      List<ContainerResourceChangeRequest> increaseRequests,
-      List<ContainerResourceChangeRequest> decreaseRequests) throws Exception {
+      List<UpdateContainerRequest> updateRequests) throws Exception {
     final AllocateRequest req = AllocateRequest.newInstance(0, 0F, null, null,
-        null, increaseRequests, decreaseRequests);
+        null, updateRequests);
     return allocate(req);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
index a7d8ba2..7c02264 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
@@ -386,9 +386,10 @@ public class TestApplicationCleanup {
     // nm1/nm2 register to rm2, and do a heartbeat
     nm1.setResourceTrackerService(rm2.getResourceTrackerService());
     nm1.registerNode(Arrays.asList(NMContainerStatus.newInstance(
-      ContainerId.newContainerId(am0.getApplicationAttemptId(), 1),
-      ContainerState.COMPLETE, Resource.newInstance(1024, 1), "", 0,
-      Priority.newInstance(0), 1234)), Arrays.asList(app0.getApplicationId()));
+        ContainerId.newContainerId(am0.getApplicationAttemptId(), 1), 0,
+        ContainerState.COMPLETE, Resource.newInstance(1024, 1), "", 0,
+        Priority.newInstance(0), 1234)),
+        Arrays.asList(app0.getApplicationId()));
     nm2.setResourceTrackerService(rm2.getResourceTrackerService());
     nm2.registerNode(Arrays.asList(app0.getApplicationId()));
 
@@ -598,7 +599,7 @@ public class TestApplicationCleanup {
       int memory) {
     ContainerId containerId = ContainerId.newContainerId(appAttemptId, id);
     NMContainerStatus containerReport =
-        NMContainerStatus.newInstance(containerId, containerState,
+        NMContainerStatus.newInstance(containerId, 0, containerState,
             Resource.newInstance(memory, 1), "recover container", 0,
             Priority.newInstance(0), 0);
     return containerReport;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
index 64673d2..93befcb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
@@ -35,16 +35,16 @@ import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRespo
 import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateRequestPBImpl;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
+import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
 import org.apache.hadoop.yarn.exceptions.InvalidContainerReleaseException;
-import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -383,57 +383,47 @@ public class TestApplicationMasterService {
       
      // Asking for a normal increase should be successful
       am1.sendContainerResizingRequest(Arrays.asList(
-              ContainerResourceChangeRequest.newInstance(
-                  ContainerId.newContainerId(attempt1.getAppAttemptId(), 1),
-                  Resources.createResource(2048))), null);
+              UpdateContainerRequest.newInstance(
+                  0, ContainerId.newContainerId(attempt1.getAppAttemptId(), 1),
+                  ContainerUpdateType.INCREASE_RESOURCE,
+                  Resources.createResource(2048), null)));
       
       // Target resource is negative, should fail
-      boolean exceptionCaught = false;
-      try {
-        am1.sendContainerResizingRequest(Arrays.asList(
-                ContainerResourceChangeRequest.newInstance(
-                    ContainerId.newContainerId(attempt1.getAppAttemptId(), 1),
-                    Resources.createResource(-1))), null);
-      } catch (InvalidResourceRequestException e) {
-        // This is expected
-        exceptionCaught = true;
-      }
-      Assert.assertTrue(exceptionCaught);
-      
+      AllocateResponse response =
+          am1.sendContainerResizingRequest(Arrays.asList(
+              UpdateContainerRequest.newInstance(0,
+                  ContainerId.newContainerId(attempt1.getAppAttemptId(), 1),
+                  ContainerUpdateType.INCREASE_RESOURCE,
+                  Resources.createResource(-1), null)));
+      Assert.assertEquals(1, response.getUpdateErrors().size());
+      Assert.assertEquals("RESOURCE_OUTSIDE_ALLOWED_RANGE",
+          response.getUpdateErrors().get(0).getReason());
+
       // Target resource is more than maxAllocation, should fail
-      try {
-        am1.sendContainerResizingRequest(Arrays.asList(
-                ContainerResourceChangeRequest.newInstance(
-                    ContainerId.newContainerId(attempt1.getAppAttemptId(), 1),
-                    Resources
-                        .add(registerResponse.getMaximumResourceCapability(),
-                            Resources.createResource(1)))), null);
-      } catch (InvalidResourceRequestException e) {
-        // This is expected
-        exceptionCaught = true;
-      }
+      response = am1.sendContainerResizingRequest(Arrays.asList(
+          UpdateContainerRequest.newInstance(0,
+              ContainerId.newContainerId(attempt1.getAppAttemptId(), 1),
+              ContainerUpdateType.INCREASE_RESOURCE,
+              Resources.add(
+                  registerResponse.getMaximumResourceCapability(),
+                  Resources.createResource(1)), null)));
+      Assert.assertEquals(1, response.getUpdateErrors().size());
+      Assert.assertEquals("RESOURCE_OUTSIDE_ALLOWED_RANGE",
+          response.getUpdateErrors().get(0).getReason());
 
-      Assert.assertTrue(exceptionCaught);
-      
      // Contains multiple increase/decrease requests for the same containerId
-      try {
-        am1.sendContainerResizingRequest(Arrays.asList(
-                ContainerResourceChangeRequest.newInstance(
-                    ContainerId.newContainerId(attempt1.getAppAttemptId(), 1),
-                    Resources
-                        .add(registerResponse.getMaximumResourceCapability(),
-                            Resources.createResource(1)))), Arrays.asList(
-                ContainerResourceChangeRequest.newInstance(
-                    ContainerId.newContainerId(attempt1.getAppAttemptId(), 1),
-                    Resources
-                        .add(registerResponse.getMaximumResourceCapability(),
-                            Resources.createResource(1)))));
-      } catch (InvalidResourceRequestException e) {
-        // This is expected
-        exceptionCaught = true;
-      }
-
-      Assert.assertTrue(exceptionCaught);
+      response = am1.sendContainerResizingRequest(Arrays.asList(
+          UpdateContainerRequest.newInstance(0,
+              ContainerId.newContainerId(attempt1.getAppAttemptId(), 1),
+              ContainerUpdateType.INCREASE_RESOURCE,
+              Resources.createResource(2048, 4), null),
+          UpdateContainerRequest.newInstance(0,
+              ContainerId.newContainerId(attempt1.getAppAttemptId(), 1),
+              ContainerUpdateType.DECREASE_RESOURCE,
+              Resources.createResource(1024, 1), null)));
+      Assert.assertEquals(1, response.getUpdateErrors().size());
+      Assert.assertEquals("UPDATE_OUTSTANDING_ERROR",
+          response.getUpdateErrors().get(0).getReason());
     } finally {
       if (rm != null) {
         rm.close();
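
The rewritten test reflects the new failure mode: invalid resize requests no longer raise InvalidResourceRequestException but come back as per-request UpdateContainerError entries. A sketch of AM-side handling, with the reason strings copied from the assertions above (the suggested reactions are illustrative only):

  import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
  import org.apache.hadoop.yarn.api.records.UpdateContainerError;

  // Sketch: inspect per-request errors instead of catching one exception.
  static void handleUpdateErrors(AllocateResponse response) {
    for (UpdateContainerError error : response.getUpdateErrors()) {
      String reason = error.getReason();
      if ("UPDATE_OUTSTANDING_ERROR".equals(reason)) {
        // An earlier update for this container is still pending; retry later.
      } else if ("RESOURCE_OUTSIDE_ALLOWED_RANGE".equals(reason)) {
        // Target capability was negative or above the cluster maximum.
      }
    }
  }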




[06/19] hadoop git commit: YARN-5221. Expose UpdateResourceRequest API to allow AM to request for change in container properties. (asuresh)

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
index d6db32c..0f0f571 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
@@ -27,17 +27,17 @@ import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
-import org.apache.hadoop.yarn.api.records.impl.pb.ContainerResourceChangeRequestPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourceBlacklistRequestPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourceRequestPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.UpdateContainerRequestPBImpl;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
-import org.apache.hadoop.yarn.proto.YarnProtos.ContainerResourceChangeRequestProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceBlacklistRequestProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.UpdateContainerRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProtoOrBuilder;
 
@@ -52,8 +52,7 @@ public class AllocateRequestPBImpl extends AllocateRequest {
 
   private List<ResourceRequest> ask = null;
   private List<ContainerId> release = null;
-  private List<ContainerResourceChangeRequest> increaseRequests = null;
-  private List<ContainerResourceChangeRequest> decreaseRequests = null;
+  private List<UpdateContainerRequest> updateRequests = null;
   private ResourceBlacklistRequest blacklistRequest = null;
   
   public AllocateRequestPBImpl() {
@@ -99,11 +98,8 @@ public class AllocateRequestPBImpl extends AllocateRequest {
     if (this.release != null) {
       addReleasesToProto();
     }
-    if (this.increaseRequests != null) {
-      addIncreaseRequestsToProto();
-    }
-    if (this.decreaseRequests != null) {
-      addDecreaseRequestsToProto();
+    if (this.updateRequests != null) {
+      addUpdateRequestsToProto();
     }
     if (this.blacklistRequest != null) {
       builder.setBlacklistRequest(convertToProtoFormat(this.blacklistRequest));
@@ -166,37 +162,19 @@ public class AllocateRequestPBImpl extends AllocateRequest {
   }
   
   @Override
-  public List<ContainerResourceChangeRequest> getIncreaseRequests() {
-    initIncreaseRequests();
-    return this.increaseRequests;
+  public List<UpdateContainerRequest> getUpdateRequests() {
+    initUpdateRequests();
+    return this.updateRequests;
   }
 
   @Override
-  public void setIncreaseRequests(
-      List<ContainerResourceChangeRequest> increaseRequests) {
-    if (increaseRequests == null) {
+  public void setUpdateRequests(List<UpdateContainerRequest> updateRequests) {
+    if (updateRequests == null) {
       return;
     }
-    initIncreaseRequests();
-    this.increaseRequests.clear();
-    this.increaseRequests.addAll(increaseRequests);
-  }
-
-  @Override
-  public List<ContainerResourceChangeRequest> getDecreaseRequests() {
-    initDecreaseRequests();
-    return this.decreaseRequests;
-  }
-
-  @Override
-  public void setDecreaseRequests(
-          List<ContainerResourceChangeRequest> decreaseRequests) {
-    if (decreaseRequests == null) {
-      return;
-    }
-    initDecreaseRequests();
-    this.decreaseRequests.clear();
-    this.decreaseRequests.addAll(decreaseRequests);
+    initUpdateRequests();
+    this.updateRequests.clear();
+    this.updateRequests.addAll(updateRequests);
   }
 
   @Override
@@ -239,7 +217,8 @@ public class AllocateRequestPBImpl extends AllocateRequest {
     builder.clearAsk();
     if (ask == null)
       return;
-    Iterable<ResourceRequestProto> iterable = new Iterable<ResourceRequestProto>() {
+    Iterable<ResourceRequestProto> iterable =
+        new Iterable<ResourceRequestProto>() {
       @Override
       public Iterator<ResourceRequestProto> iterator() {
         return new Iterator<ResourceRequestProto>() {
@@ -268,84 +247,34 @@ public class AllocateRequestPBImpl extends AllocateRequest {
     builder.addAllAsk(iterable);
   }
   
-  private void initIncreaseRequests() {
-    if (this.increaseRequests != null) {
+  private void initUpdateRequests() {
+    if (this.updateRequests != null) {
       return;
     }
     AllocateRequestProtoOrBuilder p = viaProto ? proto : builder;
-    List<ContainerResourceChangeRequestProto> list =
-        p.getIncreaseRequestList();
-    this.increaseRequests = new ArrayList<ContainerResourceChangeRequest>();
+    List<UpdateContainerRequestProto> list =
+        p.getUpdateRequestsList();
+    this.updateRequests = new ArrayList<>();
 
-    for (ContainerResourceChangeRequestProto c : list) {
-      this.increaseRequests.add(convertFromProtoFormat(c));
-    }
-  }
-
-  private void initDecreaseRequests() {
-    if (this.decreaseRequests != null) {
-      return;
-    }
-    AllocateRequestProtoOrBuilder p = viaProto ? proto : builder;
-    List<ContainerResourceChangeRequestProto> list =
-            p.getDecreaseRequestList();
-    this.decreaseRequests = new ArrayList<>();
-
-    for (ContainerResourceChangeRequestProto c : list) {
-      this.decreaseRequests.add(convertFromProtoFormat(c));
-    }
-  }
-
-  private void addIncreaseRequestsToProto() {
-    maybeInitBuilder();
-    builder.clearIncreaseRequest();
-    if (increaseRequests == null) {
-      return;
+    for (UpdateContainerRequestProto c : list) {
+      this.updateRequests.add(convertFromProtoFormat(c));
     }
-    Iterable<ContainerResourceChangeRequestProto> iterable =
-        new Iterable<ContainerResourceChangeRequestProto>() {
-          @Override
-          public Iterator<ContainerResourceChangeRequestProto> iterator() {
-            return new Iterator<ContainerResourceChangeRequestProto>() {
-
-              Iterator<ContainerResourceChangeRequest> iter =
-                  increaseRequests.iterator();
-
-              @Override
-              public boolean hasNext() {
-                return iter.hasNext();
-              }
-
-              @Override
-              public ContainerResourceChangeRequestProto next() {
-                return convertToProtoFormat(iter.next());
-              }
-
-              @Override
-              public void remove() {
-                throw new UnsupportedOperationException();
-              }
-            };
-
-          }
-        };
-    builder.addAllIncreaseRequest(iterable);
   }
 
-  private void addDecreaseRequestsToProto() {
+  private void addUpdateRequestsToProto() {
     maybeInitBuilder();
-    builder.clearDecreaseRequest();
-    if (decreaseRequests == null) {
+    builder.clearUpdateRequests();
+    if (updateRequests == null) {
       return;
     }
-    Iterable<ContainerResourceChangeRequestProto> iterable =
-        new Iterable<ContainerResourceChangeRequestProto>() {
+    Iterable<UpdateContainerRequestProto> iterable =
+        new Iterable<UpdateContainerRequestProto>() {
           @Override
-          public Iterator<ContainerResourceChangeRequestProto> iterator() {
-            return new Iterator<ContainerResourceChangeRequestProto>() {
+          public Iterator<UpdateContainerRequestProto> iterator() {
+            return new Iterator<UpdateContainerRequestProto>() {
 
-              Iterator<ContainerResourceChangeRequest> iter =
-                      decreaseRequests.iterator();
+              private Iterator<UpdateContainerRequest> iter =
+                  updateRequests.iterator();
 
               @Override
               public boolean hasNext() {
@@ -353,7 +282,7 @@ public class AllocateRequestPBImpl extends AllocateRequest {
               }
 
               @Override
-              public ContainerResourceChangeRequestProto next() {
+              public UpdateContainerRequestProto next() {
                 return convertToProtoFormat(iter.next());
               }
 
@@ -365,7 +294,7 @@ public class AllocateRequestPBImpl extends AllocateRequest {
 
           }
         };
-    builder.addAllDecreaseRequest(iterable);
+    builder.addAllUpdateRequests(iterable);
   }
 
   @Override
@@ -438,14 +367,14 @@ public class AllocateRequestPBImpl extends AllocateRequest {
     return ((ResourceRequestPBImpl)t).getProto();
   }
   
-  private ContainerResourceChangeRequestPBImpl convertFromProtoFormat(
-      ContainerResourceChangeRequestProto p) {
-    return new ContainerResourceChangeRequestPBImpl(p);
+  private UpdateContainerRequestPBImpl convertFromProtoFormat(
+      UpdateContainerRequestProto p) {
+    return new UpdateContainerRequestPBImpl(p);
   }
 
-  private ContainerResourceChangeRequestProto convertToProtoFormat(
-      ContainerResourceChangeRequest t) {
-    return ((ContainerResourceChangeRequestPBImpl) t).getProto();
+  private UpdateContainerRequestProto convertToProtoFormat(
+      UpdateContainerRequest t) {
+    return ((UpdateContainerRequestPBImpl) t).getProto();
   }
 
   private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
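
The single update list reaches the RM through AllocateRequest.newInstance, as in the MockAM change earlier in this patch; a minimal sketch (the nulls stand for ask/release/blacklist left empty):

  // Sketch: responseId, progress, ask, release, blacklist, then the one
  // combined update list -- no separate increase/decrease arguments anymore.
  AllocateRequest req = AllocateRequest.newInstance(
      0,               // responseId
      0f,              // progress
      null,            // ask
      null,            // release
      null,            // blacklist request
      updateRequests); // List<UpdateContainerRequest>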

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
index d096a6f..b4f51ef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
@@ -36,6 +36,8 @@ import org.apache.hadoop.yarn.api.records.PreemptionMessage;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.Token;
+import org.apache.hadoop.yarn.api.records.UpdateContainerError;
+import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerStatusPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.NMTokenPBImpl;
@@ -45,12 +47,14 @@ import org.apache.hadoop.yarn.api.records.impl.pb.PriorityPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.UpdatedContainerPBImpl;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionMessageProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProtoOrBuilder;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.NMTokenProto;
@@ -69,10 +73,10 @@ public class AllocateResponsePBImpl extends AllocateResponse {
   private List<Container> allocatedContainers = null;
   private List<NMToken> nmTokens = null;
   private List<ContainerStatus> completedContainersStatuses = null;
-  private List<Container> increasedContainers = null;
-  private List<Container> decreasedContainers = null;
+  private List<UpdatedContainer> updatedContainers = null;
 
   private List<NodeReport> updatedNodes = null;
+  private List<UpdateContainerError> updateErrors = null;
   private PreemptionMessage preempt;
   private Token amrmToken = null;
   private Priority appPriority = null;
@@ -143,17 +147,17 @@ public class AllocateResponsePBImpl extends AllocateResponse {
     if (this.preempt != null) {
       builder.setPreempt(convertToProtoFormat(this.preempt));
     }
-    if (this.increasedContainers != null) {
-      builder.clearIncreasedContainers();
-      Iterable<ContainerProto> iterable =
-          getContainerProtoIterable(this.increasedContainers);
-      builder.addAllIncreasedContainers(iterable);
+    if (this.updatedContainers != null) {
+      builder.clearUpdatedContainers();
+      Iterable<YarnServiceProtos.UpdatedContainerProto> iterable =
+          getUpdatedContainerProtoIterable(this.updatedContainers);
+      builder.addAllUpdatedContainers(iterable);
     }
-    if (this.decreasedContainers != null) {
-      builder.clearDecreasedContainers();
-      Iterable<ContainerProto> iterable =
-          getContainerProtoIterable(this.decreasedContainers);
-      builder.addAllDecreasedContainers(iterable);
+    if (this.updateErrors != null) {
+      builder.clearUpdateErrors();
+      Iterable<YarnServiceProtos.UpdateContainerErrorProto> iterable =
+          getUpdateErrorsIterable(this.updateErrors);
+      builder.addAllUpdateErrors(iterable);
     }
     if (this.amrmToken != null) {
       builder.setAmRmToken(convertToProtoFormat(this.amrmToken));
@@ -248,49 +252,52 @@ public class AllocateResponsePBImpl extends AllocateResponse {
   }
 
   @Override
-  public synchronized List<Container> getAllocatedContainers() {
-    initLocalNewContainerList();
-    return this.allocatedContainers;
+  public synchronized List<UpdateContainerError> getUpdateErrors() {
+    initLocalUpdateErrorsList();
+    return this.updateErrors;
   }
 
   @Override
-  public synchronized void setAllocatedContainers(
-      final List<Container> containers) {
-    if (containers == null)
+  public synchronized void setUpdateErrors(
+      List<UpdateContainerError> updateErrors) {
+    if (updateErrors == null) {
+      this.updateErrors.clear();
       return;
-    // this looks like a bug because it results in append and not set
-    initLocalNewContainerList();
-    allocatedContainers.addAll(containers);
+    }
+    this.updateErrors = new ArrayList<>(
+        updateErrors.size());
+    this.updateErrors.addAll(updateErrors);
   }
 
   @Override
-  public synchronized List<Container> getIncreasedContainers() {
-    initLocalIncreasedContainerList();
-    return this.increasedContainers;
+  public synchronized List<Container> getAllocatedContainers() {
+    initLocalNewContainerList();
+    return this.allocatedContainers;
   }
 
   @Override
-  public synchronized void setIncreasedContainers(
+  public synchronized void setAllocatedContainers(
       final List<Container> containers) {
     if (containers == null)
       return;
-    initLocalIncreasedContainerList();
-    increasedContainers.addAll(containers);
+    // this looks like a bug because it results in append and not set
+    initLocalNewContainerList();
+    allocatedContainers.addAll(containers);
   }
 
   @Override
-  public synchronized List<Container> getDecreasedContainers() {
-    initLocalDecreasedContainerList();
-    return this.decreasedContainers;
+  public synchronized List<UpdatedContainer> getUpdatedContainers() {
+    initLocalUpdatedContainerList();
+    return this.updatedContainers;
   }
 
   @Override
-  public synchronized void setDecreasedContainers(
-      final List<Container> containers) {
+  public synchronized void setUpdatedContainers(
+      final List<UpdatedContainer> containers) {
     if (containers == null)
       return;
-    initLocalDecreasedContainerList();
-    decreasedContainers.addAll(containers);
+    initLocalUpdatedContainerList();
+    updatedContainers.addAll(containers);
   }
 
   //// Finished containers
@@ -423,29 +430,17 @@ public class AllocateResponsePBImpl extends AllocateResponse {
     this.appPriority = priority;
   }
 
-  private synchronized void initLocalIncreasedContainerList() {
-    if (this.increasedContainers != null) {
+  private synchronized void initLocalUpdatedContainerList() {
+    if (this.updatedContainers != null) {
       return;
     }
     AllocateResponseProtoOrBuilder p = viaProto ? proto : builder;
-    List<ContainerProto> list = p.getIncreasedContainersList();
-    increasedContainers = new ArrayList<>();
+    List<YarnServiceProtos.UpdatedContainerProto> list =
+        p.getUpdatedContainersList();
+    updatedContainers = new ArrayList<>();
 
-    for (ContainerProto c : list) {
-      increasedContainers.add(convertFromProtoFormat(c));
-    }
-  }
-
-  private synchronized void initLocalDecreasedContainerList() {
-    if (this.decreasedContainers != null) {
-      return;
-    }
-    AllocateResponseProtoOrBuilder p = viaProto ? proto : builder;
-    List<ContainerProto> list = p.getDecreasedContainersList();
-    decreasedContainers = new ArrayList<>();
-
-    for (ContainerProto c : list) {
-      decreasedContainers.add(convertFromProtoFormat(c));
+    for (YarnServiceProtos.UpdatedContainerProto c : list) {
+      updatedContainers.add(convertFromProtoFormat(c));
     }
   }
 
@@ -491,6 +486,53 @@ public class AllocateResponsePBImpl extends AllocateResponse {
     }
   }
 
+  private synchronized void initLocalUpdateErrorsList() {
+    if (updateErrors != null) {
+      return;
+    }
+    AllocateResponseProtoOrBuilder p = viaProto ? proto : builder;
+    List<YarnServiceProtos.UpdateContainerErrorProto> list =
+        p.getUpdateErrorsList();
+    this.updateErrors = new ArrayList<UpdateContainerError>();
+    for (YarnServiceProtos.UpdateContainerErrorProto t : list) {
+      updateErrors.add(ProtoUtils.convertFromProtoFormat(t));
+    }
+  }
+
+  private synchronized Iterable<YarnServiceProtos.UpdateContainerErrorProto>
+      getUpdateErrorsIterable(
+      final List<UpdateContainerError> updateErrorsList) {
+    maybeInitBuilder();
+    return new Iterable<YarnServiceProtos.UpdateContainerErrorProto>() {
+      @Override
+      public synchronized Iterator<YarnServiceProtos
+          .UpdateContainerErrorProto> iterator() {
+        return new Iterator<YarnServiceProtos.UpdateContainerErrorProto>() {
+
+          private Iterator<UpdateContainerError> iter =
+              updateErrorsList.iterator();
+
+          @Override
+          public synchronized boolean hasNext() {
+            return iter.hasNext();
+          }
+
+          @Override
+          public synchronized YarnServiceProtos.UpdateContainerErrorProto
+              next() {
+            return ProtoUtils.convertToProtoFormat(iter.next());
+          }
+
+          @Override
+          public synchronized void remove() {
+            throw new UnsupportedOperationException();
+          }
+        };
+
+      }
+    };
+  }
+
   private synchronized Iterable<ContainerProto> getContainerProtoIterable(
       final List<Container> newContainersList) {
     maybeInitBuilder();
@@ -522,6 +564,40 @@ public class AllocateResponsePBImpl extends AllocateResponse {
     };
   }
 
+  private synchronized Iterable<YarnServiceProtos.UpdatedContainerProto>
+        getUpdatedContainerProtoIterable(
+      final List<UpdatedContainer> newUpdatedContainersList) {
+    maybeInitBuilder();
+    return new Iterable<YarnServiceProtos.UpdatedContainerProto>() {
+      @Override
+      public synchronized Iterator<YarnServiceProtos.UpdatedContainerProto>
+          iterator() {
+        return new Iterator<YarnServiceProtos.UpdatedContainerProto>() {
+
+          private Iterator<UpdatedContainer> iter =
+              newUpdatedContainersList.iterator();
+
+          @Override
+          public synchronized boolean hasNext() {
+            return iter.hasNext();
+          }
+
+          @Override
+          public synchronized YarnServiceProtos.UpdatedContainerProto next() {
+            return convertToProtoFormat(iter.next());
+          }
+
+          @Override
+          public synchronized void remove() {
+            throw new UnsupportedOperationException();
+          }
+        };
+
+      }
+    };
+  }
+
   private synchronized Iterable<NMTokenProto> getTokenProtoIterable(
       final List<NMToken> nmTokenList) {
     maybeInitBuilder();
@@ -648,6 +724,16 @@ public class AllocateResponsePBImpl extends AllocateResponse {
     return ((ContainerPBImpl)t).getProto();
   }
 
+  private synchronized UpdatedContainerPBImpl convertFromProtoFormat(
+      YarnServiceProtos.UpdatedContainerProto p) {
+    return new UpdatedContainerPBImpl(p);
+  }
+
+  private synchronized YarnServiceProtos.UpdatedContainerProto
+      convertToProtoFormat(UpdatedContainer t) {
+    return ((UpdatedContainerPBImpl)t).getProto();
+  }
+
   private synchronized ContainerStatusPBImpl convertFromProtoFormat(
       ContainerStatusProto p) {
     return new ContainerStatusPBImpl(p);

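For orientation, a minimal sketch of how an AM-side caller might consume the
two new lists populated above; getUpdatedContainers() and getUpdateErrors()
come from this patch, while the allocate loop around them is assumed:

    import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
    import org.apache.hadoop.yarn.api.records.UpdateContainerError;
    import org.apache.hadoop.yarn.api.records.UpdatedContainer;

    public class UpdateResponseSketch {
      static void onAllocate(AllocateResponse response) {
        // Containers whose update succeeded, with the type of update applied.
        for (UpdatedContainer updated : response.getUpdatedContainers()) {
          System.out.println("updated " + updated.getContainer().getId()
              + " type=" + updated.getUpdateType()
              + " version=" + updated.getContainer().getVersion());
        }
        // Rejected requests come back with the original request and a reason.
        for (UpdateContainerError error : response.getUpdateErrors()) {
          System.out.println("rejected "
              + error.getUpdateContainerRequest().getContainerId()
              + ": " + error.getReason());
        }
      }
    }
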
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
index 91b3e5f..6bf653d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
@@ -274,6 +274,18 @@ public class ContainerPBImpl extends Container {
     builder.setAllocationRequestId(allocationRequestID);
   }
 
+  @Override
+  public int getVersion() {
+    ContainerProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getVersion();
+  }
+
+  @Override
+  public void setVersion(int version) {
+    maybeInitBuilder();
+    builder.setVersion(version);
+  }
+
   private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
     return new ContainerIdPBImpl(p);
   }
@@ -329,6 +341,7 @@ public class ContainerPBImpl extends Container {
     sb.append("ContainerId: ").append(getId()).append(", ");
     sb.append("AllocationRequestId: ").append(getAllocationRequestId())
         .append(", ");
+    sb.append("Version: ").append(getVersion()).append(", ");
     sb.append("NodeId: ").append(getNodeId()).append(", ");
     sb.append("NodeHttpAddress: ").append(getNodeHttpAddress()).append(", ");
     sb.append("Resource: ").append(getResource()).append(", ");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerResourceChangeRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerResourceChangeRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerResourceChangeRequestPBImpl.java
deleted file mode 100644
index f382b8c..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerResourceChangeRequestPBImpl.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.api.records.impl.pb;
-
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
-import org.apache.hadoop.yarn.proto.YarnProtos.ContainerResourceChangeRequestProto;
-import org.apache.hadoop.yarn.proto.YarnProtos.ContainerResourceChangeRequestProtoOrBuilder;
-import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
-
-
-public class ContainerResourceChangeRequestPBImpl extends
-    ContainerResourceChangeRequest {
-  ContainerResourceChangeRequestProto proto =
-      ContainerResourceChangeRequestProto.getDefaultInstance();
-  ContainerResourceChangeRequestProto.Builder builder = null;
-  boolean viaProto = false;
-
-  private ContainerId existingContainerId = null;
-  private Resource targetCapability = null;
-
-  public ContainerResourceChangeRequestPBImpl() {
-    builder = ContainerResourceChangeRequestProto.newBuilder();
-  }
-
-  public ContainerResourceChangeRequestPBImpl(
-      ContainerResourceChangeRequestProto proto) {
-    this.proto = proto;
-    viaProto = true;
-  }
-
-  public ContainerResourceChangeRequestProto getProto() {
-    mergeLocalToProto();
-    proto = viaProto ? proto : builder.build();
-    viaProto = true;
-    return proto;
-  }
-
-  @Override
-  public ContainerId getContainerId() {
-    ContainerResourceChangeRequestProtoOrBuilder p = viaProto ? proto
-        : builder;
-    if (this.existingContainerId != null) {
-      return this.existingContainerId;
-    }
-    if (p.hasContainerId()) {
-      this.existingContainerId = convertFromProtoFormat(p.getContainerId());
-    }
-    return this.existingContainerId;
-  }
-
-  @Override
-  public void setContainerId(ContainerId existingContainerId) {
-    maybeInitBuilder();
-    if (existingContainerId == null) {
-      builder.clearContainerId();
-    }
-    this.existingContainerId = existingContainerId;
-  }
-
-  @Override
-  public Resource getCapability() {
-    ContainerResourceChangeRequestProtoOrBuilder p = viaProto ? proto
-        : builder;
-    if (this.targetCapability != null) {
-      return this.targetCapability;
-    }
-    if (p.hasCapability()) {
-      this.targetCapability = convertFromProtoFormat(p.getCapability());
-    }
-    return this.targetCapability;
-  }
-
-  @Override
-  public void setCapability(Resource targetCapability) {
-    maybeInitBuilder();
-    if (targetCapability == null) {
-      builder.clearCapability();
-    }
-    this.targetCapability = targetCapability;
-  }
-
-  private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
-    return new ContainerIdPBImpl(p);
-  }
-
-  private ContainerIdProto convertToProtoFormat(ContainerId t) {
-    return ((ContainerIdPBImpl) t).getProto();
-  }
-
-  private Resource convertFromProtoFormat(ResourceProto p) {
-    return new ResourcePBImpl(p);
-  }
-
-  private ResourceProto convertToProtoFormat(Resource t) {
-    return ((ResourcePBImpl) t).getProto();
-  }
-
-  private void mergeLocalToProto() {
-    if (viaProto) {
-      maybeInitBuilder();
-    }
-    mergeLocalToBuilder();
-    proto = builder.build();
-    viaProto = true;
-  }
-
-  private void maybeInitBuilder() {
-    if (viaProto || builder == null) {
-      builder = ContainerResourceChangeRequestProto.newBuilder(proto);
-    }
-    viaProto = false;
-  }
-
-  private void mergeLocalToBuilder() {
-    if (this.existingContainerId != null) {
-      builder.setContainerId(convertToProtoFormat(this.existingContainerId));
-    }
-    if (this.targetCapability != null) {
-      builder.setCapability(convertToProtoFormat(this.targetCapability));
-    }
-  }
-}

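Everything the deleted record carried (container id and target capability)
maps onto UpdateContainerRequest; a hedged migration sketch follows, where
Records.newRecord is the usual YARN factory path and INCREASE_RESOURCE is
assumed to be one of the ContainerUpdateType constants this patch introduces:

    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
    import org.apache.hadoop.yarn.util.Records;

    public class MigrationSketch {
      // Old record: container id + capability. New record: the same two
      // fields plus the update type and the container version the AM last saw.
      static UpdateContainerRequest toUpdateRequest(ContainerId id,
          int lastSeenVersion, Resource target) {
        UpdateContainerRequest req =
            Records.newRecord(UpdateContainerRequest.class);
        req.setContainerId(id);
        req.setCapability(target);
        req.setContainerVersion(lastSeenVersion);
        req.setContainerUpdateType(ContainerUpdateType.INCREASE_RESOURCE);
        return req;
      }
    }
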
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
index 4b62358..128120e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
@@ -27,8 +27,10 @@ import org.apache.hadoop.yarn.api.records.AMCommand;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerRetryPolicy;
 import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
 import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
@@ -41,12 +43,15 @@ import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.UpdateContainerError;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.proto.YarnProtos;
 import org.apache.hadoop.yarn.proto.YarnProtos.AMCommandProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAccessTypeProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.FinalApplicationStatusProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceTypeProto;
@@ -57,6 +62,7 @@ import org.apache.hadoop.yarn.proto.YarnProtos.NodeStateProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.QueueACLProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.QueueStateProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ReservationRequestInterpreterProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationAttemptStateProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerRetryPolicyProto;
@@ -64,6 +70,7 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ContainerTypeProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ExecutionTypeProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ExecutionTypeRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.ContainerUpdateTypeProto;
 import org.apache.hadoop.yarn.server.api.ContainerType;
 
 import com.google.protobuf.ByteString;
@@ -303,15 +310,25 @@ public class ProtoUtils {
   }
 
   /*
+   * ContainerUpdateType
+   */
+  public static ContainerUpdateTypeProto convertToProtoFormat(
+      ContainerUpdateType e) {
+    return ContainerUpdateTypeProto.valueOf(e.name());
+  }
+  public static ContainerUpdateType convertFromProtoFormat(
+      ContainerUpdateTypeProto e) {
+    return ContainerUpdateType.valueOf(e.name());
+  }
+
+  /*
    * Resource
    */
-  public static synchronized YarnProtos.ResourceProto convertToProtoFormat(
-      Resource r) {
+  public static synchronized ResourceProto convertToProtoFormat(Resource r) {
     return ((ResourcePBImpl) r).getProto();
   }
 
-  public static Resource convertFromProtoFormat(
-      YarnProtos.ResourceProto resource) {
+  public static Resource convertFromProtoFormat(ResourceProto resource) {
     return new ResourcePBImpl(resource);
   }
 
@@ -349,8 +366,52 @@ public class ProtoUtils {
     return ((ContainerPBImpl)t).getProto();
   }
 
+  public static ContainerPBImpl convertFromProtoFormat(
+      YarnProtos.ContainerProto t) {
+    return new ContainerPBImpl(t);
+  }
+
   public static ContainerStatusPBImpl convertFromProtoFormat(
       YarnProtos.ContainerStatusProto p) {
     return new ContainerStatusPBImpl(p);
   }
+
+  /*
+   * ContainerId
+   */
+  public static ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
+    return new ContainerIdPBImpl(p);
+  }
+
+  public static ContainerIdProto convertToProtoFormat(ContainerId t) {
+    return ((ContainerIdPBImpl) t).getProto();
+  }
+
+  /*
+   * UpdateContainerRequest
+   */
+  public static UpdateContainerRequestPBImpl convertFromProtoFormat(
+      YarnServiceProtos.UpdateContainerRequestProto p) {
+    return new UpdateContainerRequestPBImpl(p);
+  }
+
+  public static YarnServiceProtos.UpdateContainerRequestProto
+      convertToProtoFormat(UpdateContainerRequest t) {
+    return ((UpdateContainerRequestPBImpl) t).getProto();
+  }
+
+  /*
+   * UpdateContainerError
+   */
+  public static UpdateContainerErrorPBImpl convertFromProtoFormat(
+      YarnServiceProtos.UpdateContainerErrorProto p) {
+    return new UpdateContainerErrorPBImpl(p);
+  }
+
+  public static YarnServiceProtos.UpdateContainerErrorProto
+      convertToProtoFormat(UpdateContainerError t) {
+    return ((UpdateContainerErrorPBImpl) t).getProto();
+  }
 }

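A quick round trip through the converters added above; constructing the
record via Records.newRecord is assumed:

    import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
    import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
    import org.apache.hadoop.yarn.proto.YarnServiceProtos.UpdateContainerRequestProto;
    import org.apache.hadoop.yarn.util.Records;

    public class ProtoRoundTripSketch {
      public static void main(String[] args) {
        UpdateContainerRequest original =
            Records.newRecord(UpdateContainerRequest.class);
        original.setContainerVersion(1);
        // To the wire format and back; both directions are new in ProtoUtils.
        UpdateContainerRequestProto wire =
            ProtoUtils.convertToProtoFormat(original);
        UpdateContainerRequest restored =
            ProtoUtils.convertFromProtoFormat(wire);
        System.out.println(restored.getContainerVersion()); // prints 1
      }
    }
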
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/UpdateContainerErrorPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/UpdateContainerErrorPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/UpdateContainerErrorPBImpl.java
new file mode 100644
index 0000000..fb6c1a7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/UpdateContainerErrorPBImpl.java
@@ -0,0 +1,125 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.UpdateContainerError;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos;
+
+/**
+ * Implementation of <code>UpdateContainerError</code>.
+ */
+public class UpdateContainerErrorPBImpl extends UpdateContainerError {
+  private YarnServiceProtos.UpdateContainerErrorProto proto =
+      YarnServiceProtos.UpdateContainerErrorProto.getDefaultInstance();
+  private YarnServiceProtos.UpdateContainerErrorProto.Builder builder = null;
+  private boolean viaProto = false;
+
+  private String reason = null;
+  private UpdateContainerRequest updateRequest = null;
+
+  public UpdateContainerErrorPBImpl() {
+    builder = YarnServiceProtos.UpdateContainerErrorProto.newBuilder();
+  }
+
+  public UpdateContainerErrorPBImpl(YarnServiceProtos
+      .UpdateContainerErrorProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  public YarnServiceProtos.UpdateContainerErrorProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public String getReason() {
+    YarnServiceProtos.UpdateContainerErrorProtoOrBuilder p = viaProto ? proto
+        : builder;
+    if (this.reason != null) {
+      return this.reason;
+    }
+    if (p.hasReason()) {
+      this.reason = p.getReason();
+    }
+    return this.reason;
+  }
+
+  @Override
+  public void setReason(String reason) {
+    maybeInitBuilder();
+    if (reason == null) {
+      builder.clearReason();
+    }
+    this.reason = reason;
+  }
+
+  @Override
+  public UpdateContainerRequest getUpdateContainerRequest() {
+    YarnServiceProtos.UpdateContainerErrorProtoOrBuilder p = viaProto ? proto
+        : builder;
+    if (this.updateRequest != null) {
+      return this.updateRequest;
+    }
+    if (p.hasUpdateRequest()) {
+      this.updateRequest =
+          ProtoUtils.convertFromProtoFormat(p.getUpdateRequest());
+    }
+    return this.updateRequest;
+  }
+
+  @Override
+  public void setUpdateContainerRequest(
+      UpdateContainerRequest updateContainerRequest) {
+    maybeInitBuilder();
+    if (updateContainerRequest == null) {
+      builder.clearUpdateRequest();
+    }
+    this.updateRequest = updateContainerRequest;
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = YarnServiceProtos.UpdateContainerErrorProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.reason != null) {
+      builder.setReason(this.reason);
+    }
+    if (this.updateRequest != null) {
+      builder.setUpdateRequest(
+          ProtoUtils.convertToProtoFormat(this.updateRequest));
+    }
+  }
+}

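The new PBImpl records above all follow the same copy-on-write lifecycle; a
stripped-down illustration of that pattern in plain Java, with a
StringBuilder standing in for the generated protobuf builder:

    public class PBImplPatternSketch {
      private String proto = "";            // immutable snapshot
      private StringBuilder builder = null; // pending mutations
      private boolean viaProto = true;      // true => proto is current

      private void maybeInitBuilder() {
        // First write after a read-only phase forks a builder off the proto.
        if (viaProto || builder == null) {
          builder = new StringBuilder(proto);
        }
        viaProto = false;
      }

      public void setReason(String reason) {
        maybeInitBuilder();
        builder.setLength(0);
        builder.append(reason);
      }

      public String getProto() {
        // Freeze pending mutations back into an immutable snapshot.
        if (!viaProto) {
          proto = builder.toString();
          viaProto = true;
        }
        return proto;
      }
    }
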
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/UpdateContainerRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/UpdateContainerRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/UpdateContainerRequestPBImpl.java
new file mode 100644
index 0000000..f2b3c09
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/UpdateContainerRequestPBImpl.java
@@ -0,0 +1,187 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos;
+
+/**
+ * Implementation of <code>UpdateContainerRequest</code>.
+ */
+public class UpdateContainerRequestPBImpl extends UpdateContainerRequest {
+  private YarnServiceProtos.UpdateContainerRequestProto proto =
+      YarnServiceProtos.UpdateContainerRequestProto.getDefaultInstance();
+  private YarnServiceProtos.UpdateContainerRequestProto.Builder builder = null;
+  private boolean viaProto = false;
+
+  private ContainerId existingContainerId = null;
+  private Resource targetCapability = null;
+
+  public UpdateContainerRequestPBImpl() {
+    builder = YarnServiceProtos.UpdateContainerRequestProto.newBuilder();
+  }
+
+  public UpdateContainerRequestPBImpl(YarnServiceProtos
+      .UpdateContainerRequestProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  public YarnServiceProtos.UpdateContainerRequestProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int getContainerVersion() {
+    YarnServiceProtos.UpdateContainerRequestProtoOrBuilder p =
+        viaProto ? proto : builder;
+    if (!p.hasContainerVersion()) {
+      return 0;
+    }
+    return p.getContainerVersion();
+  }
+
+  @Override
+  public void setContainerVersion(int containerVersion) {
+    maybeInitBuilder();
+    builder.setContainerVersion(containerVersion);
+  }
+
+  @Override
+  public ContainerId getContainerId() {
+    YarnServiceProtos.UpdateContainerRequestProtoOrBuilder p =
+        viaProto ? proto : builder;
+    if (this.existingContainerId != null) {
+      return this.existingContainerId;
+    }
+    if (p.hasContainerId()) {
+      this.existingContainerId =
+          ProtoUtils.convertFromProtoFormat(p.getContainerId());
+    }
+    return this.existingContainerId;
+  }
+
+  @Override
+  public void setContainerId(ContainerId containerId) {
+    maybeInitBuilder();
+    if (containerId == null) {
+      builder.clearContainerId();
+    }
+    this.existingContainerId = containerId;
+  }
+
+  @Override
+  public Resource getCapability() {
+    YarnServiceProtos.UpdateContainerRequestProtoOrBuilder p = viaProto ? proto
+        : builder;
+    if (this.targetCapability != null) {
+      return this.targetCapability;
+    }
+    if (p.hasCapability()) {
+      this.targetCapability =
+          ProtoUtils.convertFromProtoFormat(p.getCapability());
+    }
+    return this.targetCapability;
+  }
+
+  @Override
+  public void setCapability(Resource capability) {
+    maybeInitBuilder();
+    if (capability == null) {
+      builder.clearCapability();
+    }
+    this.targetCapability = capability;
+  }
+
+  @Override
+  public ExecutionType getExecutionType() {
+    YarnServiceProtos.UpdateContainerRequestProtoOrBuilder p =
+        viaProto ? proto : builder;
+    if (!p.hasExecutionType()) {
+      return null;
+    }
+    return ProtoUtils.convertFromProtoFormat(p.getExecutionType());
+  }
+
+  @Override
+  public void setExecutionType(ExecutionType execType) {
+    maybeInitBuilder();
+    if (execType == null) {
+      builder.clearExecutionType();
+      return;
+    }
+    builder.setExecutionType(ProtoUtils.convertToProtoFormat(execType));
+  }
+
+  @Override
+  public ContainerUpdateType getContainerUpdateType() {
+    YarnServiceProtos.UpdateContainerRequestProtoOrBuilder p =
+        viaProto ? proto : builder;
+    if (!p.hasUpdateType()) {
+      return null;
+    }
+    return ProtoUtils.convertFromProtoFormat(p.getUpdateType());
+  }
+
+  @Override
+  public void setContainerUpdateType(ContainerUpdateType updateType) {
+    maybeInitBuilder();
+    if (updateType == null) {
+      builder.clearUpdateType();
+      return;
+    }
+    builder.setUpdateType(ProtoUtils.convertToProtoFormat(updateType));
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = YarnServiceProtos.UpdateContainerRequestProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.existingContainerId != null) {
+      builder.setContainerId(
+          ProtoUtils.convertToProtoFormat(this.existingContainerId));
+    }
+    if (this.targetCapability != null) {
+      builder.setCapability(
+          ProtoUtils.convertToProtoFormat(this.targetCapability));
+    }
+
+}

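And a sketch of an AM attaching one of these requests to its heartbeat;
setUpdateRequests(...) on AllocateRequest is assumed to be the accessor
introduced elsewhere in this patch:

    import java.util.Collections;

    import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
    import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
    import org.apache.hadoop.yarn.util.Records;

    public class AllocateWithUpdateSketch {
      static AllocateRequest heartbeat(int responseId, float progress,
          UpdateContainerRequest update) {
        AllocateRequest request = Records.newRecord(AllocateRequest.class);
        request.setResponseId(responseId);
        request.setProgress(progress);
        request.setUpdateRequests(Collections.singletonList(update));
        return request;
      }
    }
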
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/UpdatedContainerPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/UpdatedContainerPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/UpdatedContainerPBImpl.java
new file mode 100644
index 0000000..0cd1903
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/UpdatedContainerPBImpl.java
@@ -0,0 +1,117 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
+import org.apache.hadoop.yarn.api.records.UpdatedContainer;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos;
+
+/**
+ * Implementation of <code>UpdatedContainer</code>.
+ */
+public class UpdatedContainerPBImpl extends UpdatedContainer {
+  private YarnServiceProtos.UpdatedContainerProto proto =
+      YarnServiceProtos.UpdatedContainerProto.getDefaultInstance();
+  private YarnServiceProtos.UpdatedContainerProto.Builder builder = null;
+  private boolean viaProto = false;
+
+  private Container container = null;
+
+  public UpdatedContainerPBImpl() {
+    builder = YarnServiceProtos.UpdatedContainerProto.newBuilder();
+  }
+
+  public UpdatedContainerPBImpl(YarnServiceProtos.UpdatedContainerProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.container != null) {
+      builder.setContainer(ProtoUtils.convertToProtoFormat(this.container));
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = YarnServiceProtos.UpdatedContainerProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  public YarnServiceProtos.UpdatedContainerProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public ContainerUpdateType getUpdateType() {
+    YarnServiceProtos.UpdatedContainerProtoOrBuilder p =
+        viaProto ? proto : builder;
+    if (!p.hasUpdateType()) {
+      return null;
+    }
+    return ProtoUtils.convertFromProtoFormat(p.getUpdateType());
+  }
+
+  @Override
+  public void setUpdateType(ContainerUpdateType updateType) {
+    maybeInitBuilder();
+    if (updateType == null) {
+      builder.clearUpdateType();
+      return;
+    }
+    builder.setUpdateType(ProtoUtils.convertToProtoFormat(updateType));
+  }
+
+  @Override
+  public Container getContainer() {
+    YarnServiceProtos.UpdatedContainerProtoOrBuilder p =
+        viaProto ? proto : builder;
+    if (this.container != null) {
+      return this.container;
+    }
+    if (!p.hasContainer()) {
+      return null;
+    }
+    this.container = ProtoUtils.convertFromProtoFormat(p.getContainer());
+    return this.container;
+  }
+
+  @Override
+  public void setContainer(Container container) {
+    maybeInitBuilder();
+    if (container == null) {
+      builder.clearContainer();
+    }
+    this.container = container;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
index 0b03f50..f8e9463 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
@@ -87,14 +87,15 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
       long rmIdentifier, Priority priority, long creationTime,
       LogAggregationContext logAggregationContext, String nodeLabelExpression,
       ContainerType containerType) {
-    this(containerID, hostName, appSubmitter, r, expiryTimeStamp, masterKeyId,
-        rmIdentifier, priority, creationTime, logAggregationContext,
-        nodeLabelExpression, containerType, ExecutionType.GUARANTEED);
+    this(containerID, 0, hostName, appSubmitter, r, expiryTimeStamp,
+        masterKeyId, rmIdentifier, priority, creationTime,
+        logAggregationContext, nodeLabelExpression, containerType,
+        ExecutionType.GUARANTEED);
   }
 
-  public ContainerTokenIdentifier(ContainerId containerID, String hostName,
-      String appSubmitter, Resource r, long expiryTimeStamp, int masterKeyId,
-      long rmIdentifier, Priority priority, long creationTime,
+  public ContainerTokenIdentifier(ContainerId containerID, int containerVersion,
+      String hostName, String appSubmitter, Resource r, long expiryTimeStamp,
+      int masterKeyId, long rmIdentifier, Priority priority, long creationTime,
       LogAggregationContext logAggregationContext, String nodeLabelExpression,
       ContainerType containerType, ExecutionType executionType) {
     ContainerTokenIdentifierProto.Builder builder =
@@ -102,6 +103,7 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
     if (containerID != null) {
       builder.setContainerId(((ContainerIdPBImpl)containerID).getProto());
     }
+    builder.setVersion(containerVersion);
     builder.setNmHostAddr(hostName);
     builder.setAppSubmitter(appSubmitter);
     if (r != null) {
@@ -184,7 +186,7 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
   }
 
   /**
-   * Get the ContainerType of container to allocate
+   * Get the ContainerType of container to allocate.
    * @return ContainerType
    */
   public ContainerType getContainerType(){
@@ -241,7 +243,18 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
     return UserGroupInformation.createRemoteUser(
         containerId);
   }
-  
+
+  /**
+   * Get the Container version.
+   * @return container version
+   */
+  public int getVersion() {
+    if (proto.hasVersion()) {
+      return proto.getVersion();
+    } else {
+      return 0;
+    }
+  }
+
   /**
    * Get the node-label-expression in the original ResourceRequest
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarn_security_token.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarn_security_token.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarn_security_token.proto
index 71434be..851920d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarn_security_token.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarn_security_token.proto
@@ -52,6 +52,7 @@ message ContainerTokenIdentifierProto {
   optional string nodeLabelExpression = 11;
   optional ContainerTypeProto containerType = 12;
   optional ExecutionTypeProto executionType = 13 [default = GUARANTEED];
+  optional int32 version = 14 [default = 0];
 }
 
 message ClientToAMTokenIdentifierProto {

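Because the new field is optional with a default of 0, and getVersion()
above falls back to 0 when the field is absent, identifiers minted before
this change still deserialize cleanly; a compatibility sketch, where the
byte array stands for a pre-upgrade identifier off the wire:

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;

    import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;

    public class TokenCompatSketch {
      static int versionOf(byte[] preUpgradeBytes) throws IOException {
        ContainerTokenIdentifier id = new ContainerTokenIdentifier();
        id.readFields(new DataInputStream(
            new ByteArrayInputStream(preUpgradeBytes)));
        return id.getVersion(); // 0 for identifiers written before this patch
      }
    }
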
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
index da52f14..11bf56b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
@@ -121,7 +121,6 @@ import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerReport;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
 import org.apache.hadoop.yarn.api.records.ContainerRetryContext;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
@@ -155,6 +154,9 @@ import org.apache.hadoop.yarn.api.records.SerializedException;
 import org.apache.hadoop.yarn.api.records.StrictPreemptionContract;
 import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.api.records.UpdateContainerError;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
+import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptReportPBImpl;
@@ -166,7 +168,6 @@ import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerLaunchContextPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerReportPBImpl;
-import org.apache.hadoop.yarn.api.records.impl.pb.ContainerResourceChangeRequestPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerRetryContextPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerStatusPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ExecutionTypeRequestPBImpl;
@@ -190,6 +191,7 @@ import org.apache.hadoop.yarn.api.records.impl.pb.SerializedExceptionPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.StrictPreemptionContractPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.URLPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.UpdateContainerRequestPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.YarnClusterMetricsPBImpl;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptReportProto;
@@ -201,7 +203,6 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerLaunchContextProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerReportProto;
-import org.apache.hadoop.yarn.proto.YarnProtos.ContainerResourceChangeRequestProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerRetryContextProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ExecutionTypeRequestProto;
@@ -489,7 +490,8 @@ public class TestPBImplRecords {
     generateByNewInstance(ContainerLaunchContext.class);
     generateByNewInstance(ApplicationSubmissionContext.class);
     generateByNewInstance(ContainerReport.class);
-    generateByNewInstance(ContainerResourceChangeRequest.class);
+    generateByNewInstance(UpdateContainerRequest.class);
+    generateByNewInstance(UpdateContainerError.class);
     generateByNewInstance(IncreaseContainersResourceRequest.class);
     generateByNewInstance(IncreaseContainersResourceResponse.class);
     generateByNewInstance(ContainerStatus.class);
@@ -501,6 +503,7 @@ public class TestPBImplRecords {
     generateByNewInstance(PreemptionMessage.class);
     generateByNewInstance(StartContainerRequest.class);
     generateByNewInstance(NodeLabel.class);
+    generateByNewInstance(UpdatedContainer.class);
     // genByNewInstance does not apply to QueueInfo, cause
     // it is recursive(has sub queues)
     typeValueCache.put(QueueInfo.class, QueueInfo.newInstance("root", 1.0f,
@@ -1010,9 +1013,9 @@ public class TestPBImplRecords {
   }
 
   @Test
-  public void testContainerResourceChangeRequestPBImpl() throws Exception {
-    validatePBImplRecord(ContainerResourceChangeRequestPBImpl.class,
-        ContainerResourceChangeRequestProto.class);
+  public void testUpdateContainerRequestPBImpl() throws Exception {
+    validatePBImplRecord(UpdateContainerRequestPBImpl.class,
+        YarnServiceProtos.UpdateContainerRequestProto.class);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
index 3a5f003..130a65e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
@@ -153,7 +153,7 @@ public class TestYARNTokenIdentifier {
     long creationTime = 1000;
     
     ContainerTokenIdentifier token = new ContainerTokenIdentifier(
-        containerID, hostName, appSubmitter, r, expiryTimeStamp, 
+        containerID, hostName, appSubmitter, r, expiryTimeStamp,
         masterKeyId, rmIdentifier, priority, creationTime);
     
     ContainerTokenIdentifier anotherToken = new ContainerTokenIdentifier();
@@ -392,7 +392,7 @@ public class TestYARNTokenIdentifier {
         anotherToken.getExecutionType());
 
     token =
-        new ContainerTokenIdentifier(containerID, hostName, appSubmitter, r,
+        new ContainerTokenIdentifier(containerID, 0, hostName, appSubmitter, r,
             expiryTimeStamp, masterKeyId, rmIdentifier, priority, creationTime,
             null, CommonNodeLabelsManager.NO_LABEL, ContainerType.TASK,
             ExecutionType.OPPORTUNISTIC);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NMContainerStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NMContainerStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NMContainerStatus.java
index 4067c11..ed950ce 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NMContainerStatus.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NMContainerStatus.java
@@ -35,21 +35,22 @@ public abstract class NMContainerStatus {
   
   // Used by tests only
   public static NMContainerStatus newInstance(ContainerId containerId,
-      ContainerState containerState, Resource allocatedResource,
+      int version, ContainerState containerState, Resource allocatedResource,
       String diagnostics, int containerExitStatus, Priority priority,
       long creationTime) {
-    return newInstance(containerId, containerState, allocatedResource,
+    return newInstance(containerId, version, containerState, allocatedResource,
         diagnostics, containerExitStatus, priority, creationTime,
         CommonNodeLabelsManager.NO_LABEL);
   }
 
   public static NMContainerStatus newInstance(ContainerId containerId,
-      ContainerState containerState, Resource allocatedResource,
+      int version, ContainerState containerState, Resource allocatedResource,
       String diagnostics, int containerExitStatus, Priority priority,
       long creationTime, String nodeLabelExpression) {
     NMContainerStatus status =
         Records.newRecord(NMContainerStatus.class);
     status.setContainerId(containerId);
+    status.setVersion(version);
     status.setContainerState(containerState);
     status.setAllocatedResource(allocatedResource);
     status.setDiagnostics(diagnostics);
@@ -125,4 +126,12 @@ public abstract class NMContainerStatus {
 
   public abstract void setNodeLabelExpression(
       String nodeLabelExpression);
+
+  public int getVersion() {
+    return 0;
+  }
+
+  public void setVersion(int version) {
+  }
 }

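The abstract class keeps no-op defaults so other NMContainerStatus
implementations stay source-compatible; the PBImpl below overrides both.
Usage mirroring the updated test-only factory, with illustrative values:

    import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.api.records.ContainerState;
    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;

    public class NMStatusSketch {
      static NMContainerStatus running(ContainerId id) {
        // The container version now sits between the id and the state.
        return NMContainerStatus.newInstance(id, 0, ContainerState.RUNNING,
            Resource.newInstance(1024, 1), "diagnostics",
            ContainerExitStatus.SUCCESS, Priority.newInstance(0),
            System.currentTimeMillis());
      }
    }
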
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NMContainerStatusPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NMContainerStatusPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NMContainerStatusPBImpl.java
index 921c9d9..2d1046f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NMContainerStatusPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NMContainerStatusPBImpl.java
@@ -83,6 +83,7 @@ public class NMContainerStatusPBImpl extends NMContainerStatus {
     StringBuilder sb = new StringBuilder();
     sb.append("[").append(getContainerId()).append(", ")
         .append("CreateTime: ").append(getCreationTime()).append(", ")
+        .append("Version: ").append(getVersion()).append(", ")
         .append("State: ").append(getContainerState()).append(", ")
         .append("Capability: ").append(getAllocatedResource()).append(", ")
         .append("Diagnostics: ").append(getDiagnostics()).append(", ")
@@ -185,6 +186,18 @@ public class NMContainerStatusPBImpl extends NMContainerStatus {
   }
 
   @Override
+  public int getVersion() {
+    NMContainerStatusProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getVersion();
+  }
+
+  @Override
+  public void setVersion(int version) {
+    maybeInitBuilder();
+    builder.setVersion(version);
+  }
+
+  @Override
   public Priority getPriority() {
     NMContainerStatusProtoOrBuilder p = viaProto ? proto : builder;
     if (this.priority != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java
index 41b5d56..9b2fd38 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java
@@ -324,7 +324,7 @@ public class OpportunisticContainerAllocator {
     long currTime = System.currentTimeMillis();
     ContainerTokenIdentifier containerTokenIdentifier =
         new ContainerTokenIdentifier(
-            cId, nodeId.getHost() + ":" + nodeId.getPort(), userName,
+            cId, 0, nodeId.getHost() + ":" + nodeId.getPort(), userName,
             capability, currTime + appParams.containerTokenExpiryInterval,
             tokenSecretManager.getCurrentKey().getKeyId(), rmIdentifier,
             rr.getPriority(), currTime,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
index 8ecbea7..50df12e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
@@ -64,9 +64,11 @@ import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
+import org.apache.hadoop.yarn.server.api.ContainerType;
 
 import com.google.common.annotations.VisibleForTesting;
 
 /**
  * Builder utilities to construct various objects.
@@ -156,12 +158,14 @@ public class BuilderUtils {
     return cId;
   }
 
-  public static Token newContainerToken(ContainerId cId, String host,
-      int port, String user, Resource r, long expiryTime, int masterKeyId,
-      byte[] password, long rmIdentifier) throws IOException {
+  public static Token newContainerToken(ContainerId cId, int containerVersion,
+      String host, int port, String user, Resource r, long expiryTime,
+      int masterKeyId, byte[] password, long rmIdentifier) throws IOException {
     ContainerTokenIdentifier identifier =
-        new ContainerTokenIdentifier(cId, host + ":" + port, user, r,
-          expiryTime, masterKeyId, rmIdentifier, Priority.newInstance(0), 0);
+        new ContainerTokenIdentifier(cId, containerVersion, host + ":" + port,
+            user, r, expiryTime, masterKeyId, rmIdentifier,
+            Priority.newInstance(0), 0, null, CommonNodeLabelsManager.NO_LABEL,
+            ContainerType.TASK, ExecutionType.GUARANTEED);
     return newContainerToken(BuilderUtils.newNodeId(host, port), password,
         identifier);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
index 55ac875..d485e6b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
@@ -160,6 +160,7 @@ message NMContainerStatusProto {
   optional int32 container_exit_status = 6;
   optional int64 creation_time = 7;
   optional string nodeLabelExpression = 8;
+  optional int32 version = 9;
 }
 
 message SCMUploaderNotifyRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java
index 35aa25e..9f4b436 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java
@@ -63,7 +63,7 @@ public class TestProtocolRecords {
     Resource resource = Resource.newInstance(1000, 200);
 
     NMContainerStatus report =
-        NMContainerStatus.newInstance(containerId,
+        NMContainerStatus.newInstance(containerId, 0,
           ContainerState.COMPLETE, resource, "diagnostics",
           ContainerExitStatus.ABORTED, Priority.newInstance(10), 1234);
     NMContainerStatus reportProto =
@@ -87,7 +87,7 @@ public class TestProtocolRecords {
     ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
 
     NMContainerStatus containerReport =
-        NMContainerStatus.newInstance(containerId,
+        NMContainerStatus.newInstance(containerId, 0,
           ContainerState.RUNNING, Resource.newInstance(1024, 1), "diagnostics",
           0, Priority.newInstance(10), 1234);
     List<NMContainerStatus> reports = Arrays.asList(containerReport);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestRegisterNodeManagerRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestRegisterNodeManagerRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestRegisterNodeManagerRequest.java
index 947dec1..9f91b87 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestRegisterNodeManagerRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestRegisterNodeManagerRequest.java
@@ -40,7 +40,7 @@ public class TestRegisterNodeManagerRequest {
           "version", Arrays.asList(NMContainerStatus.newInstance(
             ContainerId.newContainerId(
               ApplicationAttemptId.newInstance(
-                ApplicationId.newInstance(1234L, 1), 1), 1),
+                ApplicationId.newInstance(1234L, 1), 1), 1), 0,
             ContainerState.RUNNING, Resource.newInstance(1024, 1), "good", -1,
             Priority.newInstance(0), 1234)), Arrays.asList(
             ApplicationId.newInstance(1234L, 1),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index de79e89..8c060bc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -1016,9 +1016,12 @@ public class ContainerManagerImpl extends CompositeService implements
           }
         }
 
+        this.context.getNMStateStore().storeContainer(containerId,
+            containerTokenIdentifier.getVersion(), request);
         dispatcher.getEventHandler().handle(
           new ApplicationContainerInitEvent(container));
-        this.context.getNMStateStore().storeContainer(containerId, request);
 
         this.context.getContainerTokenSecretManager().startContainerSuccessful(
           containerTokenIdentifier);
@@ -1100,7 +1103,8 @@ public class ContainerManagerImpl extends CompositeService implements
           // an updated NMToken.
           updateNMTokenIdentifier(nmTokenIdentifier);
           Resource resource = containerTokenIdentifier.getResource();
-          changeContainerResourceInternal(containerId, resource, true);
+          changeContainerResourceInternal(containerId,
+              containerTokenIdentifier.getVersion(), resource, true);
           successfullyIncreasedContainers.add(containerId);
         } catch (YarnException | InvalidToken e) {
           failedContainers.put(containerId, SerializedException.newInstance(e));
@@ -1114,8 +1118,8 @@ public class ContainerManagerImpl extends CompositeService implements
   }
 
   @SuppressWarnings("unchecked")
-  private void changeContainerResourceInternal(
-      ContainerId containerId, Resource targetResource, boolean increase)
+  private void changeContainerResourceInternal(ContainerId containerId,
+      int containerVersion, Resource targetResource, boolean increase)
           throws YarnException, IOException {
     Container container = context.getContainers().get(containerId);
     // Check container existence
@@ -1182,7 +1186,7 @@ public class ContainerManagerImpl extends CompositeService implements
       if (!serviceStopped) {
         // Persist container resource change for recovery
         this.context.getNMStateStore().storeContainerResourceChanged(
-            containerId, targetResource);
+            containerId, containerVersion, targetResource);
         getContainersMonitor().handle(
             new ChangeMonitoringContainerResourceEvent(
                 containerId, targetResource));
@@ -1443,7 +1447,7 @@ public class ContainerManagerImpl extends CompositeService implements
           : containersDecreasedEvent.getContainersToDecrease()) {
         try {
           changeContainerResourceInternal(container.getId(),
-              container.getResource(), false);
+              container.getVersion(), container.getResource(), false);
         } catch (YarnException e) {
           LOG.error("Unable to decrease container resource", e);
         } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 3908971..0244d90 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -103,6 +103,7 @@ public class ContainerImpl implements Container {
   private final ContainerId containerId;
   private volatile Resource resource;
   private final String user;
+  private int version;
   private int exitCode = ContainerExitStatus.INVALID;
   private final StringBuilder diagnostics;
   private final int diagnosticsMaxSize;
@@ -152,6 +153,7 @@ public class ContainerImpl implements Container {
     this.daemonConf = conf;
     this.dispatcher = dispatcher;
     this.stateStore = context.getNMStateStore();
+    this.version = containerTokenIdentifier.getVersion();
     this.launchContext = launchContext;
     if (launchContext != null
         && launchContext.getContainerRetryContext() != null) {
@@ -223,6 +225,7 @@ public class ContainerImpl implements Container {
       this.resource = Resource.newInstance(recoveredCapability.getMemorySize(),
           recoveredCapability.getVirtualCores());
     }
+    this.version = rcs.getVersion();
     this.remainingRetryAttempts = rcs.getRemainingRetryAttempts();
     this.workDir = rcs.getWorkDir();
     this.logDir = rcs.getLogDir();
@@ -525,8 +528,8 @@ public class ContainerImpl implements Container {
   public NMContainerStatus getNMContainerStatus() {
     this.readLock.lock();
     try {
-      return NMContainerStatus.newInstance(this.containerId, getCurrentState(),
-          getResource(), diagnostics.toString(), exitCode,
+      return NMContainerStatus.newInstance(this.containerId, this.version,
+          getCurrentState(), getResource(), diagnostics.toString(), exitCode,
           containerTokenIdentifier.getPriority(),
           containerTokenIdentifier.getCreationTime(),
           containerTokenIdentifier.getNodeLabelExpression());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
index 38b1b07..5d2f4d4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
@@ -128,7 +128,8 @@ public class QueuingContainerManagerImpl extends ContainerManagerImpl {
       startAllocatedContainer(allocatedContInfo);
     } else {
       ContainerId cIdToStart = containerTokenIdentifier.getContainerID();
-      this.context.getNMStateStore().storeContainer(cIdToStart, request);
+      this.context.getNMStateStore().storeContainer(cIdToStart,
+          containerTokenIdentifier.getVersion(), request);
       this.context.getNMStateStore().storeContainerQueued(cIdToStart);
       LOG.info("No available resources for container {} to start its execution "
           + "immediately.", cIdToStart);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
index 5fe2713..a30024a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
@@ -104,6 +104,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   private static final String CONTAINERS_KEY_PREFIX =
       "ContainerManager/containers/";
   private static final String CONTAINER_REQUEST_KEY_SUFFIX = "/request";
+  private static final String CONTAINER_VERSION_KEY_SUFFIX = "/version";
   private static final String CONTAINER_DIAGS_KEY_SUFFIX = "/diagnostics";
   private static final String CONTAINER_LAUNCHED_KEY_SUFFIX = "/launched";
   private static final String CONTAINER_QUEUED_KEY_SUFFIX = "/queued";
@@ -238,6 +239,8 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
       if (suffix.equals(CONTAINER_REQUEST_KEY_SUFFIX)) {
         rcs.startRequest = new StartContainerRequestPBImpl(
             StartContainerRequestProto.parseFrom(entry.getValue()));
+      } else if (suffix.equals(CONTAINER_VERSION_KEY_SUFFIX)) {
+        rcs.version = Integer.parseInt(asString(entry.getValue()));
       } else if (suffix.equals(CONTAINER_DIAGS_KEY_SUFFIX)) {
         rcs.diagnostics = asString(entry.getValue());
       } else if (suffix.equals(CONTAINER_QUEUED_KEY_SUFFIX)) {
@@ -272,18 +275,27 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   }
 
   @Override
-  public void storeContainer(ContainerId containerId,
+  public void storeContainer(ContainerId containerId, int containerVersion,
       StartContainerRequest startRequest) throws IOException {
     if (LOG.isDebugEnabled()) {
       LOG.debug("storeContainer: containerId= " + containerId
           + ", startRequest= " + startRequest);
     }
-
-    String key = CONTAINERS_KEY_PREFIX + containerId.toString()
+    String keyRequest = CONTAINERS_KEY_PREFIX + containerId.toString()
         + CONTAINER_REQUEST_KEY_SUFFIX;
+    String keyVersion = CONTAINERS_KEY_PREFIX + containerId.toString()
+        + CONTAINER_VERSION_KEY_SUFFIX;
     try {
-      db.put(bytes(key),
-        ((StartContainerRequestPBImpl) startRequest).getProto().toByteArray());
+      WriteBatch batch = db.createWriteBatch();
+      try {
+        batch.put(bytes(keyRequest),
+            ((StartContainerRequestPBImpl) startRequest)
+                .getProto().toByteArray());
+        batch.put(bytes(keyVersion), bytes(Integer.toString(containerVersion)));
+        db.write(batch);
+      } finally {
+        batch.close();
+      }
     } catch (DBException e) {
       throw new IOException(e);
     }
@@ -339,18 +351,27 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
 
   @Override
   public void storeContainerResourceChanged(ContainerId containerId,
-      Resource capability) throws IOException {
+      int containerVersion, Resource capability) throws IOException {
     if (LOG.isDebugEnabled()) {
       LOG.debug("storeContainerResourceChanged: containerId=" + containerId
           + ", capability=" + capability);
     }
 
-    String key = CONTAINERS_KEY_PREFIX + containerId.toString()
+    String keyResChng = CONTAINERS_KEY_PREFIX + containerId.toString()
         + CONTAINER_RESOURCE_CHANGED_KEY_SUFFIX;
+    String keyVersion = CONTAINERS_KEY_PREFIX + containerId.toString()
+        + CONTAINER_VERSION_KEY_SUFFIX;
     try {
-      // New value will overwrite old values for the same key
-      db.put(bytes(key),
-          ((ResourcePBImpl) capability).getProto().toByteArray());
+      WriteBatch batch = db.createWriteBatch();
+      try {
+        // New value will overwrite old values for the same key
+        batch.put(bytes(keyResChng),
+            ((ResourcePBImpl) capability).getProto().toByteArray());
+        batch.put(bytes(keyVersion), bytes(Integer.toString(containerVersion)));
+        db.write(batch);
+      } finally {
+        batch.close();
+      }
     } catch (DBException e) {
       throw new IOException(e);
     }



[13/19] hadoop git commit: HADOOP-13355. Handle HADOOP_CLIENT_OPTS in a function (aw)

Posted by aw...@apache.org.
HADOOP-13355. Handle HADOOP_CLIENT_OPTS in a function (aw)

Signed-off-by: Allen Wittenauer <aw...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a72b64e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a72b64e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a72b64e8

Branch: refs/heads/HADOOP-13341
Commit: a72b64e8570716adaec38d3eded3220afb20e192
Parents: fd36192
Author: Allen Wittenauer <aw...@apache.org>
Authored: Fri Aug 26 14:33:33 2016 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Aug 31 08:04:44 2016 -0700

----------------------------------------------------------------------
 .../hadoop-common/src/main/bin/hadoop           |  6 +--
 .../src/main/bin/hadoop-functions.sh            | 21 ++++++++++
 .../test/scripts/hadoop_add_client_opts.bats    | 40 ++++++++++++++++++++
 .../hadoop-hdfs/src/main/bin/hdfs               | 14 +------
 hadoop-mapreduce-project/bin/mapred             | 12 +-----
 .../main/shellprofile.d/hadoop-archive-logs.sh  |  2 -
 .../src/main/shellprofile.d/hadoop-distcp.sh    |  4 --
 .../src/main/shellprofile.d/hadoop-extras.sh    |  2 -
 .../src/main/shellprofile.d/hadoop-rumen.sh     |  4 --
 .../hadoop-sls/src/main/bin/rumen2sls.sh        |  3 +-
 hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh  |  3 +-
 .../src/main/shellprofile.d/hadoop-streaming.sh |  4 --
 hadoop-yarn-project/hadoop-yarn/bin/yarn        | 34 ++++++-----------
 13 files changed, 81 insertions(+), 68 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a72b64e8/hadoop-common-project/hadoop-common/src/main/bin/hadoop
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
index b57a4c1..4aae621 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
@@ -161,10 +161,6 @@ function hadoopcmd_case
       fi
     ;;
   esac
-
-  # Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
-  hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-  HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
 }
 
 # This script runs the hadoop core commands.
@@ -205,6 +201,8 @@ fi
 
 hadoop_verify_user "${HADOOP_SUBCMD}"
 
+hadoop_add_client_opts
+
 if [[ ${HADOOP_WORKER_MODE} = true ]]; then
   hadoop_common_worker_mode_execute "${HADOOP_COMMON_HOME}/bin/hadoop" "${HADOOP_USER_PARAMS[@]}"
   exit $?

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a72b64e8/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 6e58dca..9003913 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -306,6 +306,13 @@ function hadoop_bootstrap
   HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}
   HADOOP_TOOLS_LIB_JARS_DIR=${HADOOP_TOOLS_LIB_JARS_DIR:-"${HADOOP_TOOLS_DIR}/lib"}
 
+  # by default, whatever we are about to run doesn't support
+  # daemonization
+  HADOOP_SUBCMD_SUPPORTDAEMONIZATION=false
+
+  # shellcheck disable=SC2034
+  HADOOP_SUBCMD_SECURESERVICE=false
+
   # usage output set to zero
   hadoop_reset_usage
 
@@ -1230,6 +1237,20 @@ function hadoop_translate_cygwin_path
   fi
 }
 
+## @description  Adds the HADOOP_CLIENT_OPTS variable to
+## @description  HADOOP_OPTS if HADOOP_SUBCMD_SUPPORTDAEMONIZATION is false
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+function hadoop_add_client_opts
+{
+  if [[ "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" = false
+     || -z "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" ]]; then
+    hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+  fi
+}
+
 ## @description  Finish configuring Hadoop specific system properties
 ## @description  prior to executing Java
 ## @audience     private

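For illustration, a minimal sketch of how the new hadoop_add_client_opts
behaves once hadoop-functions.sh is sourced (variable values here are
arbitrary; compare the bats tests below):

  HADOOP_OPTS="-Dbase=1"
  HADOOP_CLIENT_OPTS="-Xmx512m"
  HADOOP_SUBCMD_SUPPORTDAEMONIZATION=false
  hadoop_add_client_opts
  echo "${HADOOP_OPTS}"   # "-Dbase=1 -Xmx512m": client opts appended

  HADOOP_OPTS="-Dbase=1"
  HADOOP_SUBCMD_SUPPORTDAEMONIZATION=true
  hadoop_add_client_opts
  echo "${HADOOP_OPTS}"   # "-Dbase=1": daemons do not pick up client opts
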
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a72b64e8/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_client_opts.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_client_opts.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_client_opts.bats
new file mode 100644
index 0000000..bc5051d
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_client_opts.bats
@@ -0,0 +1,40 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_subcommand_opts (daemonization false)" {
+  HADOOP_OPTS="1"
+  HADOOP_CLIENT_OPTS="2"
+  HADOOP_SUBCMD_SUPPORTDAEMONIZATION="false"
+  hadoop_add_client_opts
+  [ "${HADOOP_OPTS}" = "1 2" ]
+}
+
+@test "hadoop_subcommand_opts (daemonization true)" {
+  HADOOP_OPTS="1"
+  HADOOP_CLIENT_OPTS="2"
+  HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+  hadoop_add_client_opts
+  [ "${HADOOP_OPTS}" = "1" ]
+}
+
+@test "hadoop_subcommand_opts (daemonization empty)" {
+  HADOOP_OPTS="1"
+  HADOOP_CLIENT_OPTS="2"
+  unset HADOOP_SUBCMD_SUPPORTDAEMONIZATION
+  hadoop_add_client_opts
+  [ "${HADOOP_OPTS}" = "1 2" ]
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a72b64e8/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 5059528..41b3c12 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -118,18 +118,12 @@ function hdfscmd_case
     ;;
     dfs)
       HADOOP_CLASSNAME=org.apache.hadoop.fs.FsShell
-      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
     ;;
     dfsadmin)
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSAdmin
-      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
     ;;
     diskbalancer)
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DiskBalancer
-      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
     ;;
     envvars)
       echo "JAVA_HOME='${JAVA_HOME}'"
@@ -144,16 +138,12 @@ function hdfscmd_case
     ;;
     erasurecode)
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.erasurecode.ECCli
-      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
     ;;
     fetchdt)
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
     ;;
     fsck)
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSck
-      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
     ;;
     getconf)
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.GetConf
@@ -163,8 +153,6 @@ function hdfscmd_case
     ;;
     haadmin)
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSHAAdmin
-      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
     ;;
     journalnode)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
@@ -293,6 +281,8 @@ fi
 
 hadoop_verify_user "${HADOOP_SUBCMD}"
 
+hadoop_add_client_opts
+
 if [[ ${HADOOP_WORKER_MODE} = true ]]; then
   hadoop_common_worker_mode_execute "${HADOOP_HDFS_HOME}/bin/hdfs" "${HADOOP_USER_PARAMS[@]}"
   exit $?

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a72b64e8/hadoop-mapreduce-project/bin/mapred
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/bin/mapred b/hadoop-mapreduce-project/bin/mapred
index 046d48c..3243d20 100755
--- a/hadoop-mapreduce-project/bin/mapred
+++ b/hadoop-mapreduce-project/bin/mapred
@@ -79,31 +79,21 @@ function mapredcmd_case
     ;;
     hsadmin)
       HADOOP_CLASSNAME=org.apache.hadoop.mapreduce.v2.hs.client.HSAdmin
-      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
     ;;
     job)
       HADOOP_CLASSNAME=org.apache.hadoop.mapred.JobClient
-      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
     ;;
     pipes)
       HADOOP_CLASSNAME=org.apache.hadoop.mapred.pipes.Submitter
-      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
     ;;
     queue)
       HADOOP_CLASSNAME=org.apache.hadoop.mapred.JobQueueClient
     ;;
     sampler)
       HADOOP_CLASSNAME=org.apache.hadoop.mapred.lib.InputSampler
-      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
     ;;
     version)
       HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
-      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
     ;;
     *)
       HADOOP_CLASSNAME="${subcmd}"
@@ -152,6 +142,8 @@ fi
 
 hadoop_verify_user "${HADOOP_SUBCMD}"
 
+hadoop_add_client_opts
+
 if [[ ${HADOOP_SLAVE_MODE} = true ]]; then
   hadoop_common_slave_mode_execute "${HADOOP_MAPRED_HOME}/bin/mapred" "${HADOOP_USER_PARAMS[@]}"
   exit $?

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a72b64e8/hadoop-tools/hadoop-archive-logs/src/main/shellprofile.d/hadoop-archive-logs.sh
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archive-logs/src/main/shellprofile.d/hadoop-archive-logs.sh b/hadoop-tools/hadoop-archive-logs/src/main/shellprofile.d/hadoop-archive-logs.sh
index ae7b6c6..c889816 100755
--- a/hadoop-tools/hadoop-archive-logs/src/main/shellprofile.d/hadoop-archive-logs.sh
+++ b/hadoop-tools/hadoop-archive-logs/src/main/shellprofile.d/hadoop-archive-logs.sh
@@ -32,8 +32,6 @@ function mapred_subcommand_archive-logs
   # shellcheck disable=SC2034
   HADOOP_CLASSNAME=org.apache.hadoop.tools.HadoopArchiveLogs
   hadoop_add_to_classpath_tools hadoop-archive-logs
-  hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-  HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
 }
 
 fi

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a72b64e8/hadoop-tools/hadoop-distcp/src/main/shellprofile.d/hadoop-distcp.sh
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/shellprofile.d/hadoop-distcp.sh b/hadoop-tools/hadoop-distcp/src/main/shellprofile.d/hadoop-distcp.sh
index 0178c54..6e93ec1 100755
--- a/hadoop-tools/hadoop-distcp/src/main/shellprofile.d/hadoop-distcp.sh
+++ b/hadoop-tools/hadoop-distcp/src/main/shellprofile.d/hadoop-distcp.sh
@@ -32,8 +32,6 @@ function hadoop_subcommand_distcp
   # shellcheck disable=SC2034
   HADOOP_CLASSNAME=org.apache.hadoop.tools.DistCp
   hadoop_add_to_classpath_tools hadoop-distcp
-  hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-  HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
 }
 
 fi
@@ -55,8 +53,6 @@ function mapred_subcommand_distcp
   # shellcheck disable=SC2034
   HADOOP_CLASSNAME=org.apache.hadoop.tools.DistCp
   hadoop_add_to_classpath_tools hadoop-distcp
-  hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-  HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
 }
 
 fi

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a72b64e8/hadoop-tools/hadoop-extras/src/main/shellprofile.d/hadoop-extras.sh
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-extras/src/main/shellprofile.d/hadoop-extras.sh b/hadoop-tools/hadoop-extras/src/main/shellprofile.d/hadoop-extras.sh
index 829d406..1ce9aee 100755
--- a/hadoop-tools/hadoop-extras/src/main/shellprofile.d/hadoop-extras.sh
+++ b/hadoop-tools/hadoop-extras/src/main/shellprofile.d/hadoop-extras.sh
@@ -32,8 +32,6 @@ function hadoop_subcommand_distch
   # shellcheck disable=SC2034
   HADOOP_CLASSNAME=org.apache.hadoop.tools.DistCh
   hadoop_add_to_classpath_tools hadoop-extras
-  hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-  HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
 }
 
 fi

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a72b64e8/hadoop-tools/hadoop-rumen/src/main/shellprofile.d/hadoop-rumen.sh
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/shellprofile.d/hadoop-rumen.sh b/hadoop-tools/hadoop-rumen/src/main/shellprofile.d/hadoop-rumen.sh
index d7d4022..77023ff 100755
--- a/hadoop-tools/hadoop-rumen/src/main/shellprofile.d/hadoop-rumen.sh
+++ b/hadoop-tools/hadoop-rumen/src/main/shellprofile.d/hadoop-rumen.sh
@@ -30,8 +30,6 @@ function hadoop_subcommand_rumenfolder
   # shellcheck disable=SC2034
   HADOOP_CLASSNAME=org.apache.hadoop.tools.rumen.Folder
   hadoop_add_to_classpath_tools hadoop-rumen
-  hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-  HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
 }
 
 fi
@@ -51,8 +49,6 @@ function hadoop_subcommand_rumentrace
   # shellcheck disable=SC2034
   HADOOP_CLASSNAME=org.apache.hadoop.tools.rumen.TraceBuilder
   hadoop_add_to_classpath_tools hadoop-rumen
-  hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-  HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
 }
 
 fi

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a72b64e8/hadoop-tools/hadoop-sls/src/main/bin/rumen2sls.sh
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/bin/rumen2sls.sh b/hadoop-tools/hadoop-sls/src/main/bin/rumen2sls.sh
index 0bd291b..565dfe6 100644
--- a/hadoop-tools/hadoop-sls/src/main/bin/rumen2sls.sh
+++ b/hadoop-tools/hadoop-sls/src/main/bin/rumen2sls.sh
@@ -68,8 +68,7 @@ function run_sls_generator()
   hadoop_add_param args -outputJobs "-outputJobs ${outputdir}/${outputprefix}-jobs.json"
   hadoop_add_param args -outputNodes "-outputNodes ${outputdir}/${outputprefix}-nodes.json"
 
-  hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-  HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+  hadoop_add_client_opts
 
   hadoop_finalize
   # shellcheck disable=SC2086

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a72b64e8/hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh b/hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh
index 403c4bb..218dee4 100644
--- a/hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh
+++ b/hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh
@@ -96,8 +96,7 @@ function run_simulation() {
     hadoop_add_param args -printsimulation "-printsimulation"
   fi
 
-  hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-  HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+  hadoop_add_client_opts
 
   hadoop_finalize
   # shellcheck disable=SC2086

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a72b64e8/hadoop-tools/hadoop-streaming/src/main/shellprofile.d/hadoop-streaming.sh
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-streaming/src/main/shellprofile.d/hadoop-streaming.sh b/hadoop-tools/hadoop-streaming/src/main/shellprofile.d/hadoop-streaming.sh
index cca016d..c3010ff 100755
--- a/hadoop-tools/hadoop-streaming/src/main/shellprofile.d/hadoop-streaming.sh
+++ b/hadoop-tools/hadoop-streaming/src/main/shellprofile.d/hadoop-streaming.sh
@@ -46,10 +46,6 @@ function mapred_subcommand_streaming
   done
 
   IFS=${oldifs}
-
-  hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-  HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
-
 }
 
 fi

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a72b64e8/hadoop-yarn-project/hadoop-yarn/bin/yarn
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index bd91633..66a87b6 100755
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -68,8 +68,6 @@ function yarncmd_case
   case ${subcmd} in
     application|applicationattempt|container)
       HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.ApplicationCLI
-      hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
       set -- "${subcmd}" "$@"
       HADOOP_SUBCMD_ARGS=("$@")
     ;;
@@ -78,13 +76,9 @@ function yarncmd_case
     ;;
     cluster)
       HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.ClusterCLI
-      hadoop_debug "Append YARN_CLIENT_OPTS onto YARN_OPTS"
-      YARN_OPTS="${YARN_OPTS} ${YARN_CLIENT_OPTS}"
     ;;
     daemonlog)
       HADOOP_CLASSNAME=org.apache.hadoop.log.LogLevel
-      hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
     ;;
     envvars)
       echo "JAVA_HOME='${JAVA_HOME}'"
@@ -99,8 +93,6 @@ function yarncmd_case
     ;;
     jar)
       HADOOP_CLASSNAME=org.apache.hadoop.util.RunJar
-      hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
     ;;
     historyserver)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
@@ -111,13 +103,9 @@ function yarncmd_case
     ;;
     logs)
       HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.LogsCLI
-      hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
     ;;
     node)
       HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.NodeCLI
-      hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
     ;;
     nodemanager)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
@@ -142,8 +130,6 @@ function yarncmd_case
     ;;
     queue)
       HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.QueueCLI
-      hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
     ;;
     resourcemanager)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
@@ -158,13 +144,9 @@ function yarncmd_case
     ;;
     rmadmin)
       HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.cli.RMAdminCLI'
-      hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
     ;;
     scmadmin)
       HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.SCMAdmin'
-      hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
     ;;
     sharedcachemanager)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
@@ -191,8 +173,6 @@ function yarncmd_case
     ;;
     version)
       HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
-      hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
     ;;
     top)
       doNotSetCols=0
@@ -222,8 +202,6 @@ function yarncmd_case
         fi
       fi
       HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.TopCLI
-      hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS}"
       HADOOP_SUBCMD_ARGS=("$@")
     ;;
     *)
@@ -273,6 +251,18 @@ fi
 
 hadoop_verify_user "${HADOOP_SUBCMD}"
 
+# It's unclear if YARN_CLIENT_OPTS is actually a useful
+# thing to have separate from HADOOP_CLIENT_OPTS.  Someone
+# might use it, so let's not deprecate it and just override
+# HADOOP_CLIENT_OPTS instead before we (potentially) add it
+# to the command line
+if [[ -n "${YARN_CLIENT_OPTS}" ]]; then
+  # shellcheck disable=SC2034
+  HADOOP_CLIENT_OPTS=${YARN_CLIENT_OPTS}
+fi
+
+hadoop_add_client_opts
+
 if [[ ${HADOOP_WORKER_MODE} = true ]]; then
   hadoop_common_worker_mode_execute "${HADOOP_YARN_HOME}/bin/yarn" "${HADOOP_USER_PARAMS[@]}"
   exit $?


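A sketch of the precedence this introduces for yarn commands (values are
illustrative; <appId> is a placeholder):

  export HADOOP_CLIENT_OPTS="-Xmx512m"
  export YARN_CLIENT_OPTS="-Xmx1g"
  yarn logs -applicationId <appId>
  # YARN_CLIENT_OPTS replaces HADOOP_CLIENT_OPTS before
  # hadoop_add_client_opts runs, so HADOOP_OPTS gains "-Xmx1g"
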

[16/19] hadoop git commit: HADOOP-13562. Change hadoop_subcommand_opts to use only uppercase

Posted by aw...@apache.org.
HADOOP-13562. Change hadoop_subcommand_opts to use only uppercase


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5a79f70
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5a79f70
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5a79f70

Branch: refs/heads/HADOOP-13341
Commit: a5a79f70a8108bebccbe207d1957146633282a85
Parents: 944f80f
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Aug 30 08:52:41 2016 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Aug 31 08:04:44 2016 -0700

----------------------------------------------------------------------
 .../src/main/bin/hadoop-functions.sh            | 31 +++++++-------------
 .../test/scripts/hadoop_subcommand_opts.bats    | 18 ++++--------
 2 files changed, 16 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5a79f70/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index dd82347..695fd41 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -2007,8 +2007,8 @@ function hadoop_subcommand_opts
 {
   declare program=$1
   declare command=$2
-  declare var
   declare uvar
+  declare depvar
   declare uprogram
   declare ucommand
 
@@ -2029,34 +2029,25 @@ function hadoop_subcommand_opts
     ucommand=${command^^}
   fi
 
-  # HDFS_namenode_OPTS
-  # HADOOP_distcp_OPTS
-  # MAPRED_distcp_OPTS
-  # YARN_sharedcachemanger_OPTS
-  # ...
-  var="${uprogram}_${command}_OPTS"
+  uvar="${uprogram}_${ucommand}_OPTS"
 
   # Let's handle all of the deprecation cases early
-  # HADOOP_NAMENODE_OPTS -> HDFS_namenode_OPTS
-  # YARN_RESOURCEMANAGER_OPTS -> YARN_resourcemanager_OPTS
+  # HADOOP_NAMENODE_OPTS -> HDFS_NAMENODE_OPTS
 
-  uvar="${uprogram}_${ucommand}_OPTS"
-  if [[ -n ${!uvar} ]]; then
-    hadoop_deprecate_envvar "${uvar}" "${var}"
-  fi
+  depvar="HADOOP_${ucommand}_OPTS"
 
-  uvar="HADOOP_${ucommand}_OPTS"
-  if [[ -n ${!uvar} ]]; then
-    hadoop_deprecate_envvar "${uvar}" "${var}"
+  if [[ "${depvar}" != "${uvar}" ]]; then
+    if [[ -n "${!depvar}" ]]; then
+      hadoop_deprecate_envvar "${depvar}" "${uvar}"
+    fi
   fi
 
-  if [[ -n ${!var} ]]; then
-    hadoop_debug "Appending ${!var} onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${!var}"
+  if [[ -n ${!uvar} ]]; then
+    hadoop_debug "Appending ${!uvar} onto HADOOP_OPTS"
+    HADOOP_OPTS="${HADOOP_OPTS} ${!uvar}"
     return 0
   fi
 }
-
 ## @description  Add custom (program)_(command)_SECURE_EXTRA_OPTS to HADOOP_OPTS.
 ## @description  This *does not* handle the pre-3.x deprecated cases
 ## @audience     public

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5a79f70/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_subcommand_opts.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_subcommand_opts.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_subcommand_opts.bats
index 1fbf343..0010a58 100644
--- a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_subcommand_opts.bats
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_subcommand_opts.bats
@@ -29,7 +29,7 @@ load hadoop-functions_test_helper
 
 @test "hadoop_subcommand_opts (hadoop simple exist)" {
   HADOOP_OPTS="x"
-  HADOOP_test_OPTS="y"
+  HADOOP_TEST_OPTS="y"
   hadoop_subcommand_opts hadoop test
   echo "${HADOOP_OPTS}"
   [ "${HADOOP_OPTS}" = "x y" ]
@@ -37,7 +37,7 @@ load hadoop-functions_test_helper
 
 @test "hadoop_subcommand_opts (hadoop complex exist)" {
   HADOOP_OPTS="x"
-  HADOOP_test_OPTS="y z"
+  HADOOP_TEST_OPTS="y z"
   hadoop_subcommand_opts hadoop test
   echo "${HADOOP_OPTS}"
   [ "${HADOOP_OPTS}" = "x y z" ]
@@ -45,7 +45,7 @@ load hadoop-functions_test_helper
 
 @test "hadoop_subcommand_opts (hdfs simple exist)" {
   HADOOP_OPTS="x"
-  HDFS_test_OPTS="y"
+  HDFS_TEST_OPTS="y"
   hadoop_subcommand_opts hdfs test
   echo "${HADOOP_OPTS}"
   [ "${HADOOP_OPTS}" = "x y" ]
@@ -53,24 +53,16 @@ load hadoop-functions_test_helper
 
 @test "hadoop_subcommand_opts (yarn simple exist)" {
   HADOOP_OPTS="x"
-  YARN_test_OPTS="y"
+  YARN_TEST_OPTS="y"
   hadoop_subcommand_opts yarn test
   echo "${HADOOP_OPTS}"
   [ "${HADOOP_OPTS}" = "x y" ]
 }
 
-@test "hadoop_subcommand_opts (deprecation case #1)" {
+@test "hadoop_subcommand_opts (deprecation case)" {
   HADOOP_OPTS="x"
   HADOOP_NAMENODE_OPTS="y"
   hadoop_subcommand_opts hdfs namenode
   echo "${HADOOP_OPTS}"
   [ "${HADOOP_OPTS}" = "x y" ]
 }
-
-@test "hadoop_subcommand_opts (deprecation case #2)" {
-  HADOOP_OPTS="x"
-  YARN_RESOURCEMANAGER_OPTS="y"
-  hadoop_subcommand_opts yarn resourcemanager
-  echo "${HADOOP_OPTS}"
-  [ "${HADOOP_OPTS}" = "x y" ]
-}
\ No newline at end of file

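A minimal sketch of the resulting naming convention, assuming
hadoop-functions.sh is sourced (values illustrative):

  HADOOP_OPTS="x"
  HDFS_NAMENODE_OPTS="-Xmx4g"            # new, all-uppercase form
  hadoop_subcommand_opts hdfs namenode
  echo "${HADOOP_OPTS}"                  # "x -Xmx4g"

  HADOOP_OPTS="x"
  unset HDFS_NAMENODE_OPTS
  HADOOP_NAMENODE_OPTS="-Xmx4g"          # pre-3.x form, still migrated
  hadoop_subcommand_opts hdfs namenode   # warns, then appends "-Xmx4g"
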


[14/19] hadoop git commit: HADOOP-13554. Add an equivalent of hadoop_subcmd_opts for secure opts (aw)

Posted by aw...@apache.org.
HADOOP-13554. Add an equivalent of hadoop_subcmd_opts for secure opts (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/944f80f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/944f80f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/944f80f4

Branch: refs/heads/HADOOP-13341
Commit: 944f80f47bae5a7c47160b1d303a644ab43d7ea5
Parents: a72b64e
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Aug 30 08:16:23 2016 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Aug 31 08:04:44 2016 -0700

----------------------------------------------------------------------
 .../src/main/bin/hadoop-functions.sh            | 45 +++++++++++++++++
 .../scripts/hadoop_subcommand_secure_opts.bats  | 52 ++++++++++++++++++++
 2 files changed, 97 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/944f80f4/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 9003913..dd82347 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -2057,6 +2057,51 @@ function hadoop_subcommand_opts
   fi
 }
 
+## @description  Add custom (program)_(command)_SECURE_EXTRA_OPTS to HADOOP_OPTS.
+## @description  This *does not* handle the pre-3.x deprecated cases
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        program
+## @param        subcommand
+## @return       will exit on failure conditions
+function hadoop_subcommand_secure_opts
+{
+  declare program=$1
+  declare command=$2
+  declare uvar
+  declare uprogram
+  declare ucommand
+
+  if [[ -z "${program}" || -z "${command}" ]]; then
+    return 1
+  fi
+
+  # bash 4 and up have built-in ways to upper and lower
+  # case the contents of vars.  This is faster than
+  # calling tr.
+
+  if [[ -z "${BASH_VERSINFO[0]}" ]] \
+     || [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
+    uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]')
+    ucommand=$(echo "${command}" | tr '[:lower:]' '[:upper:]')
+  else
+    uprogram=${program^^}
+    ucommand=${command^^}
+  fi
+
+  # HDFS_DATANODE_SECURE_EXTRA_OPTS
+  # HDFS_NFS3_SECURE_EXTRA_OPTS
+  # ...
+  uvar="${uprogram}_${ucommand}_SECURE_EXTRA_OPTS"
+
+  if [[ -n ${!uvar} ]]; then
+    hadoop_debug "Appending ${!uvar} onto HADOOP_OPTS"
+    HADOOP_OPTS="${HADOOP_OPTS} ${!uvar}"
+    return 0
+  fi
+}
+
 ## @description  Perform the 'hadoop classpath', etc subcommand with the given
 ## @description  parameters
 ## @audience     private

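A minimal sketch of the new hook, assuming hadoop-functions.sh is sourced
(values illustrative):

  HADOOP_OPTS="x"
  HDFS_DATANODE_SECURE_EXTRA_OPTS="-Dsecure=true"
  hadoop_subcommand_secure_opts hdfs datanode
  echo "${HADOOP_OPTS}"   # "x -Dsecure=true"
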
http://git-wip-us.apache.org/repos/asf/hadoop/blob/944f80f4/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_subcommand_secure_opts.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_subcommand_secure_opts.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_subcommand_secure_opts.bats
new file mode 100644
index 0000000..1b3506c
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_subcommand_secure_opts.bats
@@ -0,0 +1,52 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_subcommand_secure_opts (missing param)" {
+  HADOOP_OPTS="x"
+  run hadoop_subcommand_secure_opts testvar
+  [ "${status}" = "1" ]
+}
+
+@test "hadoop_subcommand_secure_opts (simple not exist)" {
+  HADOOP_OPTS="x"
+  hadoop_subcommand_secure_opts hadoop subcommand
+  [ "${HADOOP_OPTS}" = "x" ]
+}
+
+@test "hadoop_subcommand_secure_opts (hadoop simple exist)" {
+  HADOOP_OPTS="x"
+  HADOOP_TEST_SECURE_EXTRA_OPTS="y"
+  hadoop_subcommand_secure_opts hadoop test
+  echo "${HADOOP_OPTS}"
+  [ "${HADOOP_OPTS}" = "x y" ]
+}
+
+@test "hadoop_subcommand_secure_opts (hadoop complex exist)" {
+  HADOOP_OPTS="x"
+  HADOOP_TEST_SECURE_EXTRA_OPTS="y z"
+  hadoop_subcommand_secure_opts hadoop test
+  echo "${HADOOP_OPTS}"
+  [ "${HADOOP_OPTS}" = "x y z" ]
+}
+
+@test "hadoop_subcommand_secure_opts (hdfs simple exist)" {
+  HADOOP_OPTS="x"
+  HDFS_TEST_SECURE_EXTRA_OPTS="y"
+  hadoop_subcommand_secure_opts hdfs test
+  echo "${HADOOP_OPTS}"
+  [ "${HADOOP_OPTS}" = "x y" ]
+}



[03/19] hadoop git commit: HDFS-9392. Admins support for maintenance state. Contributed by Ming Ma.

Posted by aw...@apache.org.
HDFS-9392. Admins support for maintenance state. Contributed by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9dcbdbdb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9dcbdbdb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9dcbdbdb

Branch: refs/heads/HADOOP-13341
Commit: 9dcbdbdb5a34d85910707f81ebc1bb1f81c99978
Parents: c4ee691
Author: Ming Ma <mi...@apache.org>
Authored: Tue Aug 30 14:00:13 2016 -0700
Committer: Ming Ma <mi...@apache.org>
Committed: Tue Aug 30 14:00:13 2016 -0700

----------------------------------------------------------------------
 .../hdfs/protocol/DatanodeAdminProperties.java  |  19 +
 .../hadoop/hdfs/protocol/DatanodeInfo.java      |  27 +-
 .../hadoop/hdfs/protocol/HdfsConstants.java     |   2 +-
 .../CombinedHostFileManager.java                |  23 +
 .../server/blockmanagement/DatanodeManager.java |  33 +-
 .../server/blockmanagement/DatanodeStats.java   |  10 +-
 .../blockmanagement/DecommissionManager.java    | 101 +++-
 .../blockmanagement/HeartbeatManager.java       |  27 +
 .../blockmanagement/HostConfigManager.java      |   7 +
 .../server/blockmanagement/HostFileManager.java |   6 +
 .../hdfs/server/namenode/FSNamesystem.java      |  29 +
 .../namenode/metrics/FSNamesystemMBean.java     |  15 +
 .../apache/hadoop/hdfs/AdminStatesBaseTest.java | 375 ++++++++++++
 .../apache/hadoop/hdfs/TestDecommission.java    | 592 ++++++-------------
 .../hadoop/hdfs/TestMaintenanceState.java       | 310 ++++++++++
 .../namenode/TestDecommissioningStatus.java     |   2 +-
 .../hadoop/hdfs/util/HostsFileWriter.java       |  55 +-
 .../hdfs/util/TestCombinedHostsFileReader.java  |   2 +-
 .../src/test/resources/dfs.hosts.json           |   2 +
 19 files changed, 1165 insertions(+), 472 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeAdminProperties.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeAdminProperties.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeAdminProperties.java
index 9f7b983..2abed81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeAdminProperties.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeAdminProperties.java
@@ -33,6 +33,7 @@ public class DatanodeAdminProperties {
   private int port;
   private String upgradeDomain;
   private AdminStates adminState = AdminStates.NORMAL;
+  private long maintenanceExpireTimeInMS = Long.MAX_VALUE;
 
   /**
    * Return the host name of the datanode.
@@ -97,4 +98,22 @@ public class DatanodeAdminProperties {
   public void setAdminState(final AdminStates adminState) {
     this.adminState = adminState;
   }
+
+  /**
+   * Get the maintenance expiration time in milliseconds.
+   * @return the maintenance expiration time in milliseconds.
+   */
+  public long getMaintenanceExpireTimeInMS() {
+    return this.maintenanceExpireTimeInMS;
+  }
+
+  /**
+   * Set the maintenance expiration time in milliseconds.
+   * @param maintenanceExpireTimeInMS
+   *        the maintenance expiration time in milliseconds.
+   */
+  public void setMaintenanceExpireTimeInMS(
+      final long maintenanceExpireTimeInMS) {
+    this.maintenanceExpireTimeInMS = maintenanceExpireTimeInMS;
+  }
 }

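For context, a hypothetical combined-hosts entry exercising the new
properties; the field names are inferred from the DatanodeAdminProperties
bean above, and the exact dfs.hosts.json schema is an assumption here:

  # write a single-entry hosts file; the timestamp is an arbitrary epoch-ms value
  echo '{"hostName": "dn1.example.com", "adminState": "IN_MAINTENANCE", "maintenanceExpireTimeInMS": 1472700000000}' > dfs.hosts.json
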
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index e04abdd..cd32a53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -83,6 +83,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   }
 
   protected AdminStates adminState;
+  private long maintenanceExpireTimeInMS;
 
   public DatanodeInfo(DatanodeInfo from) {
     super(from);
@@ -499,17 +500,28 @@ public class DatanodeInfo extends DatanodeID implements Node {
   }
 
   /**
-   * Put a node to maintenance mode.
+   * Start the maintenance operation.
    */
   public void startMaintenance() {
-    adminState = AdminStates.ENTERING_MAINTENANCE;
+    this.adminState = AdminStates.ENTERING_MAINTENANCE;
   }
 
   /**
-   * Put a node to maintenance mode.
+   * Put a node directly to maintenance mode.
    */
   public void setInMaintenance() {
-    adminState = AdminStates.IN_MAINTENANCE;
+    this.adminState = AdminStates.IN_MAINTENANCE;
+  }
+
+  /**
+   * @param maintenanceExpireTimeInMS the time, in milliseconds, until which
+   *        the DataNode remains in maintenance mode. */
+  public void setMaintenanceExpireTimeInMS(long maintenanceExpireTimeInMS) {
+    this.maintenanceExpireTimeInMS = maintenanceExpireTimeInMS;
+  }
+
+  public long getMaintenanceExpireTimeInMS() {
+    return this.maintenanceExpireTimeInMS;
   }
 
   /**
@@ -519,6 +531,9 @@ public class DatanodeInfo extends DatanodeID implements Node {
     adminState = null;
   }
 
+  public static boolean maintenanceNotExpired(long maintenanceExpireTimeInMS) {
+    return Time.monotonicNow() < maintenanceExpireTimeInMS;
+  }
   /**
    * Returns true if the node is entering_maintenance
    */
@@ -541,6 +556,10 @@ public class DatanodeInfo extends DatanodeID implements Node {
         adminState == AdminStates.IN_MAINTENANCE);
   }
 
+  public boolean maintenanceExpired() {
+    return !maintenanceNotExpired(this.maintenanceExpireTimeInMS);
+  }
+
   public boolean isInService() {
     return getAdminState() == AdminStates.NORMAL;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 8df2d54..acbc8f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -141,7 +141,7 @@ public final class HdfsConstants {
 
   // type of the datanode report
   public enum DatanodeReportType {
-    ALL, LIVE, DEAD, DECOMMISSIONING
+    ALL, LIVE, DEAD, DECOMMISSIONING, ENTERING_MAINTENANCE
   }
 
   public static final byte RS_6_3_POLICY_ID = 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java
index 3e913b9..6f9c35e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java
@@ -148,6 +148,24 @@ public class CombinedHostFileManager extends HostConfigManager {
       };
     }
 
+    synchronized long getMaintenanceExpireTimeInMS(
+        final InetSocketAddress address) {
+      Iterable<DatanodeAdminProperties> datanode = Iterables.filter(
+          allDNs.get(address.getAddress()),
+          new Predicate<DatanodeAdminProperties>() {
+            public boolean apply(DatanodeAdminProperties input) {
+              return input.getAdminState().equals(
+                  AdminStates.IN_MAINTENANCE) &&
+                  (input.getPort() == 0 ||
+                  input.getPort() == address.getPort());
+            }
+          });
+      // if DN isn't set to maintenance state, ignore MaintenanceExpireTimeInMS
+      // set in the config.
+      return datanode.iterator().hasNext() ?
+          datanode.iterator().next().getMaintenanceExpireTimeInMS() : 0;
+    }
+
     static class HostIterator extends UnmodifiableIterator<InetSocketAddress> {
       private final Iterator<Map.Entry<InetAddress,
           DatanodeAdminProperties>> it;
@@ -236,6 +254,11 @@ public class CombinedHostFileManager extends HostConfigManager {
     return hostProperties.getUpgradeDomain(dn.getResolvedAddress());
   }
 
+  @Override
+  public long getMaintenanceExpirationTimeInMS(DatanodeID dn) {
+    return hostProperties.getMaintenanceExpireTimeInMS(dn.getResolvedAddress());
+  }
+
   /**
    * Set the properties lists by the new instances. The
    * old instance is discarded.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index da02a90..fffe29c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -552,7 +552,7 @@ public class DatanodeManager {
 
 
   /** Get a datanode descriptor given corresponding DatanodeUUID */
-  DatanodeDescriptor getDatanode(final String datanodeUuid) {
+  public DatanodeDescriptor getDatanode(final String datanodeUuid) {
     if (datanodeUuid == null) {
       return null;
     }
@@ -902,10 +902,14 @@ public class DatanodeManager {
    *
    * @param nodeReg datanode
    */
-  void startDecommissioningIfExcluded(DatanodeDescriptor nodeReg) {
+  void startAdminOperationIfNecessary(DatanodeDescriptor nodeReg) {
+    long maintenanceExpireTimeInMS =
+        hostConfigManager.getMaintenanceExpirationTimeInMS(nodeReg);
     // If the registered node is in exclude list, then decommission it
     if (getHostConfigManager().isExcluded(nodeReg)) {
       decomManager.startDecommission(nodeReg);
+    } else if (nodeReg.maintenanceNotExpired(maintenanceExpireTimeInMS)) {
+      decomManager.startMaintenance(nodeReg, maintenanceExpireTimeInMS);
     }
   }
 
@@ -1017,7 +1021,7 @@ public class DatanodeManager {
           // also treat the registration message as a heartbeat
           heartbeatManager.register(nodeS);
           incrementVersionCount(nodeS.getSoftwareVersion());
-          startDecommissioningIfExcluded(nodeS);
+          startAdminOperationIfNecessary(nodeS);
           success = true;
         } finally {
           if (!success) {
@@ -1056,7 +1060,7 @@ public class DatanodeManager {
         heartbeatManager.addDatanode(nodeDescr);
         heartbeatManager.updateDnStat(nodeDescr);
         incrementVersionCount(nodeReg.getSoftwareVersion());
-        startDecommissioningIfExcluded(nodeDescr);
+        startAdminOperationIfNecessary(nodeDescr);
         success = true;
       } finally {
         if (!success) {
@@ -1122,9 +1126,14 @@ public class DatanodeManager {
       if (!hostConfigManager.isIncluded(node)) {
         node.setDisallowed(true); // case 2.
       } else {
-        if (hostConfigManager.isExcluded(node)) {
+        long maintenanceExpireTimeInMS =
+            hostConfigManager.getMaintenanceExpirationTimeInMS(node);
+        if (node.maintenanceNotExpired(maintenanceExpireTimeInMS)) {
+          decomManager.startMaintenance(node, maintenanceExpireTimeInMS);
+        } else if (hostConfigManager.isExcluded(node)) {
           decomManager.startDecommission(node); // case 3.
         } else {
+          decomManager.stopMaintenance(node);
           decomManager.stopDecommission(node); // case 4.
         }
       }
@@ -1157,7 +1166,12 @@ public class DatanodeManager {
     // A decommissioning DN may be "alive" or "dead".
     return getDatanodeListForReport(DatanodeReportType.DECOMMISSIONING);
   }
-  
+
+  /** @return list of datanodes that are entering maintenance. */
+  public List<DatanodeDescriptor> getEnteringMaintenanceNodes() {
+    return getDatanodeListForReport(DatanodeReportType.ENTERING_MAINTENANCE);
+  }
+
   /* Getter and Setter for stale DataNodes related attributes */
 
   /**
@@ -1342,6 +1356,9 @@ public class DatanodeManager {
     final boolean listDecommissioningNodes =
         type == DatanodeReportType.ALL ||
         type == DatanodeReportType.DECOMMISSIONING;
+    final boolean listEnteringMaintenanceNodes =
+        type == DatanodeReportType.ALL ||
+        type == DatanodeReportType.ENTERING_MAINTENANCE;
 
     ArrayList<DatanodeDescriptor> nodes;
     final HostSet foundNodes = new HostSet();
@@ -1353,10 +1370,12 @@ public class DatanodeManager {
       for (DatanodeDescriptor dn : datanodeMap.values()) {
         final boolean isDead = isDatanodeDead(dn);
         final boolean isDecommissioning = dn.isDecommissionInProgress();
+        final boolean isEnteringMaintenance = dn.isEnteringMaintenance();
 
         if (((listLiveNodes && !isDead) ||
             (listDeadNodes && isDead) ||
-            (listDecommissioningNodes && isDecommissioning)) &&
+            (listDecommissioningNodes && isDecommissioning) ||
+            (listEnteringMaintenanceNodes && isEnteringMaintenance)) &&
             hostConfigManager.isIncluded(dn)) {
           nodes.add(dn);
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
index bcc9bba..0d4e235 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
@@ -47,7 +47,7 @@ class DatanodeStats {
 
   synchronized void add(final DatanodeDescriptor node) {
     xceiverCount += node.getXceiverCount();
-    if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+    if (node.isInService()) {
       capacityUsed += node.getDfsUsed();
       blockPoolUsed += node.getBlockPoolUsed();
       nodesInService++;
@@ -56,7 +56,8 @@ class DatanodeStats {
       capacityRemaining += node.getRemaining();
       cacheCapacity += node.getCacheCapacity();
       cacheUsed += node.getCacheUsed();
-    } else if (!node.isDecommissioned()) {
+    } else if (node.isDecommissionInProgress() ||
+        node.isEnteringMaintenance()) {
       cacheCapacity += node.getCacheCapacity();
       cacheUsed += node.getCacheUsed();
     }
@@ -74,7 +75,7 @@ class DatanodeStats {
 
   synchronized void subtract(final DatanodeDescriptor node) {
     xceiverCount -= node.getXceiverCount();
-    if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+    if (node.isInService()) {
       capacityUsed -= node.getDfsUsed();
       blockPoolUsed -= node.getBlockPoolUsed();
       nodesInService--;
@@ -83,7 +84,8 @@ class DatanodeStats {
       capacityRemaining -= node.getRemaining();
       cacheCapacity -= node.getCacheCapacity();
       cacheUsed -= node.getCacheUsed();
-    } else if (!node.isDecommissioned()) {
+    } else if (node.isDecommissionInProgress() ||
+        node.isEnteringMaintenance()) {
       cacheCapacity -= node.getCacheCapacity();
       cacheUsed -= node.getCacheUsed();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index ec6d9ba..c456aba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -86,8 +86,11 @@ public class DecommissionManager {
   private final ScheduledExecutorService executor;
 
   /**
-   * Map containing the decommission-in-progress datanodes that are being
-   * tracked so they can be be marked as decommissioned.
+   * Map containing the DECOMMISSION_INPROGRESS or ENTERING_MAINTENANCE
+   * datanodes that are being tracked so they can be marked as
+   * DECOMMISSIONED or IN_MAINTENANCE. Even after the node is marked as
+   * IN_MAINTENANCE, the node remains in the map until its maintenance
+   * window expires, which is checked during a monitor tick.
    * <p/>
    * This holds a set of references to the under-replicated blocks on the DN at
    * the time the DN is added to the map, i.e. the blocks that are preventing
@@ -102,12 +105,12 @@ public class DecommissionManager {
    * another check is done with the actual block map.
    */
   private final TreeMap<DatanodeDescriptor, AbstractList<BlockInfo>>
-      decomNodeBlocks;
+      outOfServiceNodeBlocks;
 
   /**
-   * Tracking a node in decomNodeBlocks consumes additional memory. To limit
-   * the impact on NN memory consumption, we limit the number of nodes in 
-   * decomNodeBlocks. Additional nodes wait in pendingNodes.
+   * Tracking a node in outOfServiceNodeBlocks consumes additional memory. To
+   * limit the impact on NN memory consumption, we limit the number of nodes in
+   * outOfServiceNodeBlocks. Additional nodes wait in pendingNodes.
    */
   private final Queue<DatanodeDescriptor> pendingNodes;
 
@@ -122,7 +125,7 @@ public class DecommissionManager {
     executor = Executors.newScheduledThreadPool(1,
         new ThreadFactoryBuilder().setNameFormat("DecommissionMonitor-%d")
             .setDaemon(true).build());
-    decomNodeBlocks = new TreeMap<>();
+    outOfServiceNodeBlocks = new TreeMap<>();
     pendingNodes = new LinkedList<>();
   }
 
@@ -222,13 +225,56 @@ public class DecommissionManager {
       }
       // Remove from tracking in DecommissionManager
       pendingNodes.remove(node);
-      decomNodeBlocks.remove(node);
+      outOfServiceNodeBlocks.remove(node);
     } else {
       LOG.trace("stopDecommission: Node {} in {}, nothing to do." +
           node, node.getAdminState());
     }
   }
 
+  /**
+   * Start maintenance of the specified datanode.
+   * @param node the datanode to put into maintenance
+   * @param maintenanceExpireTimeInMS the time, in milliseconds since the
+   *          epoch, at which the maintenance state expires
+   */
+  @VisibleForTesting
+  public void startMaintenance(DatanodeDescriptor node,
+      long maintenanceExpireTimeInMS) {
+    // Even if the node is already in maintenance, we still need to adjust
+    // the expiration time.
+    node.setMaintenanceExpireTimeInMS(maintenanceExpireTimeInMS);
+    if (!node.isMaintenance()) {
+      // Update DN stats maintained by HeartbeatManager
+      hbManager.startMaintenance(node);
+      pendingNodes.add(node);
+    } else {
+      LOG.trace("startMaintenance: Node {} in {}, nothing to do." +
+          node, node.getAdminState());
+    }
+  }
+
+  /**
+   * Stop maintenance of the specified datanode.
+   * @param node the datanode to take out of maintenance
+   */
+  @VisibleForTesting
+  public void stopMaintenance(DatanodeDescriptor node) {
+    if (node.isMaintenance()) {
+      // Update DN stats maintained by HeartbeatManager
+      hbManager.stopMaintenance(node);
+
+      // TODO HDFS-9390 remove replicas from block maps
+      // or handle over replicated blocks.
+
+      // Remove from tracking in DecommissionManager
+      pendingNodes.remove(node);
+      outOfServiceNodeBlocks.remove(node);
+    } else {
+      LOG.trace("stopMaintenance: Node {} in {}, nothing to do." +
+          node, node.getAdminState());
+    }
+  }
+
   private void setDecommissioned(DatanodeDescriptor dn) {
     dn.setDecommissioned();
     LOG.info("Decommissioning complete for node {}", dn);
@@ -313,7 +359,7 @@ public class DecommissionManager {
 
   @VisibleForTesting
   public int getNumTrackedNodes() {
-    return decomNodeBlocks.size();
+    return outOfServiceNodeBlocks.size();
   }
 
   @VisibleForTesting
@@ -333,8 +379,8 @@ public class DecommissionManager {
      */
     private final int numBlocksPerCheck;
     /**
-     * The maximum number of nodes to track in decomNodeBlocks. A value of 0
-     * means no limit.
+     * The maximum number of nodes to track in outOfServiceNodeBlocks.
+     * A value of 0 means no limit.
      */
     private final int maxConcurrentTrackedNodes;
     /**
@@ -347,7 +393,7 @@ public class DecommissionManager {
      */
     private int numNodesChecked = 0;
     /**
-     * The last datanode in decomNodeBlocks that we've processed
+     * The last datanode in outOfServiceNodeBlocks that we've processed
      */
     private DatanodeDescriptor iterkey = new DatanodeDescriptor(new 
         DatanodeID("", "", "", 0, 0, 0, 0));
@@ -393,14 +439,15 @@ public class DecommissionManager {
     private void processPendingNodes() {
       while (!pendingNodes.isEmpty() &&
           (maxConcurrentTrackedNodes == 0 ||
-           decomNodeBlocks.size() < maxConcurrentTrackedNodes)) {
-        decomNodeBlocks.put(pendingNodes.poll(), null);
+          outOfServiceNodeBlocks.size() < maxConcurrentTrackedNodes)) {
+        outOfServiceNodeBlocks.put(pendingNodes.poll(), null);
       }
     }
 
     private void check() {
       final Iterator<Map.Entry<DatanodeDescriptor, AbstractList<BlockInfo>>>
-          it = new CyclicIteration<>(decomNodeBlocks, iterkey).iterator();
+          it = new CyclicIteration<>(outOfServiceNodeBlocks,
+              iterkey).iterator();
       final LinkedList<DatanodeDescriptor> toRemove = new LinkedList<>();
 
       while (it.hasNext() && !exceededNumBlocksPerCheck()) {
@@ -410,6 +457,17 @@ public class DecommissionManager {
         final DatanodeDescriptor dn = entry.getKey();
         AbstractList<BlockInfo> blocks = entry.getValue();
         boolean fullScan = false;
+        if (dn.isMaintenance()) {
+          // TODO HDFS-9390 make sure blocks are minimally replicated
+          // before transitioning the node to IN_MAINTENANCE state.
+
+          // If maintenance expires, stop tracking it.
+          if (dn.maintenanceExpired()) {
+            stopMaintenance(dn);
+            toRemove.add(dn);
+          }
+          continue;
+        }
         if (blocks == null) {
           // This is a newly added datanode, run through its list to schedule 
           // under-replicated blocks for replication and collect the blocks 
@@ -417,7 +475,7 @@ public class DecommissionManager {
           LOG.debug("Newly-added node {}, doing full scan to find " +
               "insufficiently-replicated blocks.", dn);
           blocks = handleInsufficientlyStored(dn);
-          decomNodeBlocks.put(dn, blocks);
+          outOfServiceNodeBlocks.put(dn, blocks);
           fullScan = true;
         } else {
           // This is a known datanode, check if its # of insufficiently 
@@ -436,7 +494,7 @@ public class DecommissionManager {
             LOG.debug("Node {} has finished replicating current set of "
                 + "blocks, checking with the full block map.", dn);
             blocks = handleInsufficientlyStored(dn);
-            decomNodeBlocks.put(dn, blocks);
+            outOfServiceNodeBlocks.put(dn, blocks);
           }
           // If the full scan is clean AND the node liveness is okay, 
           // we can finally mark as decommissioned.
@@ -460,11 +518,12 @@ public class DecommissionManager {
         }
         iterkey = dn;
       }
-      // Remove the datanodes that are decommissioned
+      // Remove the datanodes that are decommissioned or in service after
+      // maintenance expiration.
       for (DatanodeDescriptor dn : toRemove) {
-        Preconditions.checkState(dn.isDecommissioned(),
-            "Removing a node that is not yet decommissioned!");
-        decomNodeBlocks.remove(dn);
+        Preconditions.checkState(dn.isDecommissioned() || dn.isInService(),
+            "Removing a node that is not yet decommissioned or in service!");
+        outOfServiceNodeBlocks.remove(dn);
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
index cec4a1a..d728ee2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
@@ -265,6 +265,33 @@ class HeartbeatManager implements DatanodeStatistics {
     }
   }
 
+  synchronized void startMaintenance(final DatanodeDescriptor node) {
+    if (!node.isAlive()) {
+      LOG.info("Dead node {} is put in maintenance state immediately.", node);
+      node.setInMaintenance();
+    } else if (node.isDecommissioned()) {
+      LOG.info("Decommissioned node " + node + " is put in maintenance state"
+          + " immediately.");
+      node.setInMaintenance();
+    } else {
+      stats.subtract(node);
+      node.startMaintenance();
+      stats.add(node);
+    }
+  }
+
+  synchronized void stopMaintenance(final DatanodeDescriptor node) {
+    LOG.info("Stopping maintenance of {} node {}",
+        node.isAlive() ? "live" : "dead", node);
+    if (!node.isAlive()) {
+      node.stopMaintenance();
+    } else {
+      stats.subtract(node);
+      node.stopMaintenance();
+      stats.add(node);
+    }
+  }
+
   synchronized void stopDecommission(final DatanodeDescriptor node) {
     LOG.info("Stopping decommissioning of {} node {}",
         node.isAlive() ? "live" : "dead", node);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostConfigManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostConfigManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostConfigManager.java
index f28ed29..0ab4ebc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostConfigManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostConfigManager.java
@@ -77,4 +77,11 @@ public abstract class HostConfigManager implements Configurable {
    * @return the upgrade domain of dn.
    */
   public abstract String getUpgradeDomain(DatanodeID dn);
+
+  /**
+   * Get the maintenance expiration time in milliseconds.
+   * @param dn the DatanodeID of the datanode
+   * @return the maintenance expiration time of dn.
+   */
+  public abstract long getMaintenanceExpirationTimeInMS(DatanodeID dn);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
index bcfebf2..59f907f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
@@ -138,6 +138,12 @@ public class HostFileManager extends HostConfigManager {
     return null;
   }
 
+  @Override
+  public long getMaintenanceExpirationTimeInMS(DatanodeID dn) {
+    // The include/exclude files based config doesn't support maintenance mode.
+    return 0;
+  }
+
   /**
    * Read the includes and excludes lists from the named files.  Any previous
    * includes and excludes lists are discarded.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 52fbaa7..f4b742e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7079,5 +7079,34 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return blockManager.getBytesInFuture();
   }
 
+
+  @Override // FSNamesystemMBean
+  public int getNumInMaintenanceLiveDataNodes() {
+    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true);
+    int liveInMaintenance = 0;
+    for (DatanodeDescriptor node : live) {
+      liveInMaintenance += node.isInMaintenance() ? 1 : 0;
+    }
+    return liveInMaintenance;
+  }
+
+  @Override // FSNamesystemMBean
+  public int getNumInMaintenanceDeadDataNodes() {
+    final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
+    getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, true);
+    int deadInMaintenance = 0;
+    for (DatanodeDescriptor node : dead) {
+      deadInMaintenance += node.isInMaintenance() ? 1 : 0;
+    }
+    return deadInMaintenance;
+  }
+
+  @Override // FSNamesystemMBean
+  public int getNumEnteringMaintenanceDataNodes() {
+    return getBlockManager().getDatanodeManager().getEnteringMaintenanceNodes()
+        .size();
+  }
+
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
index b314f7f..f1e7515 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
@@ -208,4 +208,19 @@ public interface FSNamesystemMBean {
    * Return total time spent doing sync operations on FSEditLog.
    */
   String getTotalSyncTimes();
+
+  /**
+   * @return Number of IN_MAINTENANCE live data nodes
+   */
+  int getNumInMaintenanceLiveDataNodes();
+
+  /**
+   * @return Number of IN_MAINTENANCE dead data nodes
+   */
+  int getNumInMaintenanceDeadDataNodes();
+
+  /**
+   * @return Number of ENTERING_MAINTENANCE data nodes
+   */
+  int getNumEnteringMaintenanceDataNodes();
 }

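Because FSNamesystemMBean is exported over JMX, the three new counters should be visible through the NameNode's /jmx servlet once this change is running. A hedged spot-check, where the hostname, port, and FSNamesystemState bean name are assumptions rather than part of the patch:

    # Pull the new maintenance counters out of the NameNode JMX servlet;
    # nn.example.com:50070 and the bean name below are assumptions.
    curl -s 'http://nn.example.com:50070/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState' |
      grep -E 'NumInMaintenance(Live|Dead)DataNodes|NumEnteringMaintenanceDataNodes'
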
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
new file mode 100644
index 0000000..0698628
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
@@ -0,0 +1,375 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.util.HostsFileWriter;
+import org.junit.After;
+import org.junit.Before;
+
+/**
+ * This class provides utilities for testing the admin operations of nodes.
+ */
+public class AdminStatesBaseTest {
+  public static final Log LOG = LogFactory.getLog(AdminStatesBaseTest.class);
+  static final long seed = 0xDEADBEEFL;
+  static final int blockSize = 8192;
+  static final int fileSize = 16384;
+  static final int HEARTBEAT_INTERVAL = 1; // heartbeat interval in seconds
+  static final int BLOCKREPORT_INTERVAL_MSEC = 1000; //block report in msec
+  static final int NAMENODE_REPLICATION_INTERVAL = 1; //replication interval
+
+  final private Random myrand = new Random();
+
+  private HostsFileWriter hostsFileWriter;
+  private Configuration conf;
+  private MiniDFSCluster cluster = null;
+  private boolean useCombinedHostFileManager = false;
+
+  protected void setUseCombinedHostFileManager() {
+    useCombinedHostFileManager = true;
+  }
+
+  protected Configuration getConf() {
+    return conf;
+  }
+
+  protected MiniDFSCluster getCluster() {
+    return cluster;
+  }
+
+  @Before
+  public void setup() throws IOException {
+    // Set up the hosts/exclude files.
+    hostsFileWriter = new HostsFileWriter();
+    conf = new HdfsConfiguration();
+
+    if (useCombinedHostFileManager) {
+      conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
+          CombinedHostFileManager.class, HostConfigManager.class);
+    }
+
+    // Setup conf
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
+        false);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
+        200);
+    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
+    conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
+        BLOCKREPORT_INTERVAL_MSEC);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
+        NAMENODE_REPLICATION_INTERVAL);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
+
+    hostsFileWriter.initialize(conf, "temp/admin");
+  }
+
+  @After
+  public void teardown() throws IOException {
+    hostsFileWriter.cleanup();
+    shutdownCluster();
+  }
+
+  protected void writeFile(FileSystem fileSys, Path name, int repl)
+      throws IOException {
+    writeFile(fileSys, name, repl, 2);
+  }
+
+  protected void writeFile(FileSystem fileSys, Path name, int repl,
+      int numOfBlocks) throws IOException {
+    writeFile(fileSys, name, repl, numOfBlocks, true);
+  }
+
+  protected FSDataOutputStream writeFile(FileSystem fileSys, Path name,
+      int repl, int numOfBlocks, boolean completeFile)
+    throws IOException {
+    // create and write a file that contains numOfBlocks blocks of data
+    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
+        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+        (short) repl, blockSize);
+    byte[] buffer = new byte[blockSize*numOfBlocks];
+    Random rand = new Random(seed);
+    rand.nextBytes(buffer);
+    stm.write(buffer);
+    LOG.info("Created file " + name + " with " + repl + " replicas.");
+    if (completeFile) {
+      stm.close();
+      return null;
+    } else {
+      // Do not close stream, return it
+      // so that it is not garbage collected
+      return stm;
+    }
+  }
+
+  /*
+   * Decommission the DN identified by datanodeUuid, or put it into
+   * maintenance; pick one random node if datanodeUuid is null.
+   * Then wait for the node to reach the given {@code waitForState}.
+   */
+  protected DatanodeInfo takeNodeOutofService(int nnIndex,
+      String datanodeUuid, long maintenanceExpirationInMS,
+      ArrayList<DatanodeInfo> decommissionedNodes,
+      AdminStates waitForState) throws IOException {
+    return takeNodeOutofService(nnIndex, datanodeUuid,
+        maintenanceExpirationInMS, decommissionedNodes, null, waitForState);
+  }
+
+  /*
+   * Decommission the DN identified by datanodeUuid, or put it into
+   * maintenance; pick a random node if datanodeUuid == null.
+   * Then wait for the node to reach the given {@code waitForState}.
+   */
+  protected DatanodeInfo takeNodeOutofService(int nnIndex,
+      String datanodeUuid, long maintenanceExpirationInMS,
+      List<DatanodeInfo> decommissionedNodes,
+      Map<DatanodeInfo, Long> inMaintenanceNodes, AdminStates waitForState)
+      throws IOException {
+    DFSClient client = getDfsClient(nnIndex);
+    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.ALL);
+    boolean isDecommissionRequest =
+        waitForState == AdminStates.DECOMMISSION_INPROGRESS ||
+        waitForState == AdminStates.DECOMMISSIONED;
+
+    //
+    // pick one datanode randomly unless the caller specifies one.
+    //
+    int index = 0;
+    if (datanodeUuid == null) {
+      boolean found = false;
+      while (!found) {
+        index = myrand.nextInt(info.length);
+        if ((isDecommissionRequest && !info[index].isDecommissioned()) ||
+            (!isDecommissionRequest && !info[index].isInMaintenance())) {
+          found = true;
+        }
+      }
+    } else {
+      // The caller specifies a DN
+      for (; index < info.length; index++) {
+        if (info[index].getDatanodeUuid().equals(datanodeUuid)) {
+          break;
+        }
+      }
+      if (index == info.length) {
+        throw new IOException("invalid datanodeUuid " + datanodeUuid);
+      }
+    }
+    String nodename = info[index].getXferAddr();
+    LOG.info("Taking node: " + nodename + " out of service");
+
+    ArrayList<String> decommissionNodes = new ArrayList<String>();
+    if (decommissionedNodes != null) {
+      for (DatanodeInfo dn : decommissionedNodes) {
+        decommissionNodes.add(dn.getName());
+      }
+    }
+    Map<String, Long> maintenanceNodes = new HashMap<>();
+    if (inMaintenanceNodes != null) {
+      for (Map.Entry<DatanodeInfo, Long> dn :
+          inMaintenanceNodes.entrySet()) {
+        maintenanceNodes.put(dn.getKey().getName(), dn.getValue());
+      }
+    }
+
+    if (isDecommissionRequest) {
+      decommissionNodes.add(nodename);
+    } else {
+      maintenanceNodes.put(nodename, maintenanceExpirationInMS);
+    }
+
+    // write node names into the json host file.
+    hostsFileWriter.initOutOfServiceHosts(decommissionNodes, maintenanceNodes);
+    refreshNodes(nnIndex);
+    DatanodeInfo ret = NameNodeAdapter.getDatanode(
+        cluster.getNamesystem(nnIndex), info[index]);
+    waitNodeState(ret, waitForState);
+    return ret;
+  }
+
+  /* Ask a specific NN to put the datanode in service and wait for it
+   * to reach the NORMAL state.
+   */
+  protected void putNodeInService(int nnIndex,
+      DatanodeInfo outOfServiceNode) throws IOException {
+    LOG.info("Putting node: " + outOfServiceNode + " in service");
+    ArrayList<String> decommissionNodes = new ArrayList<>();
+    Map<String, Long> maintenanceNodes = new HashMap<>();
+
+    DatanodeManager dm =
+        cluster.getNamesystem(nnIndex).getBlockManager().getDatanodeManager();
+    List<DatanodeDescriptor> nodes =
+        dm.getDatanodeListForReport(DatanodeReportType.ALL);
+    for (DatanodeDescriptor node : nodes) {
+      if (node.isMaintenance()) {
+        maintenanceNodes.put(node.getName(),
+            node.getMaintenanceExpireTimeInMS());
+      } else if (node.isDecommissionInProgress() || node.isDecommissioned()) {
+        decommissionNodes.add(node.getName());
+      }
+    }
+    decommissionNodes.remove(outOfServiceNode.getName());
+    maintenanceNodes.remove(outOfServiceNode.getName());
+
+    hostsFileWriter.initOutOfServiceHosts(decommissionNodes, maintenanceNodes);
+    refreshNodes(nnIndex);
+    waitNodeState(outOfServiceNode, AdminStates.NORMAL);
+  }
+
+  protected void putNodeInService(int nnIndex,
+      String datanodeUuid) throws IOException {
+    DatanodeInfo datanodeInfo =
+        getDatanodeDesriptor(cluster.getNamesystem(nnIndex), datanodeUuid);
+    putNodeInService(nnIndex, datanodeInfo);
+  }
+
+  /*
+   * Wait till node is transitioned to the expected state.
+   */
+  protected void waitNodeState(DatanodeInfo node,
+      AdminStates state) {
+    boolean done = state == node.getAdminState();
+    while (!done) {
+      LOG.info("Waiting for node " + node + " to change state to "
+          + state + " current state: " + node.getAdminState());
+      try {
+        Thread.sleep(HEARTBEAT_INTERVAL * 500);
+      } catch (InterruptedException e) {
+        // nothing
+      }
+      done = state == node.getAdminState();
+    }
+    LOG.info("node " + node + " reached the state " + state);
+  }
+
+  protected void initIncludeHost(String hostNameAndPort) throws IOException {
+    hostsFileWriter.initIncludeHost(hostNameAndPort);
+  }
+
+  protected void initIncludeHosts(String[] hostNameAndPorts)
+      throws IOException {
+    hostsFileWriter.initIncludeHosts(hostNameAndPorts);
+  }
+
+  protected void initExcludeHost(String hostNameAndPort) throws IOException {
+    hostsFileWriter.initExcludeHost(hostNameAndPort);
+  }
+
+  protected void initExcludeHosts(List<String> hostNameAndPorts)
+      throws IOException {
+    hostsFileWriter.initExcludeHosts(hostNameAndPorts);
+  }
+
+  /* Get DFSClient to the namenode */
+  protected DFSClient getDfsClient(final int nnIndex) throws IOException {
+    return new DFSClient(cluster.getNameNode(nnIndex).getNameNodeAddress(),
+        conf);
+  }
+
+  /* Validate cluster has expected number of datanodes */
+  protected static void validateCluster(DFSClient client, int numDNs)
+      throws IOException {
+    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
+    assertEquals("Number of Datanodes ", numDNs, info.length);
+  }
+
+  /** Start a MiniDFSCluster.
+   * @throws IOException */
+  protected void startCluster(int numNameNodes, int numDatanodes,
+      boolean setupHostsFile, long[] nodesCapacity,
+      boolean checkDataNodeHostConfig) throws IOException {
+    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
+        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(numNameNodes))
+        .numDataNodes(numDatanodes);
+    if (setupHostsFile) {
+      builder.setupHostsFile(setupHostsFile);
+    }
+    if (nodesCapacity != null) {
+      builder.simulatedCapacities(nodesCapacity);
+    }
+    if (checkDataNodeHostConfig) {
+      builder.checkDataNodeHostConfig(checkDataNodeHostConfig);
+    }
+    cluster = builder.build();
+    cluster.waitActive();
+    for (int i = 0; i < numNameNodes; i++) {
+      DFSClient client = getDfsClient(i);
+      validateCluster(client, numDatanodes);
+    }
+  }
+
+  protected void startCluster(int numNameNodes, int numDatanodes)
+      throws IOException {
+    startCluster(numNameNodes, numDatanodes, false, null, false);
+  }
+
+  protected void startSimpleHACluster(int numDatanodes) throws IOException {
+    cluster = new MiniDFSCluster.Builder(conf)
+        .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(
+        numDatanodes).build();
+    cluster.transitionToActive(0);
+    cluster.waitActive();
+  }
+
+  protected void shutdownCluster() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  protected void refreshNodes(final int nnIndex) throws IOException {
+    cluster.getNamesystem(nnIndex).getBlockManager().getDatanodeManager().
+        refreshNodes(conf);
+  }
+
+  protected DatanodeDescriptor getDatanodeDesriptor(
+      final FSNamesystem ns, final String datanodeUuid) {
+    return ns.getBlockManager().getDatanodeManager().getDatanode(datanodeUuid);
+  }
+
+  protected void cleanupFile(FileSystem fileSys, Path name) throws IOException {
+    assertTrue(fileSys.exists(name));
+    fileSys.delete(name, true);
+    assertTrue(!fileSys.exists(name));
+  }
+}




[11/19] hadoop git commit: HADOOP-13357. Modify common to use hadoop_subcommand_opts

Posted by aw...@apache.org.
HADOOP-13357. Modify common to use hadoop_subcommand_opts


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b621743
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b621743
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b621743

Branch: refs/heads/HADOOP-13341
Commit: 9b6217437f6aa46ce171458161c62af4ec815851
Parents: b20542f
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Aug 30 12:42:32 2016 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Aug 31 08:04:44 2016 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/src/main/bin/hadoop | 5 +++++
 1 file changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b621743/hadoop-common-project/hadoop-common/src/main/bin/hadoop
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
index 4aae621..9b682e6 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
@@ -208,8 +208,13 @@ if [[ ${HADOOP_WORKER_MODE} = true ]]; then
   exit $?
 fi
 
+hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
+
 if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
   HADOOP_SECURE_USER="${HADOOP_SUBCMD_SECUREUSER}"
+
+  hadoop_subcommand_secure_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
+
   hadoop_verify_secure_prereq
   hadoop_setup_secure_service
   priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"

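For readers without hadoop-functions.sh in front of them, the effect of the hadoop_subcommand_opts call added above is roughly the following; this is a simplified sketch, not the real function body:

    # Sketch only: derive an uppercase variable name such as
    # HDFS_NAMENODE_OPTS from (execname, subcommand) and, if that
    # variable is set, append its value to HADOOP_OPTS.
    subcommand_opts_sketch () {
      local program=$1 subcmd=$2 uvar
      uvar=$(echo "${program}_${subcmd}_OPTS" | tr '[:lower:]' '[:upper:]' | tr '-' '_')
      if [[ -n "${!uvar}" ]]; then
        HADOOP_OPTS="${HADOOP_OPTS} ${!uvar}"
      fi
    }

    # e.g. with HDFS_NAMENODE_OPTS="-Xmx4g" exported,
    #   subcommand_opts_sketch hdfs namenode
    # appends -Xmx4g to HADOOP_OPTS.
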



[15/19] hadoop git commit: HADOOP-13360. Documentation for HADOOP_subcommand_OPTS

Posted by aw...@apache.org.
HADOOP-13360. Documentation for HADOOP_subcommand_OPTS

Signed-off-by: Allen Wittenauer <aw...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/536ad247
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/536ad247
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/536ad247

Branch: refs/heads/HADOOP-13341
Commit: 536ad247bf485d12f7ec1153dc2ae8ba9c198fbe
Parents: 8165de4
Author: Allen Wittenauer <aw...@apache.org>
Authored: Wed Aug 31 07:39:34 2016 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Aug 31 08:04:44 2016 -0700

----------------------------------------------------------------------
 .../src/site/markdown/ClusterSetup.md           | 19 ++++-------
 .../src/site/markdown/UnixShellGuide.md         | 34 +++++++++++++++++---
 .../src/site/markdown/HdfsNfsGateway.md         |  2 +-
 3 files changed, 37 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/536ad247/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
index 0d551b1..f222769 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
@@ -64,17 +64,17 @@ Administrators can configure individual daemons using the configuration options
 
 | Daemon | Environment Variable |
 |:---- |:---- |
-| NameNode | HADOOP\_NAMENODE\_OPTS |
-| DataNode | HADOOP\_DATANODE\_OPTS |
-| Secondary NameNode | HADOOP\_SECONDARYNAMENODE\_OPTS |
+| NameNode | HDFS\_NAMENODE\_OPTS |
+| DataNode | HDFS\_DATANODE\_OPTS |
+| Secondary NameNode | HDFS\_SECONDARYNAMENODE\_OPTS |
 | ResourceManager | YARN\_RESOURCEMANAGER\_OPTS |
 | NodeManager | YARN\_NODEMANAGER\_OPTS |
 | WebAppProxy | YARN\_PROXYSERVER\_OPTS |
-| Map Reduce Job History Server | HADOOP\_JOB\_HISTORYSERVER\_OPTS |
+| Map Reduce Job History Server | MAPRED\_HISTORYSERVER\_OPTS |
 
-For example, To configure Namenode to use parallelGC, the following statement should be added in hadoop-env.sh :
+For example, to configure the NameNode to use parallelGC and a 4GB Java heap, the following statement should be added in hadoop-env.sh:
 
-      export HADOOP_NAMENODE_OPTS="-XX:+UseParallelGC"
+      export HDFS_NAMENODE_OPTS="-XX:+UseParallelGC -Xmx4g"
 
 See `etc/hadoop/hadoop-env.sh` for other examples.
 
@@ -91,13 +91,6 @@ It is also traditional to configure `HADOOP_HOME` in the system-wide shell envir
       HADOOP_HOME=/path/to/hadoop
       export HADOOP_HOME
 
-| Daemon | Environment Variable |
-|:---- |:---- |
-| ResourceManager | YARN\_RESOURCEMANAGER\_HEAPSIZE |
-| NodeManager | YARN\_NODEMANAGER\_HEAPSIZE |
-| WebAppProxy | YARN\_PROXYSERVER\_HEAPSIZE |
-| Map Reduce Job History Server | HADOOP\_JOB\_HISTORYSERVER\_HEAPSIZE |
-
 ### Configuring the Hadoop Daemons
 
 This section deals with important parameters to be specified in the given configuration files:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/536ad247/hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md b/hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md
index 940627d..b130f0f 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md
@@ -24,7 +24,7 @@ Apache Hadoop has many environment variables that control various aspects of the
 
 ### `HADOOP_CLIENT_OPTS`
 
-This environment variable is used for almost all end-user operations.  It can be used to set any Java options as well as any Apache Hadoop options via a system property definition. For example:
+This environment variable is used for all end-user, non-daemon operations.  It can be used to set any Java options as well as any Apache Hadoop options via a system property definition. For example:
 
 ```bash
 HADOOP_CLIENT_OPTS="-Xmx1g -Dhadoop.socks.server=localhost:4000" hadoop fs -ls /tmp
@@ -32,6 +32,18 @@ HADOOP_CLIENT_OPTS="-Xmx1g -Dhadoop.socks.server=localhost:4000" hadoop fs -ls /
 
 will increase the memory and send this command via a SOCKS proxy server.
 
+### `(command)_(subcommand)_OPTS`
+
+It is also possible to set options on a per-subcommand basis.  This allows one to create special options for particular cases.  The first part of the pattern is the command being used, but all uppercase.  The second part of the pattern is the subcommand being used, followed by the string `_OPTS`.
+
+For example, to configure `mapred distcp` to use a 2GB heap, one would use:
+
+```bash
+MAPRED_DISTCP_OPTS="-Xmx2g"
+```
+
+These options will appear *after* `HADOOP_CLIENT_OPTS` during execution and will generally take precedence.
+
 ### `HADOOP_CLASSPATH`
 
   NOTE: Site-wide settings should be configured via a shellprofile entry and permanent user-wide settings should be configured via ${HOME}/.hadooprc using the `hadoop_add_classpath` function. See below for more information.
@@ -56,6 +68,8 @@ For example:
 #
 
 HADOOP_CLIENT_OPTS="-Xmx1g"
+MAPRED_DISTCP_OPTS="-Xmx2g"
+HADOOP_DISTCP_OPTS="-Xmx2g"
 ```
 
 The `.hadoop-env` file can also be used to extend functionality and teach Apache Hadoop new tricks.  For example, to run hadoop commands accessing the server referenced in the environment variable `${HADOOP_SERVER}`, the following in the `.hadoop-env` will do just that:
@@ -71,11 +85,23 @@ One word of warning:  not all of Unix Shell API routines are available or work c
 
 ## Administrator Environment
 
-There are many environment variables that impact how the system operates.  By far, the most important are the series of `_OPTS` variables that control how daemons work.  These variables should contain all of the relevant settings for those daemons.
+In addition to the various XML files, administrators have two key capabilities for configuring Apache Hadoop when using the Unix Shell:
+
+  * Setting the many environment variables that impact how the system operates.  This guide only highlights some key ones; there is generally more information in the various `*-env.sh` files.
+
+  * Supplementing or making platform-specific changes to the existing scripts.  Apache Hadoop provides the capability to do function overrides so that the existing code base may be changed in place without copying and maintaining modified scripts.  Replacing functions is covered later under the Shell API documentation.
+
+### `(command)_(subcommand)_OPTS`
+
+By far, the most important are the series of `_OPTS` variables that control how daemons work.  These variables should contain all of the relevant settings for those daemons.
+
+Similar to the user commands above, all daemons will honor the `(command)_(subcommand)_OPTS` pattern.  It is generally recommended that these be set in `hadoop-env.sh` to guarantee that the system will know which settings it should use on restart.  Unlike user-facing subcommands, daemons will *NOT* honor `HADOOP_CLIENT_OPTS`.
+
+In addition, daemons that run in an extra security mode also support `(command)_(subcommand)_SECURE_EXTRA_OPTS`.  These options are *supplemental* to the generic `*_OPTS`, appearing after them and therefore generally taking precedence.
 
-More, detailed information is contained in `hadoop-env.sh` and the other env.sh files.
+### `(command)_(subcommand)_USER`
 
-Advanced administrators may wish to supplement or do some platform-specific fixes to the existing scripts.  In some systems, this means copying the errant script or creating a custom build with these changes.  Apache Hadoop provides the capabilities to do function overrides so that the existing code base may be changed in place without all of that work.  Replacing functions is covered later under the Shell API documentation.
+Apache Hadoop provides a way to do a user check per-subcommand.  While this method is easily circumvented and should not be considered a security feature, it does provide a mechanism by which to prevent accidents.  For example, setting `HDFS_NAMENODE_USER=hdfs` will make the `hdfs namenode` and `hdfs --daemon start namenode` commands verify that the user running the commands is the hdfs user by checking the `USER` environment variable.  This also works for non-daemons.  Setting `HADOOP_DISTCP_USER=jane` will verify that `USER` is set to `jane` before being allowed to execute the `hadoop distcp` command.
 
 ## Developer and Advanced Administrator Environment
 

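Pulling the administrator-facing pieces together, a hadoop-env.sh fragment exercising the patterns documented above might look like the following; every value here is illustrative, not a recommendation:

    # Illustrative only; names follow the (command)_(subcommand)_OPTS,
    # _SECURE_EXTRA_OPTS, and _USER patterns described in this guide.
    export HDFS_NAMENODE_OPTS="-XX:+UseParallelGC -Xmx4g"
    export HDFS_NAMENODE_USER=hdfs                       # only 'hdfs' may run the namenode
    export HDFS_DATANODE_SECURE_EXTRA_OPTS="-jvm server"
    export MAPRED_DISTCP_OPTS="-Xmx2g"                   # appended after HADOOP_CLIENT_OPTS
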
http://git-wip-us.apache.org/repos/asf/hadoop/blob/536ad247/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
index 6731189..4742637 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
@@ -183,7 +183,7 @@ It's strongly recommended for the users to update a few configuration properties
         </property>
 
 *   JVM and log settings. You can export JVM settings (e.g., heap size and GC log) in
-    HADOOP\_NFS3\_OPTS. More NFS related settings can be found in hadoop-env.sh.
+    HDFS\_NFS3\_OPTS. More NFS related settings can be found in hadoop-env.sh.
     To get NFS debug trace, you can edit the log4j.property file
     to add the following. Note, debug trace, especially for ONCRPC, can be very verbose.
 




[08/19] hadoop git commit: HDFS-10813. DiskBalancer: Add the getNodeList method in Command. Contributed by Yiqun Lin.

Posted by aw...@apache.org.
HDFS-10813. DiskBalancer: Add the getNodeList method in Command. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/20ae1fa2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/20ae1fa2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/20ae1fa2

Branch: refs/heads/HADOOP-13341
Commit: 20ae1fa259b36a7bc11b0f8de1ebf753c858f93c
Parents: d6d9cff
Author: Anu Engineer <ae...@apache.org>
Authored: Tue Aug 30 18:42:55 2016 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Tue Aug 30 18:42:55 2016 -0700

----------------------------------------------------------------------
 .../server/diskbalancer/command/Command.java    | 44 +++++++++++++++++++-
 .../command/TestDiskBalancerCommand.java        | 22 ++++++++++
 2 files changed, 65 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/20ae1fa2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index a1c15ae..5acd0ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -18,7 +18,10 @@
 
 package org.apache.hadoop.hdfs.server.diskbalancer.command;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;
 import org.apache.commons.lang.StringUtils;
@@ -221,7 +224,7 @@ public abstract class Command extends Configured {
    * @return Set of node names
    * @throws IOException
    */
-  private Set<String> getNodeList(String listArg) throws IOException {
+  protected Set<String> getNodeList(String listArg) throws IOException {
     URL listURL;
     String nodeData;
     Set<String> resultSet = new TreeSet<>();
@@ -243,6 +246,37 @@ public abstract class Command extends Configured {
   }
 
   /**
+   * Returns a list of DiskBalancer nodes from the cluster; the list is
+   * empty if no matching nodes are found.
+   *
+   * @param listArg file URL or a comma-separated list of node names.
+   * @return list of DiskBalancer nodes
+   * @throws IOException
+   */
+  protected List<DiskBalancerDataNode> getNodes(String listArg)
+      throws IOException {
+    Set<String> nodeNames = null;
+    List<DiskBalancerDataNode> nodeList = Lists.newArrayList();
+
+    if ((listArg == null) || listArg.isEmpty()) {
+      return nodeList;
+    }
+    nodeNames = getNodeList(listArg);
+
+    DiskBalancerDataNode node = null;
+    if (!nodeNames.isEmpty()) {
+      for (String name : nodeNames) {
+        node = getNode(name);
+
+        if (node != null) {
+          nodeList.add(node);
+        }
+      }
+    }
+
+    return nodeList;
+  }
+
+  /**
    * Verifies if the command line options are sane.
    *
    * @param commandName - Name of the command
@@ -471,4 +505,12 @@ public abstract class Command extends Configured {
   public int getTopNodes() {
     return topNodes;
   }
+
+  /**
+   * Sets the DiskBalancer cluster used by this command.
+   */
+  @VisibleForTesting
+  public void setCluster(DiskBalancerCluster newCluster) {
+    this.cluster = newCluster;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/20ae1fa2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index 0d24f28..7d659af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
+import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -435,4 +436,25 @@ public class TestDiskBalancerCommand {
       miniDFSCluster.shutdown();
     }
   }
+
+  @Test(timeout = 60000)
+  public void testGetNodeList() throws Exception {
+    ClusterConnector jsonConnector =
+        ConnectorFactory.getCluster(clusterJson, conf);
+    DiskBalancerCluster diskBalancerCluster =
+        new DiskBalancerCluster(jsonConnector);
+    diskBalancerCluster.readClusterInfo();
+
+    int nodeNum = 5;
+    StringBuilder listArg = new StringBuilder();
+    for (int i = 0; i < nodeNum; i++) {
+      listArg.append(diskBalancerCluster.getNodes().get(i).getDataNodeUUID())
+          .append(",");
+    }
+
+    ReportCommand command = new ReportCommand(conf, null);
+    command.setCluster(diskBalancerCluster);
+    List<DiskBalancerDataNode> nodeList = command.getNodes(listArg.toString());
+    assertEquals(nodeNum, nodeList.size());
+  }
 }

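Since getNodeList() accepts either a file URL or a comma-separated string, the end-user shape of this helper, once wired into the report command, would presumably look like the following; the flags shown are an assumption based on the existing diskbalancer CLI, not part of this patch:

    # Assumed usage: report on a comma-separated list of datanodes, or on
    # a node list read from a file URL (one node per line).
    hdfs diskbalancer -report -node dn1.example.com,dn2.example.com
    hdfs diskbalancer -report -node file:///tmp/nodes.txt
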



[09/19] hadoop git commit: HADOOP-13358. Modify HDFS to use hadoop_subcommand_opts

Posted by aw...@apache.org.
HADOOP-13358. Modify HDFS to use hadoop_subcommand_opts

Signed-off-by: Allen Wittenauer <aw...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b20542fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b20542fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b20542fe

Branch: refs/heads/HADOOP-13341
Commit: b20542fec6c7ced3d154edbcd9ef99b7ab9842ff
Parents: a5a79f7
Author: Allen Wittenauer <aw...@apache.org>
Authored: Sun Aug 28 10:45:00 2016 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Aug 31 08:04:44 2016 -0700

----------------------------------------------------------------------
 .../src/main/bin/hadoop-functions.sh            |  1 +
 .../hadoop-common/src/main/conf/hadoop-env.sh   | 28 +++++++++----------
 .../hadoop-hdfs/src/main/bin/hdfs               | 29 ++++----------------
 .../hadoop-hdfs/src/main/bin/hdfs-config.sh     | 28 ++++++++++++-------
 4 files changed, 38 insertions(+), 48 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b20542fe/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 695fd41..db868bd 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -2048,6 +2048,7 @@ function hadoop_subcommand_opts
     return 0
   fi
 }
+
 ## @description  Add custom (program)_(command)_SECURE_EXTRA_OPTS to HADOOP_OPTS.
 ## @description  This *does not* handle the pre-3.x deprecated cases
 ## @audience     public

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b20542fe/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
index f4493f1..4656f4d 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
@@ -294,16 +294,16 @@ esac
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
 # a) Set JMX options
-# export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=1026"
+# export HDFS_NAMENODE_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=1026"
 #
 # b) Set garbage collection logs
-# export HADOOP_NAMENODE_OPTS="${HADOOP_GC_SETTINGS} -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
+# export HDFS_NAMENODE_OPTS="${HADOOP_GC_SETTINGS} -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
 #
 # c) ... or set them directly
-# export HADOOP_NAMENODE_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
+# export HDFS_NAMENODE_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
 
 # this is the default:
-# export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
+# export HDFS_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
 
 ###
 # SecondaryNameNode specific parameters
@@ -313,7 +313,7 @@ esac
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
 # This is the default:
-# export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
+# export HDFS_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
 
 ###
 # DataNode specific parameters
@@ -323,7 +323,7 @@ esac
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
 # This is the default:
-# export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS"
+# export HDFS_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS"
 
 # On secure datanodes, user to run the datanode as after dropping privileges.
 # This **MUST** be uncommented to enable secure HDFS if using privileged ports
@@ -336,7 +336,7 @@ esac
 # Supplemental options for secure datanodes
 # By default, Hadoop uses jsvc which needs to know to launch a
 # server jvm.
-# export HADOOP_DN_SECURE_EXTRA_OPTS="-jvm server"
+# export HDFS_DATANODE_SECURE_EXTRA_OPTS="-jvm server"
 
 # Where datanode log files are stored in the secure data environment.
 # This will replace the hadoop.log.dir Java property in secure mode.
@@ -352,18 +352,18 @@ esac
 # These options will be appended to the options specified as HADOOP_OPTS
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
-# export HADOOP_NFS3_OPTS=""
+# export HDFS_NFS3_OPTS=""
 
 # Specify the JVM options to be used when starting the Hadoop portmapper.
 # These options will be appended to the options specified as HADOOP_OPTS
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
-# export HADOOP_PORTMAP_OPTS="-Xmx512m"
+# export HDFS_PORTMAP_OPTS="-Xmx512m"
 
 # Supplemental options for privileged gateways
 # By default, Hadoop uses jsvc which needs to know to launch a
 # server jvm.
-# export HADOOP_NFS3_SECURE_EXTRA_OPTS="-jvm server"
+# export HDFS_NFS3_SECURE_EXTRA_OPTS="-jvm server"
 
 # On privileged gateways, user to run the gateway as after dropping privileges
 # This will replace the hadoop.id.str Java property in secure mode.
@@ -376,7 +376,7 @@ esac
 # These options will be appended to the options specified as HADOOP_OPTS
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
-# export HADOOP_ZKFC_OPTS=""
+# export HDFS_ZKFC_OPTS=""
 
 ###
 # QuorumJournalNode specific parameters
@@ -385,7 +385,7 @@ esac
 # These options will be appended to the options specified as HADOOP_OPTS
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
-# export HADOOP_JOURNALNODE_OPTS=""
+# export HDFS_JOURNALNODE_OPTS=""
 
 ###
 # HDFS Balancer specific parameters
@@ -394,7 +394,7 @@ esac
 # These options will be appended to the options specified as HADOOP_OPTS
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
-# export HADOOP_BALANCER_OPTS=""
+# export HDFS_BALANCER_OPTS=""
 
 ###
 # HDFS Mover specific parameters
@@ -403,7 +403,7 @@ esac
 # These options will be appended to the options specified as HADOOP_OPTS
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
-# export HADOOP_MOVER_OPTS=""
+# export HDFS_MOVER_OPTS=""
 
 ###
 # Advanced Users Only!
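
For operators tracking the rename above: per-daemon JVM flags move from
HADOOP_(DAEMON)_OPTS to HDFS_(DAEMON)_OPTS. A minimal hadoop-env.sh sketch
under the new names (the flag values here are illustrative, not
recommendations):

  # illustrative hadoop-env.sh excerpt, new-style names
  export HDFS_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS -verbose:gc"
  export HDFS_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS"

The old HADOOP_* names keep working for now; hadoop_subcommand_opts copies
them over with a deprecation warning (see the function added in HADOOP-13356
later in this batch).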

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b20542fe/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 41b3c12..b704b00 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -79,8 +79,6 @@ function hdfscmd_case
     balancer)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.balancer.Balancer
-      hadoop_debug "Appending HADOOP_BALANCER_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_BALANCER_OPTS}"
     ;;
     cacheadmin)
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.CacheAdmin
@@ -103,13 +101,8 @@ function hdfscmd_case
         HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_DN_PID_DIR}"
         HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_DN_LOG_DIR}"
 
-        hadoop_debug "Appending HADOOP_DATANODE_OPTS onto HADOOP_OPTS"
-        hadoop_debug "Appending HADOOP_DN_SECURE_EXTRA_OPTS onto HADOOP_OPTS"
-        HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DATANODE_OPTS} ${HADOOP_DN_SECURE_EXTRA_OPTS}"
         HADOOP_CLASSNAME="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter"
       else
-        hadoop_debug "Appending HADOOP_DATANODE_OPTS onto HADOOP_OPTS"
-        HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DATANODE_OPTS}"
         HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.datanode.DataNode'
       fi
     ;;
@@ -157,8 +150,6 @@ function hdfscmd_case
     journalnode)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME='org.apache.hadoop.hdfs.qjournal.server.JournalNode'
-      hadoop_debug "Appending HADOOP_JOURNALNODE_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_JOURNALNODE_OPTS}"
     ;;
     jmxget)
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.JMXGet
@@ -169,14 +160,10 @@ function hdfscmd_case
     mover)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.mover.Mover
-      hadoop_debug "Appending HADOOP_MOVER_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_MOVER_OPTS}"
     ;;
     namenode)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.NameNode'
-      hadoop_debug "Appending HADOOP_NAMENODE_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NAMENODE_OPTS}"
       hadoop_add_param HADOOP_OPTS hdfs.audit.logger "-Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER}"
     ;;
     nfs3)
@@ -189,13 +176,8 @@ function hdfscmd_case
         HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_NFS3_PID_DIR}"
         HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_NFS3_LOG_DIR}"
 
-        hadoop_debug "Appending HADOOP_NFS3_OPTS onto HADOOP_OPTS"
-        hadoop_debug "Appending HADOOP_NFS3_SECURE_EXTRA_OPTS onto HADOOP_OPTS"
-        HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_OPTS} ${HADOOP_NFS3_SECURE_EXTRA_OPTS}"
         HADOOP_CLASSNAME=org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter
       else
-        hadoop_debug "Appending HADOOP_NFS3_OPTS onto HADOOP_OPTS"
-        HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_OPTS}"
         HADOOP_CLASSNAME=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
       fi
     ;;
@@ -211,14 +193,10 @@ function hdfscmd_case
     portmap)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME=org.apache.hadoop.portmap.Portmap
-      hadoop_debug "Appending HADOOP_PORTMAP_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_PORTMAP_OPTS}"
     ;;
     secondarynamenode)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
-      hadoop_debug "Appending HADOOP_SECONDARYNAMENODE_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_SECONDARYNAMENODE_OPTS}"
       hadoop_add_param HADOOP_OPTS hdfs.audit.logger "-Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER}"
     ;;
     snapshotDiff)
@@ -233,8 +211,6 @@ function hdfscmd_case
     zkfc)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
-      hadoop_debug "Appending HADOOP_ZKFC_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_ZKFC_OPTS}"
     ;;
     *)
       HADOOP_CLASSNAME="${subcmd}"
@@ -288,8 +264,13 @@ if [[ ${HADOOP_WORKER_MODE} = true ]]; then
   exit $?
 fi
 
+hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
+
 if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
   HADOOP_SECURE_USER="${HADOOP_SUBCMD_SECUREUSER}"
+
+  hadoop_subcommand_secure_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
+
   hadoop_verify_secure_prereq
   hadoop_setup_secure_service
   priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b20542fe/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh
old mode 100644
new mode 100755
index d440210..cba37a4
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh
@@ -26,7 +26,7 @@ function hadoop_subproject_init
       export HADOOP_HDFS_ENV_PROCESSED=true
     fi
   fi
-  
+
   # at some point in time, someone thought it would be a good idea to
   # create separate vars for every subproject.  *sigh*
   # let's perform some overrides and setup some defaults for bw compat
@@ -42,23 +42,31 @@ function hadoop_subproject_init
   hadoop_deprecate_envvar HADOOP_HDFS_NICENESS HADOOP_NICENESS
 
   hadoop_deprecate_envvar HADOOP_HDFS_STOP_TIMEOUT HADOOP_STOP_TIMEOUT
-  
+
   hadoop_deprecate_envvar HADOOP_HDFS_PID_DIR HADOOP_PID_DIR
 
   hadoop_deprecate_envvar HADOOP_HDFS_ROOT_LOGGER HADOOP_ROOT_LOGGER
 
   hadoop_deprecate_envvar HADOOP_HDFS_IDENT_STRING HADOOP_IDENT_STRING
-  
+
+  hadoop_deprecate_envvar HADOOP_DN_SECURE_EXTRA_OPTS HDFS_DATANODE_SECURE_EXTRA_OPTS
+
+  hadoop_deprecate_envvar HADOOP_NFS3_SECURE_EXTRA_OPTS HDFS_NFS3_SECURE_EXTRA_OPTS
+
+
   HADOOP_HDFS_HOME="${HADOOP_HDFS_HOME:-$HADOOP_HOME}"
-  
+
   # turn on the defaults
   export HDFS_AUDIT_LOGGER=${HDFS_AUDIT_LOGGER:-INFO,NullAppender}
-  export HADOOP_NAMENODE_OPTS=${HADOOP_NAMENODE_OPTS:-"-Dhadoop.security.logger=INFO,RFAS"}
-  export HADOOP_SECONDARYNAMENODE_OPTS=${HADOOP_SECONDARYNAMENODE_OPTS:-"-Dhadoop.security.logger=INFO,RFAS"}
-  export HADOOP_DATANODE_OPTS=${HADOOP_DATANODE_OPTS:-"-Dhadoop.security.logger=ERROR,RFAS"}
-  export HADOOP_DN_SECURE_EXTRA_OPTS=${HADOOP_DN_SECURE_EXTRA_OPTS:-"-jvm server"}
-  export HADOOP_NFS3_SECURE_EXTRA_OPTS=${HADOOP_NFS3_SECURE_EXTRA_OPTS:-"-jvm server"}
-  export HADOOP_PORTMAP_OPTS=${HADOOP_PORTMAP_OPTS:-"-Xmx512m"}
+  export HDFS_NAMENODE_OPTS=${HDFS_NAMENODE_OPTS:-"-Dhadoop.security.logger=INFO,RFAS"}
+  export HDFS_SECONDARYNAMENODE_OPTS=${HDFS_SECONDARYNAMENODE_OPTS:-"-Dhadoop.security.logger=INFO,RFAS"}
+  export HDFS_DATANODE_OPTS=${HDFS_DATANODE_OPTS:-"-Dhadoop.security.logger=ERROR,RFAS"}
+  export HDFS_PORTMAP_OPTS=${HDFS_PORTMAP_OPTS:-"-Xmx512m"}
+
+  # depending upon what is being used to start Java, these may need to be
+  # set empty. (thus no colon)
+  export HDFS_DATANODE_SECURE_EXTRA_OPTS=${HDFS_DATANODE_SECURE_EXTRA_OPTS-"-jvm server"}
+  export HDFS_NFS3_SECURE_EXTRA_OPTS=${HDFS_NFS3_SECURE_EXTRA_OPTS-"-jvm server"}
 }
 
 if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
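
A note on the "no colon" expansions just above: in bash, ${VAR-word}
substitutes word only when VAR is unset, while ${VAR:-word} also substitutes
when VAR is set but empty, so the colon-less form lets an operator
deliberately blank the jsvc flag. A quick illustration (hypothetical shell
session):

  unset HDFS_NFS3_SECURE_EXTRA_OPTS
  echo "${HDFS_NFS3_SECURE_EXTRA_OPTS--jvm server}"    # -> -jvm server
  export HDFS_NFS3_SECURE_EXTRA_OPTS=""
  echo "${HDFS_NFS3_SECURE_EXTRA_OPTS--jvm server}"    # -> empty; the empty value wins
  echo "${HDFS_NFS3_SECURE_EXTRA_OPTS:--jvm server}"   # -> -jvm server; colon form ignores empty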




[17/19] hadoop git commit: HADOOP-13359. Modify YARN to use hadoop_subcommand_opts

Posted by aw...@apache.org.
HADOOP-13359. Modify YARN to use hadoop_subcommand_opts


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44500673
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44500673
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44500673

Branch: refs/heads/HADOOP-13341
Commit: 4450067361c5a57b66ab228a283405ede1fdb49f
Parents: 9b62174
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Aug 30 13:03:45 2016 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Aug 31 08:04:44 2016 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/hadoop-yarn/bin/yarn | 17 +++++------------
 1 file changed, 5 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44500673/hadoop-yarn-project/hadoop-yarn/bin/yarn
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index 66a87b6..d2a8a50 100755
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -110,8 +110,6 @@ function yarncmd_case
     nodemanager)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.nodemanager.NodeManager'
-      hadoop_debug "Append YARN_NODEMANAGER_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_NODEMANAGER_OPTS}"
       # Backwards compatibility
       if [[ -n "${YARN_NODEMANAGER_HEAPSIZE}" ]]; then
         HADOOP_HEAPSIZE_MAX="${YARN_NODEMANAGER_HEAPSIZE}"
@@ -120,8 +118,6 @@ function yarncmd_case
     proxyserver)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer'
-      hadoop_debug "Append YARN_PROXYSERVER_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_PROXYSERVER_OPTS}"
       # Backwards compatibility
       if [[ -n "${YARN_PROXYSERVER_HEAPSIZE}" ]]; then
         # shellcheck disable=SC2034
@@ -134,8 +130,6 @@ function yarncmd_case
     resourcemanager)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.resourcemanager.ResourceManager'
-      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_RESOURCEMANAGER_OPTS}"
-      hadoop_debug "Append YARN_RESOURCEMANAGER_OPTS onto HADOOP_OPTS"
       # Backwards compatibility
       if [[ -n "${YARN_RESOURCEMANAGER_HEAPSIZE}" ]]; then
         # shellcheck disable=SC2034
@@ -151,20 +145,14 @@ function yarncmd_case
     sharedcachemanager)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.sharedcachemanager.SharedCacheManager'
-      hadoop_debug "Append YARN_SHAREDCACHEMANAGER_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_SHAREDCACHEMANAGER_OPTS}"
     ;;
     timelinereader)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderServer'
-      hadoop_debug "Append YARN_TIMELINEREADER_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_TIMELINEREADER_OPTS}"
     ;;
     timelineserver)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
-      hadoop_debug "Append YARN_TIMELINESERVER_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${YARN_TIMELINESERVER_OPTS}"
       # Backwards compatibility
       if [[ -n "${YARN_TIMELINESERVER_HEAPSIZE}" ]]; then
         # shellcheck disable=SC2034
@@ -268,8 +256,13 @@ if [[ ${HADOOP_WORKER_MODE} = true ]]; then
   exit $?
 fi
 
+hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
+
 if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
   HADOOP_SECURE_USER="${HADOOP_SUBCMD_SECUREUSER}"
+
+  hadoop_subcommand_secure_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
+
   hadoop_verify_secure_prereq
   hadoop_setup_secure_service
   priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
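
The net effect for YARN deployments, sketched under the new convention (the
GC flag is illustrative only):

  export YARN_NODEMANAGER_OPTS="-verbose:gc"
  # bin/yarn now runs, for every subcommand:
  #   hadoop_subcommand_opts yarn nodemanager
  # which appends the value onto HADOOP_OPTS, replacing the per-daemon
  # case-statement appends deleted above.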




[18/19] hadoop git commit: HADOOP-13563. hadoop_subcommand_opts should print name not actual content during debug

Posted by aw...@apache.org.
HADOOP-13563. hadoop_subcommand_opts should print name not actual content during debug

Signed-off-by: Allen Wittenauer <aw...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8165de43
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8165de43
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8165de43

Branch: refs/heads/HADOOP-13341
Commit: 8165de431ede1cdd0497e045dfb0345b48371080
Parents: 403ffae
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Aug 30 15:23:29 2016 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Aug 31 08:04:44 2016 -0700

----------------------------------------------------------------------
 .../hadoop-common/src/main/bin/hadoop-functions.sh               | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8165de43/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 91546d4..b6e2b59 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -2057,7 +2057,7 @@ function hadoop_subcommand_opts
   fi
 
   if [[ -n ${!uvar} ]]; then
-    hadoop_debug "Appending ${!uvar} onto HADOOP_OPTS"
+    hadoop_debug "Appending ${uvar} onto HADOOP_OPTS"
     HADOOP_OPTS="${HADOOP_OPTS} ${!uvar}"
     return 0
   fi
@@ -2102,7 +2102,7 @@ function hadoop_subcommand_secure_opts
   uvar="${uprogram}_${ucommand}_SECURE_EXTRA_OPTS"
 
   if [[ -n ${!uvar} ]]; then
-    hadoop_debug "Appending ${!uvar} onto HADOOP_OPTS"
+    hadoop_debug "Appending ${uvar} onto HADOOP_OPTS"
     HADOOP_OPTS="${HADOOP_OPTS} ${!uvar}"
     return 0
   fi
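
For readers unfamiliar with the idiom: ${uvar} expands to the variable name
held in uvar, while ${!uvar} is bash indirect expansion to that variable's
value, so the fix logs the name rather than a potentially long or sensitive
option string. A two-line illustration:

  uvar="HDFS_NAMENODE_OPTS"; HDFS_NAMENODE_OPTS="-Xmx4g"
  echo "Appending ${uvar} onto HADOOP_OPTS"   # name only: HDFS_NAMENODE_OPTS
  echo "${!uvar}"                             # value: -Xmx4g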




[12/19] hadoop git commit: HADOOP-13356. Add a function to handle command_subcommand_OPTS (aw)

Posted by aw...@apache.org.
HADOOP-13356. Add a function to handle command_subcommand_OPTS (aw)

Signed-off-by: Allen Wittenauer <aw...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd361929
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd361929
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd361929

Branch: refs/heads/HADOOP-13341
Commit: fd3619290e30cd0a7cb7ffa8ac2032cb3453b75c
Parents: 20ae1fa
Author: Allen Wittenauer <aw...@apache.org>
Authored: Fri Jul 8 09:25:09 2016 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Aug 31 08:04:44 2016 -0700

----------------------------------------------------------------------
 .../src/main/bin/hadoop-functions.sh            | 62 ++++++++++++++++
 .../test/scripts/hadoop_subcommand_opts.bats    | 76 ++++++++++++++++++++
 2 files changed, 138 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd361929/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 75554f0..6e58dca 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -1974,6 +1974,68 @@ function hadoop_verify_user
   fi
 }
 
+## @description  Add custom (program)_(command)_OPTS to HADOOP_OPTS.
+## @description  Also handles the deprecated cases from pre-3.x.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        program
+## @param        subcommand
+## @return       will exit on failure conditions
+function hadoop_subcommand_opts
+{
+  declare program=$1
+  declare command=$2
+  declare var
+  declare uvar
+  declare uprogram
+  declare ucommand
+
+  if [[ -z "${program}" || -z "${command}" ]]; then
+    return 1
+  fi
+
+  # bash 4 and up have built-in ways to upper and lower
+  # case the contents of vars.  This is faster than
+  # calling tr.
+
+  if [[ -z "${BASH_VERSINFO[0]}" ]] \
+     || [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
+    uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]')
+    ucommand=$(echo "${command}" | tr '[:lower:]' '[:upper:]')
+  else
+    uprogram=${program^^}
+    ucommand=${command^^}
+  fi
+
+  # HDFS_namenode_OPTS
+  # HADOOP_distcp_OPTS
+  # MAPRED_distcp_OPTS
+  # YARN_sharedcachemanager_OPTS
+  # ...
+  var="${uprogram}_${command}_OPTS"
+
+  # Let's handle all of the deprecation cases early
+  # HADOOP_NAMENODE_OPTS -> HDFS_namenode_OPTS
+  # YARN_RESOURCEMANAGER_OPTS -> YARN_resourcemanager_OPTS
+
+  uvar="${uprogram}_${ucommand}_OPTS"
+  if [[ -n ${!uvar} ]]; then
+    hadoop_deprecate_envvar "${uvar}" "${var}"
+  fi
+
+  uvar="HADOOP_${ucommand}_OPTS"
+  if [[ -n ${!uvar} ]]; then
+    hadoop_deprecate_envvar "${uvar}" "${var}"
+  fi
+
+  if [[ -n ${!var} ]]; then
+    hadoop_debug "Appending ${!var} onto HADOOP_OPTS"
+    HADOOP_OPTS="${HADOOP_OPTS} ${!var}"
+    return 0
+  fi
+}
+
 ## @description  Perform the 'hadoop classpath', etc subcommand with the given
 ## @description  parameters
 ## @audience     private

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd361929/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_subcommand_opts.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_subcommand_opts.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_subcommand_opts.bats
new file mode 100644
index 0000000..1fbf343
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_subcommand_opts.bats
@@ -0,0 +1,76 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_subcommand_opts (missing param)" {
+  HADOOP_OPTS="x"
+  run hadoop_subcommand_opts testvar
+  [ "${status}" = "1" ]
+}
+
+@test "hadoop_subcommand_opts (simple not exist)" {
+  HADOOP_OPTS="x"
+  hadoop_subcommand_opts hadoop subcommand
+  [ "${HADOOP_OPTS}" = "x" ]
+}
+
+@test "hadoop_subcommand_opts (hadoop simple exist)" {
+  HADOOP_OPTS="x"
+  HADOOP_test_OPTS="y"
+  hadoop_subcommand_opts hadoop test
+  echo "${HADOOP_OPTS}"
+  [ "${HADOOP_OPTS}" = "x y" ]
+}
+
+@test "hadoop_subcommand_opts (hadoop complex exist)" {
+  HADOOP_OPTS="x"
+  HADOOP_test_OPTS="y z"
+  hadoop_subcommand_opts hadoop test
+  echo "${HADOOP_OPTS}"
+  [ "${HADOOP_OPTS}" = "x y z" ]
+}
+
+@test "hadoop_subcommand_opts (hdfs simple exist)" {
+  HADOOP_OPTS="x"
+  HDFS_test_OPTS="y"
+  hadoop_subcommand_opts hdfs test
+  echo "${HADOOP_OPTS}"
+  [ "${HADOOP_OPTS}" = "x y" ]
+}
+
+@test "hadoop_subcommand_opts (yarn simple exist)" {
+  HADOOP_OPTS="x"
+  YARN_test_OPTS="y"
+  hadoop_subcommand_opts yarn test
+  echo "${HADOOP_OPTS}"
+  [ "${HADOOP_OPTS}" = "x y" ]
+}
+
+@test "hadoop_subcommand_opts (deprecation case #1)" {
+  HADOOP_OPTS="x"
+  HADOOP_NAMENODE_OPTS="y"
+  hadoop_subcommand_opts hdfs namenode
+  echo "${HADOOP_OPTS}"
+  [ "${HADOOP_OPTS}" = "x y" ]
+}
+
+@test "hadoop_subcommand_opts (deprecation case #2)" {
+  HADOOP_OPTS="x"
+  YARN_RESOURCEMANAGER_OPTS="y"
+  hadoop_subcommand_opts yarn resourcemanager
+  echo "${HADOOP_OPTS}"
+  [ "${HADOOP_OPTS}" = "x y" ]
+}
\ No newline at end of file
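
Putting the function and its tests together, the deprecation path looks like
this in practice (a sketch; the exact warning text is whatever
hadoop_deprecate_envvar prints):

  export HADOOP_OPTS="-Dbase"
  export HADOOP_NAMENODE_OPTS="-Xmx4g"    # pre-3.x name
  hadoop_subcommand_opts hdfs namenode
  # hadoop_deprecate_envvar warns and copies the value into
  # HDFS_namenode_OPTS; HADOOP_OPTS ends up as "-Dbase -Xmx4g"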




[04/19] hadoop git commit: YARN-5221. Expose UpdateResourceRequest API to allow AM to request for change in container properties. (asuresh)

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
index b271b37..38ef59b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
@@ -2092,7 +2092,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
       String nodeLabelExpression) {
     ContainerId containerId = ContainerId.newContainerId(appAttemptId, id);
     NMContainerStatus containerReport =
-        NMContainerStatus.newInstance(containerId, containerState,
+        NMContainerStatus.newInstance(containerId, 0, containerState,
             Resource.newInstance(1024, 1), "recover container", 0,
             Priority.newInstance(0), 0, nodeLabelExpression);
     return containerReport;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
index aa5b336..c2a20a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
@@ -1127,7 +1127,7 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
     NMContainerStatus report =
         NMContainerStatus.newInstance(
           ContainerId.newContainerId(
-            ApplicationAttemptId.newInstance(app.getApplicationId(), 2), 1),
+            ApplicationAttemptId.newInstance(app.getApplicationId(), 2), 1), 0,
           ContainerState.COMPLETE, Resource.newInstance(1024, 1),
           "Dummy Completed", 0, Priority.newInstance(10), 1234);
     rm.getResourceTrackerService().handleNMContainerStatus(report, null);
@@ -1138,7 +1138,7 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
         (RMAppAttemptImpl) app.getCurrentAppAttempt();
     currentAttempt.setMasterContainer(null);
     report = NMContainerStatus.newInstance(
-          ContainerId.newContainerId(currentAttempt.getAppAttemptId(), 0),
+          ContainerId.newContainerId(currentAttempt.getAppAttemptId(), 0), 0,
           ContainerState.COMPLETE, Resource.newInstance(1024, 1),
           "Dummy Completed", 0, Priority.newInstance(10), 1234);
     rm.getResourceTrackerService().handleNMContainerStatus(report, null);
@@ -1150,7 +1150,7 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
     // Case 2.1: AppAttemptId is null
     report = NMContainerStatus.newInstance(
           ContainerId.newContainerId(
-            ApplicationAttemptId.newInstance(app.getApplicationId(), 2), 1),
+            ApplicationAttemptId.newInstance(app.getApplicationId(), 2), 1), 0,
           ContainerState.COMPLETE, Resource.newInstance(1024, 1),
           "Dummy Completed", 0, Priority.newInstance(10), 1234);
     try {
@@ -1165,7 +1165,7 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
         (RMAppAttemptImpl) app.getCurrentAppAttempt();
     currentAttempt.setMasterContainer(null);
     report = NMContainerStatus.newInstance(
-      ContainerId.newContainerId(currentAttempt.getAppAttemptId(), 0),
+      ContainerId.newContainerId(currentAttempt.getAppAttemptId(), 0), 0,
       ContainerState.COMPLETE, Resource.newInstance(1024, 1),
       "Dummy Completed", 0, Priority.newInstance(10), 1234);
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 09c16d0..bc92c01 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -58,9 +58,9 @@ import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
@@ -70,6 +70,7 @@ import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
@@ -3232,9 +3233,10 @@ public class TestCapacityScheduler {
 
     // am1 asks to change its AM container from 1GB to 3GB
     am1.sendContainerResizingRequest(Arrays.asList(
-            ContainerResourceChangeRequest
-                .newInstance(containerId1, Resources.createResource(3 * GB))),
-        null);
+            UpdateContainerRequest
+                .newInstance(0, containerId1,
+                    ContainerUpdateType.INCREASE_RESOURCE,
+                    Resources.createResource(3 * GB), null)));
     
     FiCaSchedulerApp app = getFiCaSchedulerApp(rm, app1.getApplicationId());
     
@@ -3246,11 +3248,14 @@ public class TestCapacityScheduler {
     
     // am1 asks to change containerId2 (2G -> 3G) and containerId3 (2G -> 5G)
     am1.sendContainerResizingRequest(Arrays.asList(
-            ContainerResourceChangeRequest
-                .newInstance(containerId2, Resources.createResource(3 * GB)),
-            ContainerResourceChangeRequest
-                .newInstance(containerId3, Resources.createResource(5 * GB))),
-        null);
+        UpdateContainerRequest
+                .newInstance(0, containerId2,
+                    ContainerUpdateType.INCREASE_RESOURCE,
+                    Resources.createResource(3 * GB), null),
+        UpdateContainerRequest
+                .newInstance(0, containerId3,
+                    ContainerUpdateType.INCREASE_RESOURCE,
+                    Resources.createResource(5 * GB), null)));
     
     Assert.assertEquals(6 * GB,
         app.getAppAttemptResourceUsage().getPending().getMemorySize());
@@ -3261,13 +3266,18 @@ public class TestCapacityScheduler {
     // am1 asks to change containerId1 (1G->3G), containerId2 (2G -> 4G) and
     // containerId3 (2G -> 2G)
     am1.sendContainerResizingRequest(Arrays.asList(
-            ContainerResourceChangeRequest
-                .newInstance(containerId1, Resources.createResource(3 * GB)),
-            ContainerResourceChangeRequest
-                .newInstance(containerId2, Resources.createResource(4 * GB)),
-            ContainerResourceChangeRequest
-                .newInstance(containerId3, Resources.createResource(2 * GB))),
-        null);
+        UpdateContainerRequest
+                .newInstance(0, containerId1,
+                    ContainerUpdateType.INCREASE_RESOURCE,
+                    Resources.createResource(3 * GB), null),
+        UpdateContainerRequest
+                .newInstance(0, containerId2,
+                    ContainerUpdateType.INCREASE_RESOURCE,
+                    Resources.createResource(4 * GB), null),
+        UpdateContainerRequest
+                .newInstance(0, containerId3,
+                    ContainerUpdateType.INCREASE_RESOURCE,
+                    Resources.createResource(2 * GB), null)));
     Assert.assertEquals(4 * GB,
         app.getAppAttemptResourceUsage().getPending().getMemorySize());
     checkPendingResource(rm, "a1", 4 * GB, null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
index 6cf9c61..e2b4952 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
@@ -289,13 +289,14 @@ public class TestContainerAllocation {
 
         @Override
         public Token createContainerToken(ContainerId containerId,
-            NodeId nodeId, String appSubmitter, Resource capability,
-            Priority priority, long createTime,
-            LogAggregationContext logAggregationContext, String nodeLabelExp, ContainerType containerType) {
+            int containerVersion, NodeId nodeId, String appSubmitter,
+            Resource capability, Priority priority, long createTime,
+            LogAggregationContext logAggregationContext, String nodeLabelExp,
+            ContainerType containerType) {
           numRetries++;
-          return super.createContainerToken(containerId, nodeId, appSubmitter,
-              capability, priority, createTime, logAggregationContext,
-              nodeLabelExp, containerType);
+          return super.createContainerToken(containerId, containerVersion,
+              nodeId, appSubmitter, capability, priority, createTime,
+              logAggregationContext, nodeLabelExp, containerType);
         }
       };
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java
index 499e041..6fba22a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java
@@ -30,10 +30,12 @@ import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
+import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
+import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
@@ -84,7 +86,7 @@ public class TestContainerResizing {
 
     @Override
     protected void decreaseContainers(
-        List<ContainerResourceChangeRequest> decreaseRequests,
+        List<UpdateContainerRequest> decreaseRequests,
         SchedulerApplicationAttempt attempt) {
       try {
         Thread.sleep(1000);
@@ -138,9 +140,10 @@ public class TestContainerResizing {
     sentRMContainerLaunched(rm1, containerId1);
     // am1 asks to change its AM container from 1GB to 3GB
     am1.sendContainerResizingRequest(Arrays.asList(
-            ContainerResourceChangeRequest
-                .newInstance(containerId1, Resources.createResource(3 * GB))),
-        null);
+            UpdateContainerRequest
+                .newInstance(0, containerId1,
+                    ContainerUpdateType.INCREASE_RESOURCE,
+                    Resources.createResource(3 * GB), null)));
 
     FiCaSchedulerApp app = TestUtils.getFiCaSchedulerApp(
         rm1, app1.getApplicationId());
@@ -195,9 +198,11 @@ public class TestContainerResizing {
     sentRMContainerLaunched(rm1, containerId1);
 
     // am1 asks to decrease its AM container resource to 1GB
-    AllocateResponse response = am1.sendContainerResizingRequest(null, Arrays
-        .asList(ContainerResourceChangeRequest
-            .newInstance(containerId1, Resources.createResource(1 * GB))));
+    AllocateResponse response = am1.sendContainerResizingRequest(Arrays
+        .asList(UpdateContainerRequest
+            .newInstance(0, containerId1,
+                ContainerUpdateType.DECREASE_RESOURCE,
+                Resources.createResource(1 * GB), null)));
 
     verifyContainerDecreased(response, containerId1, 1 * GB);
     checkUsedResource(rm1, "default", 1 * GB, null);
@@ -266,9 +271,10 @@ public class TestContainerResizing {
 
     // am1 asks to change its AM container from 1GB to 7GB
     am1.sendContainerResizingRequest(Arrays.asList(
-            ContainerResourceChangeRequest
-                .newInstance(containerId1, Resources.createResource(7 * GB))),
-        null);
+            UpdateContainerRequest
+                .newInstance(0, containerId1,
+                    ContainerUpdateType.INCREASE_RESOURCE,
+                    Resources.createResource(7 * GB), null)));
 
     checkPendingResource(rm1, "default", 6 * GB, null);
     Assert.assertEquals(6 * GB,
@@ -367,9 +373,10 @@ public class TestContainerResizing {
     // am1 asks to change container2 from 2GB to 8GB, which will exceed user
     // limit
     am1.sendContainerResizingRequest(Arrays.asList(
-            ContainerResourceChangeRequest
-                .newInstance(containerId2, Resources.createResource(8 * GB))),
-        null);
+            UpdateContainerRequest
+                .newInstance(0, containerId2,
+                    ContainerUpdateType.INCREASE_RESOURCE,
+                    Resources.createResource(8 * GB), null)));
 
     checkPendingResource(rm1, "default", 6 * GB, null);
     Assert.assertEquals(6 * GB,
@@ -447,9 +454,10 @@ public class TestContainerResizing {
 
     // am1 asks to change its AM container from 1GB to 7GB
     am1.sendContainerResizingRequest(Arrays.asList(
-            ContainerResourceChangeRequest
-                .newInstance(containerId1, Resources.createResource(7 * GB))),
-        null);
+            UpdateContainerRequest
+                .newInstance(0, containerId1,
+                    ContainerUpdateType.INCREASE_RESOURCE,
+                    Resources.createResource(7 * GB), null)));
 
     checkPendingResource(rm1, "default", 6 * GB, null);
     Assert.assertEquals(6 * GB,
@@ -487,9 +495,10 @@ public class TestContainerResizing {
     // am1 asks to change its AM container from 1G to 1G (cancel the increase
     // request actually)
     am1.sendContainerResizingRequest(Arrays.asList(
-            ContainerResourceChangeRequest
-                .newInstance(containerId1, Resources.createResource(1 * GB))),
-        null);
+            UpdateContainerRequest
+                .newInstance(0, containerId1,
+                    ContainerUpdateType.INCREASE_RESOURCE,
+                    Resources.createResource(1 * GB), null)));
     // Trigger a node heartbeat..
     cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
     
@@ -559,9 +568,10 @@ public class TestContainerResizing {
 
     // am1 asks to change its AM container from 2GB to 8GB
     am1.sendContainerResizingRequest(Arrays.asList(
-            ContainerResourceChangeRequest
-                .newInstance(containerId1, Resources.createResource(8 * GB))),
-        null);
+            UpdateContainerRequest
+                .newInstance(0, containerId1,
+                    ContainerUpdateType.INCREASE_RESOURCE,
+                    Resources.createResource(8 * GB), null)));
 
     checkPendingResource(rm1, "default", 6 * GB, null);
     Assert.assertEquals(6 * GB,
@@ -597,9 +607,11 @@ public class TestContainerResizing {
     // request, make target_capacity=existing_capacity)
     am1.allocate(null, Arrays.asList(containerId2));
     // am1 asks to change its AM container from 2G to 1G (decrease)
-    am1.sendContainerResizingRequest(null, Arrays.asList(
-        ContainerResourceChangeRequest
-            .newInstance(containerId1, Resources.createResource(1 * GB))));
+    am1.sendContainerResizingRequest(Arrays.asList(
+        UpdateContainerRequest
+            .newInstance(0, containerId1,
+                ContainerUpdateType.INCREASE_RESOURCE,
+                Resources.createResource(1 * GB), null)));
     // Trigger a node heartbeat..
     cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
     
@@ -668,9 +680,10 @@ public class TestContainerResizing {
 
     // am1 asks to change containerId2 from 2GB to 8GB
     am1.sendContainerResizingRequest(Arrays.asList(
-            ContainerResourceChangeRequest
-                .newInstance(containerId2, Resources.createResource(8 * GB))),
-        null);
+            UpdateContainerRequest
+                .newInstance(0, containerId2,
+                    ContainerUpdateType.INCREASE_RESOURCE,
+                    Resources.createResource(8 * GB), null)));
 
     checkPendingResource(rm1, "default", 6 * GB, null);
     Assert.assertEquals(6 * GB,
@@ -765,9 +778,10 @@ public class TestContainerResizing {
 
     // am1 asks to change containerId2 from 2GB to 8GB
     am1.sendContainerResizingRequest(Arrays.asList(
-            ContainerResourceChangeRequest
-                .newInstance(containerId2, Resources.createResource(8 * GB))),
-        null);
+            UpdateContainerRequest
+                .newInstance(0, containerId2,
+                    ContainerUpdateType.INCREASE_RESOURCE,
+                    Resources.createResource(8 * GB), null)));
 
     checkPendingResource(rm1, "default", 6 * GB, null);
     Assert.assertEquals(6 * GB,
@@ -883,14 +897,16 @@ public class TestContainerResizing {
     allocateAndLaunchContainers(am1, nm1, rm1, 2, 1 * GB, 4, 6);
 
     // am1 asks to change its container[2-7] from 1G to 2G
-    List<ContainerResourceChangeRequest> increaseRequests = new ArrayList<>();
+    List<UpdateContainerRequest> increaseRequests = new ArrayList<>();
     for (int cId = 2; cId <= 7; cId++) {
       ContainerId containerId =
           ContainerId.newContainerId(am1.getApplicationAttemptId(), cId);
-      increaseRequests.add(ContainerResourceChangeRequest
-          .newInstance(containerId, Resources.createResource(2 * GB)));
+      increaseRequests.add(UpdateContainerRequest
+          .newInstance(0, containerId,
+              ContainerUpdateType.INCREASE_RESOURCE,
+              Resources.createResource(2 * GB), null));
     }
-    am1.sendContainerResizingRequest(increaseRequests, null);
+    am1.sendContainerResizingRequest(increaseRequests);
 
     checkPendingResource(rm1, "default", 6 * GB, null);
     Assert.assertEquals(6 * GB,
@@ -904,7 +920,7 @@ public class TestContainerResizing {
     // earlier allocated)
     cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
     AllocateResponse allocateResponse = am1.allocate(null, null);
-    Assert.assertEquals(3, allocateResponse.getIncreasedContainers().size());
+    Assert.assertEquals(3, allocateResponse.getUpdatedContainers().size());
     verifyContainerIncreased(allocateResponse,
         ContainerId.newContainerId(attemptId, 4), 2 * GB);
     verifyContainerIncreased(allocateResponse,
@@ -964,14 +980,16 @@ public class TestContainerResizing {
     allocateAndLaunchContainers(am1, nm1, rm1, 2, 1 * GB, 4, 6);
 
     // am1 asks to change its container[2-7] from 1G to 2G
-    List<ContainerResourceChangeRequest> increaseRequests = new ArrayList<>();
+    List<UpdateContainerRequest> increaseRequests = new ArrayList<>();
     for (int cId = 2; cId <= 7; cId++) {
       ContainerId containerId =
           ContainerId.newContainerId(am1.getApplicationAttemptId(), cId);
-      increaseRequests.add(ContainerResourceChangeRequest
-          .newInstance(containerId, Resources.createResource(2 * GB)));
+      increaseRequests.add(UpdateContainerRequest
+          .newInstance(0, containerId,
+              ContainerUpdateType.INCREASE_RESOURCE,
+              Resources.createResource(2 * GB), null));
     }
-    am1.sendContainerResizingRequest(increaseRequests, null);
+    am1.sendContainerResizingRequest(increaseRequests);
 
     checkPendingResource(rm1, "default", 6 * GB, null);
     Assert.assertEquals(6 * GB,
@@ -985,7 +1003,7 @@ public class TestContainerResizing {
     // earlier allocated)
     cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
     AllocateResponse allocateResponse = am1.allocate(null, null);
-    Assert.assertEquals(3, allocateResponse.getIncreasedContainers().size());
+    Assert.assertEquals(3, allocateResponse.getUpdatedContainers().size());
     verifyContainerIncreased(allocateResponse,
         ContainerId.newContainerId(attemptId, 4), 2 * GB);
     verifyContainerIncreased(allocateResponse,
@@ -1046,9 +1064,11 @@ public class TestContainerResizing {
     nm.nodeHeartbeat(true);
     // *In the meantime*, am1 asks to decrease its AM container resource from
     // 3GB to 1GB
-    AllocateResponse response = am1.sendContainerResizingRequest(null,
-        Collections.singletonList(ContainerResourceChangeRequest
-            .newInstance(containerId1, Resources.createResource(GB))));
+    AllocateResponse response = am1.sendContainerResizingRequest(
+        Collections.singletonList(UpdateContainerRequest
+            .newInstance(0, containerId1,
+                ContainerUpdateType.DECREASE_RESOURCE,
+                Resources.createResource(GB), null)));
     // verify that the container resource is decreased
     verifyContainerDecreased(response, containerId1, GB);
 
@@ -1077,12 +1097,16 @@ public class TestContainerResizing {
 
   private void verifyContainerIncreased(AllocateResponse response,
       ContainerId containerId, int mem) {
-    List<Container> increasedContainers = response.getIncreasedContainers();
+    List<UpdatedContainer> increasedContainers =
+        response.getUpdatedContainers();
     boolean found = false;
-    for (Container c : increasedContainers) {
-      if (c.getId().equals(containerId)) {
+    for (UpdatedContainer c : increasedContainers) {
+      if (c.getContainer().getId().equals(containerId)) {
         found = true;
-        Assert.assertEquals(mem, c.getResource().getMemorySize());
+        Assert.assertEquals(ContainerUpdateType.INCREASE_RESOURCE,
+            c.getUpdateType());
+        Assert.assertEquals(mem,
+            c.getContainer().getResource().getMemorySize());
       }
     }
     if (!found) {
@@ -1092,12 +1116,16 @@ public class TestContainerResizing {
 
   private void verifyContainerDecreased(AllocateResponse response,
       ContainerId containerId, int mem) {
-    List<Container> decreasedContainers = response.getDecreasedContainers();
+    List<UpdatedContainer> decreasedContainers =
+        response.getUpdatedContainers();
     boolean found = false;
-    for (Container c : decreasedContainers) {
-      if (c.getId().equals(containerId)) {
+    for (UpdatedContainer c : decreasedContainers) {
+      if (c.getContainer().getId().equals(containerId)) {
         found = true;
-        Assert.assertEquals(mem, c.getResource().getMemorySize());
+        Assert.assertEquals(ContainerUpdateType.DECREASE_RESOURCE,
+            c.getUpdateType());
+        Assert.assertEquals(mem,
+            c.getContainer().getResource().getMemorySize());
       }
     }
     if (!found) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java
index d388172..c5829cf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java
@@ -21,10 +21,12 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
 import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.UpdateContainerError;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
@@ -109,8 +111,9 @@ public class TestIncreaseAllocationExpirer {
     rm1.waitForState(nm1, containerId2, RMContainerState.RUNNING);
     // am1 asks to increase containerId2 from 1GB to 3GB
     am1.sendContainerResizingRequest(Collections.singletonList(
-        ContainerResourceChangeRequest.newInstance(
-            containerId2, Resources.createResource(3 * GB))), null);
+        UpdateContainerRequest.newInstance(0, containerId2,
+            ContainerUpdateType.INCREASE_RESOURCE,
+            Resources.createResource(3 * GB), null)));
     // Kick off scheduling and sleep for 1 second;
     nm1.nodeHeartbeat(true);
     Thread.sleep(1000);
@@ -180,8 +183,9 @@ public class TestIncreaseAllocationExpirer {
     rm1.waitForState(nm1, containerId2, RMContainerState.RUNNING);
     // am1 asks to increase containerId2 from 1GB to 3GB
     am1.sendContainerResizingRequest(Collections.singletonList(
-        ContainerResourceChangeRequest.newInstance(
-            containerId2, Resources.createResource(3 * GB))), null);
+        UpdateContainerRequest.newInstance(0, containerId2,
+            ContainerUpdateType.INCREASE_RESOURCE,
+            Resources.createResource(3 * GB), null)));
     // Kick off scheduling and wait for 1 second;
     nm1.nodeHeartbeat(true);
     Thread.sleep(1000);
@@ -249,8 +253,9 @@ public class TestIncreaseAllocationExpirer {
     rm1.waitForState(nm1, containerId2, RMContainerState.RUNNING);
     // am1 asks to change containerId2 from 1GB to 3GB
     am1.sendContainerResizingRequest(Collections.singletonList(
-        ContainerResourceChangeRequest.newInstance(
-            containerId2, Resources.createResource(3 * GB))), null);
+        UpdateContainerRequest.newInstance(0, containerId2,
+            ContainerUpdateType.INCREASE_RESOURCE,
+            Resources.createResource(3 * GB), null)));
     // Kick off scheduling and sleep for 1 second to
     // make sure the allocation is done
     nm1.nodeHeartbeat(true);
@@ -261,10 +266,23 @@ public class TestIncreaseAllocationExpirer {
     Resource resource1 = Resources.clone(
         rm1.getResourceScheduler().getRMContainer(containerId2)
             .getAllocatedResource());
+
+    // This should not work, since the container version is wrong
+    AllocateResponse response = am1.sendContainerResizingRequest(Collections
+        .singletonList(
+        UpdateContainerRequest.newInstance(0, containerId2,
+            ContainerUpdateType.INCREASE_RESOURCE,
+            Resources.createResource(5 * GB), null)));
+    List<UpdateContainerError> updateErrors = response.getUpdateErrors();
+    Assert.assertEquals(1, updateErrors.size());
+    Assert.assertEquals("INCORRECT_CONTAINER_VERSION_ERROR|0|1",
+        updateErrors.get(0).getReason());
+
     // am1 asks to change containerId2 from 3GB to 5GB
     am1.sendContainerResizingRequest(Collections.singletonList(
-        ContainerResourceChangeRequest.newInstance(
-            containerId2, Resources.createResource(5 * GB))), null);
+        UpdateContainerRequest.newInstance(1, containerId2,
+            ContainerUpdateType.INCREASE_RESOURCE,
+            Resources.createResource(5 * GB), null)));
     // Kick off scheduling and sleep for 1 second to
     // make sure the allocation is done
     nm1.nodeHeartbeat(true);
@@ -362,30 +380,36 @@ public class TestIncreaseAllocationExpirer {
     rm1.waitForState(nm1, containerId3, RMContainerState.RUNNING);
     rm1.waitForState(nm1, containerId4, RMContainerState.RUNNING);
     // am1 asks to increase containerId2, containerId3 and containerId4 from 1GB to 6GB
-    List<ContainerResourceChangeRequest> increaseRequests = new ArrayList<>();
-    increaseRequests.add(ContainerResourceChangeRequest.newInstance(
-        containerId2, Resources.createResource(6 * GB)));
-    increaseRequests.add(ContainerResourceChangeRequest.newInstance(
-        containerId3, Resources.createResource(6 * GB)));
-    increaseRequests.add(ContainerResourceChangeRequest.newInstance(
-        containerId4, Resources.createResource(6 * GB)));
-    am1.sendContainerResizingRequest(increaseRequests, null);
+    List<UpdateContainerRequest> increaseRequests = new ArrayList<>();
+    increaseRequests.add(UpdateContainerRequest.newInstance(0, containerId2,
+        ContainerUpdateType.INCREASE_RESOURCE,
+        Resources.createResource(6 * GB), null));
+    increaseRequests.add(UpdateContainerRequest.newInstance(0, containerId3,
+        ContainerUpdateType.INCREASE_RESOURCE,
+        Resources.createResource(6 * GB), null));
+    increaseRequests.add(UpdateContainerRequest.newInstance(0, containerId4,
+        ContainerUpdateType.INCREASE_RESOURCE,
+        Resources.createResource(6 * GB), null));
+    am1.sendContainerResizingRequest(increaseRequests);
     nm1.nodeHeartbeat(true);
     Thread.sleep(1000);
     // Start container increase allocation expirer
     am1.allocate(null, null);
     // Decrease containers
-    List<ContainerResourceChangeRequest> decreaseRequests = new ArrayList<>();
-    decreaseRequests.add(ContainerResourceChangeRequest.newInstance(
-        containerId2, Resources.createResource(2 * GB)));
-    decreaseRequests.add(ContainerResourceChangeRequest.newInstance(
-        containerId3, Resources.createResource(4 * GB)));
-    decreaseRequests.add(ContainerResourceChangeRequest.newInstance(
-        containerId4, Resources.createResource(4 * GB)));
+    List<UpdateContainerRequest> decreaseRequests = new ArrayList<>();
+    decreaseRequests.add(UpdateContainerRequest.newInstance(1, containerId2,
+        ContainerUpdateType.INCREASE_RESOURCE,
+        Resources.createResource(2 * GB), null));
+    decreaseRequests.add(UpdateContainerRequest.newInstance(1, containerId3,
+        ContainerUpdateType.INCREASE_RESOURCE,
+        Resources.createResource(4 * GB), null));
+    decreaseRequests.add(UpdateContainerRequest.newInstance(1, containerId4,
+        ContainerUpdateType.INCREASE_RESOURCE,
+        Resources.createResource(4 * GB), null));
     AllocateResponse response =
-        am1.sendContainerResizingRequest(null, decreaseRequests);
+        am1.sendContainerResizingRequest(decreaseRequests);
     // Verify containers are decreased in scheduler
-    Assert.assertEquals(3, response.getDecreasedContainers().size());
+    Assert.assertEquals(3, response.getUpdatedContainers().size());
     // Use the token for containerId4 on NM (6G). This should set the last
     // confirmed resource to 4G, and cancel the allocation expirer
     nm1.containerIncreaseStatus(getContainer(
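
For readers following the API change exercised above: the old two-list resizing call (separate increase and decrease lists) is replaced by a single list of UpdateContainerRequest records, each carrying the container's current version. Below is a minimal sketch of the new request shape, assuming only hadoop-yarn-api on the classpath; the class name, helper name, and the 1-vcore figure are illustrative, not part of the patch.

import java.util.Collections;
import java.util.List;

import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;

final class ResizeRequestSketch {
  // Builds the request an AM sends to grow a container. The version must
  // match the container's current version; otherwise the RM answers with
  // an UpdateContainerError whose reason encodes the mismatch, e.g.
  // "INCORRECT_CONTAINER_VERSION_ERROR|0|1" as asserted in the test above.
  static List<UpdateContainerRequest> increaseTo(ContainerId id,
      int containerVersion, int memoryMb) {
    return Collections.singletonList(
        UpdateContainerRequest.newInstance(containerVersion, id,
            ContainerUpdateType.INCREASE_RESOURCE,
            Resource.newInstance(memoryMb, 1), null));
  }
}

A request whose version lags the container's current version yields an update error rather than an allocation, which is exactly what the INCORRECT_CONTAINER_VERSION_ERROR assertion above exercises.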

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
index 59bb6aa..ee3396d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
@@ -220,7 +220,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
     
     org.apache.hadoop.yarn.api.records.Token validContainerToken =
         containerTokenSecretManager.createContainerToken(validContainerId,
-            validNode, user, r, Priority.newInstance(10), 1234);
+            0, validNode, user, r, Priority.newInstance(10), 1234);
     ContainerTokenIdentifier identifier =
         BuilderUtils.newContainerTokenIdentifier(validContainerToken);
     Assert.assertEquals(Priority.newInstance(10), identifier.getPriority());
@@ -277,7 +277,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
         4 * 60 * 1000);
     validContainerToken =
         containerTokenSecretManager.createContainerToken(validContainerId,
-            validNode, user, r, Priority.newInstance(0), 0);
+            0, validNode, user, r, Priority.newInstance(0), 0);
     Assert.assertTrue(testStartContainer(rpc, validAppAttemptId, validNode,
       validContainerToken, validNMToken, false).isEmpty());
     Assert.assertTrue(nmTokenSecretManagerNM
@@ -293,7 +293,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
 
     org.apache.hadoop.yarn.api.records.Token validContainerToken2 =
         containerTokenSecretManager.createContainerToken(validContainerId2,
-            validNode, user, r, Priority.newInstance(0), 0);
+            0, validNode, user, r, Priority.newInstance(0), 0);
     
     org.apache.hadoop.yarn.api.records.Token validNMToken2 =
         nmTokenSecretManagerRM.createNMToken(validAppAttemptId2, validNode, user);
@@ -379,7 +379,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
           .createNMToken(validAppAttemptId, validNode, user);
     org.apache.hadoop.yarn.api.records.Token newContainerToken =
         containerTokenSecretManager.createContainerToken(
-          ContainerId.newContainerId(attempt2, 1), validNode, user, r,
+          ContainerId.newContainerId(attempt2, 1), 0, validNode, user, r,
             Priority.newInstance(0), 0);
     Assert.assertTrue(testStartContainer(rpc, attempt2, validNode,
       newContainerToken, attempt1NMToken, false).isEmpty());
@@ -639,7 +639,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
     
     Token containerToken = 
         containerTokenSecretManager.createContainerToken(
-            cId, nodeId, user, r, Priority.newInstance(0), 0);
+            cId, 0, nodeId, user, r, Priority.newInstance(0), 0);
     
     ContainerTokenIdentifier containerTokenIdentifier = 
         getContainerTokenIdentifierFromToken(containerToken);
@@ -672,8 +672,8 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
     ContainerId cId2 = ContainerId.newContainerId(appAttemptId, 1);
     // Creating modified containerToken
     Token containerToken2 =
-        tamperedContainerTokenSecretManager.createContainerToken(cId2, nodeId,
-            user, r, Priority.newInstance(0), 0);
+        tamperedContainerTokenSecretManager.createContainerToken(cId2, 0,
+            nodeId, user, r, Priority.newInstance(0), 0);
     
     StringBuilder sb = new StringBuilder("Given Container ");
     sb.append(cId2);
@@ -731,8 +731,8 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
             getContainerTokenSecretManager();
     Resource r = Resource.newInstance(1230, 2);
     Token containerToken =
-        containerTokenSecretManager.createContainerToken(cId, nodeId, user, r,
-            Priority.newInstance(0), 0);
+        containerTokenSecretManager.createContainerToken(cId, 0, nodeId, user,
+            r, Priority.newInstance(0), 0);
     
     ContainerTokenIdentifier containerTokenIdentifier =
         new ContainerTokenIdentifier();
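
Every createContainerToken call site in this test gains a new int argument (always 0 here) immediately after the ContainerId; taken together with the optional int32 version field added to the test proto below, this is the container version being threaded into the token. A toy, self-contained sketch of one plausible use of such a version field follows — the real enforcement lives in YARN's token secret managers and is not shown in these hunks.

final class TokenVersionSketch {
  // Hypothetical check: a token minted for an older container version
  // should not authorize operations on a container that has since been
  // updated (e.g. resized, which bumps the version).
  static void requireCurrent(int tokenVersion, int currentVersion) {
    if (tokenVersion != currentVersion) {
      throw new SecurityException("container token version " + tokenVersion
          + " does not match current container version " + currentVersion);
    }
  }
}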

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnClusterNodeUtilization.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnClusterNodeUtilization.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnClusterNodeUtilization.java
index 49a82c8..a941302 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnClusterNodeUtilization.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnClusterNodeUtilization.java
@@ -198,8 +198,6 @@ public class TestMiniYarnClusterNodeUtilization {
   /**
    * Verify both the RMNode and SchedulerNode have been updated with the test
    * fixture utilization data.
-   * @param containersUtilization Utilization of the container.
-   * @param nodeUtilization Utilization of the node.
    */
   private void verifySimulatedUtilization() throws InterruptedException {
     ResourceManager rm = cluster.getResourceManager(0);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/proto/test_token.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/proto/test_token.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/proto/test_token.proto
index 853f477..c111462 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/proto/test_token.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/proto/test_token.proto
@@ -44,5 +44,6 @@ message ContainerTokenIdentifierForTestProto {
   optional int64 creationTime = 9;
   optional LogAggregationContextProto logAggregationContext = 10;
   optional string message = 11;
+  optional int32 version = 14 [default = 0];
 }
 




[02/19] hadoop git commit: HDFS-9392. Admins support for maintenance state. Contributed by Ming Ma.

Posted by aw...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index f6b5d8f..ddb8237 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -26,17 +26,13 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.concurrent.ExecutionException;
 
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -64,11 +60,8 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
-import org.junit.After;
 import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.mortbay.util.ajax.JSON;
@@ -78,90 +71,9 @@ import org.slf4j.LoggerFactory;
 /**
  * This class tests the decommissioning of nodes.
  */
-public class TestDecommission {
+public class TestDecommission extends AdminStatesBaseTest {
   public static final Logger LOG = LoggerFactory.getLogger(TestDecommission
       .class);
-  static final long seed = 0xDEADBEEFL;
-  static final int blockSize = 8192;
-  static final int fileSize = 16384;
-  static final int HEARTBEAT_INTERVAL = 1; // heartbeat interval in seconds
-  static final int BLOCKREPORT_INTERVAL_MSEC = 1000; //block report in msec
-  static final int NAMENODE_REPLICATION_INTERVAL = 1; //replication interval
-
-  final Random myrand = new Random();
-  Path dir;
-  Path hostsFile;
-  Path excludeFile;
-  FileSystem localFileSys;
-  Configuration conf;
-  MiniDFSCluster cluster = null;
-
-  @Before
-  public void setup() throws IOException {
-    conf = new HdfsConfiguration();
-    // Set up the hosts/exclude files.
-    localFileSys = FileSystem.getLocal(conf);
-    Path workingDir = localFileSys.getWorkingDirectory();
-    dir = new Path(workingDir, PathUtils.getTestDirName(getClass()) + "/work-dir/decommission");
-    hostsFile = new Path(dir, "hosts");
-    excludeFile = new Path(dir, "exclude");
-    
-    // Setup conf
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
-    conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
-    conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
-    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
-    conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, BLOCKREPORT_INTERVAL_MSEC);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 4);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, NAMENODE_REPLICATION_INTERVAL);
-  
-    writeConfigFile(hostsFile, null);
-    writeConfigFile(excludeFile, null);
-  }
-  
-  @After
-  public void teardown() throws IOException {
-    cleanupFile(localFileSys, dir);
-    if (cluster != null) {
-      cluster.shutdown();
-      cluster = null;
-    }
-  }
-  
-  private void writeConfigFile(Path name, List<String> nodes) 
-    throws IOException {
-    // delete if it already exists
-    if (localFileSys.exists(name)) {
-      localFileSys.delete(name, true);
-    }
-
-    FSDataOutputStream stm = localFileSys.create(name);
-    
-    if (nodes != null) {
-      for (Iterator<String> it = nodes.iterator(); it.hasNext();) {
-        String node = it.next();
-        stm.writeBytes(node);
-        stm.writeBytes("\n");
-      }
-    }
-    stm.close();
-  }
-
-  private void writeFile(FileSystem fileSys, Path name, int repl)
-    throws IOException {
-    // create and write a file that contains three blocks of data
-    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-        (short) repl, blockSize);
-    byte[] buffer = new byte[fileSize];
-    Random rand = new Random(seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
-    LOG.info("Created file " + name + " with " + repl + " replicas.");
-  }
 
   /**
    * Verify that the number of replicas are as expected for each block in
@@ -223,128 +135,6 @@ public class TestDecommission {
     return null;
   }
 
-  private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
-    assertTrue(fileSys.exists(name));
-    fileSys.delete(name, true);
-    assertTrue(!fileSys.exists(name));
-  }
-
-  /*
-   * decommission the DN at index dnIndex or one random node if dnIndex is set
-   * to -1 and wait for the node to reach the given {@code waitForState}.
-   */
-  private DatanodeInfo decommissionNode(int nnIndex,
-                                  String datanodeUuid,
-                                  ArrayList<DatanodeInfo>decommissionedNodes,
-                                  AdminStates waitForState)
-    throws IOException {
-    DFSClient client = getDfsClient(cluster.getNameNode(nnIndex), conf);
-    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
-
-    //
-    // pick one datanode randomly unless the caller specifies one.
-    //
-    int index = 0;
-    if (datanodeUuid == null) {
-      boolean found = false;
-      while (!found) {
-        index = myrand.nextInt(info.length);
-        if (!info[index].isDecommissioned()) {
-          found = true;
-        }
-      }
-    } else {
-      // The caller specifies a DN
-      for (; index < info.length; index++) {
-        if (info[index].getDatanodeUuid().equals(datanodeUuid)) {
-          break;
-        }
-      }
-      if (index == info.length) {
-        throw new IOException("invalid datanodeUuid " + datanodeUuid);
-      }
-    }
-    String nodename = info[index].getXferAddr();
-    LOG.info("Decommissioning node: " + nodename);
-
-    // write nodename into the exclude file.
-    ArrayList<String> nodes = new ArrayList<String>();
-    if (decommissionedNodes != null) {
-      for (DatanodeInfo dn : decommissionedNodes) {
-        nodes.add(dn.getName());
-      }
-    }
-    nodes.add(nodename);
-    writeConfigFile(excludeFile, nodes);
-    refreshNodes(cluster.getNamesystem(nnIndex), conf);
-    DatanodeInfo ret = NameNodeAdapter.getDatanode(
-        cluster.getNamesystem(nnIndex), info[index]);
-    waitNodeState(ret, waitForState);
-    return ret;
-  }
-
-  /* Ask a specific NN to stop decommission of the datanode and wait for each
-   * to reach the NORMAL state.
-   */
-  private void recommissionNode(int nnIndex, DatanodeInfo decommissionedNode) throws IOException {
-    LOG.info("Recommissioning node: " + decommissionedNode);
-    writeConfigFile(excludeFile, null);
-    refreshNodes(cluster.getNamesystem(nnIndex), conf);
-    waitNodeState(decommissionedNode, AdminStates.NORMAL);
-
-  }
-
-  /* 
-   * Wait till node is fully decommissioned.
-   */
-  private void waitNodeState(DatanodeInfo node,
-                             AdminStates state) {
-    boolean done = state == node.getAdminState();
-    while (!done) {
-      LOG.info("Waiting for node " + node + " to change state to "
-          + state + " current state: " + node.getAdminState());
-      try {
-        Thread.sleep(HEARTBEAT_INTERVAL * 500);
-      } catch (InterruptedException e) {
-        // nothing
-      }
-      done = state == node.getAdminState();
-    }
-    LOG.info("node " + node + " reached the state " + state);
-  }
-  
-  /* Get DFSClient to the namenode */
-  private static DFSClient getDfsClient(NameNode nn,
-      Configuration conf) throws IOException {
-    return new DFSClient(nn.getNameNodeAddress(), conf);
-  }
-  
-  /* Validate cluster has expected number of datanodes */
-  private static void validateCluster(DFSClient client, int numDNs)
-      throws IOException {
-    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
-    assertEquals("Number of Datanodes ", numDNs, info.length);
-  }
-  
-  /** Start a MiniDFSCluster 
-   * @throws IOException */
-  private void startCluster(int numNameNodes, int numDatanodes,
-      Configuration conf) throws IOException {
-    cluster = new MiniDFSCluster.Builder(conf)
-      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(numNameNodes))
-        .numDataNodes(numDatanodes).build();
-    cluster.waitActive();
-    for (int i = 0; i < numNameNodes; i++) {
-      DFSClient client = getDfsClient(cluster.getNameNode(i), conf);
-      validateCluster(client, numDatanodes);
-    }
-  }
-
-  static void refreshNodes(final FSNamesystem ns, final Configuration conf
-      ) throws IOException {
-    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
-  }
-  
   private void verifyStats(NameNode namenode, FSNamesystem fsn,
       DatanodeInfo info, DataNode node, boolean decommissioning)
       throws InterruptedException, IOException {
@@ -376,7 +166,7 @@ public class TestDecommission {
   public void testDecommission() throws IOException {
     testDecommission(1, 6);
   }
-  
+
   /**
    * Tests decommission with replicas on the target datanode cannot be migrated
    * to other datanodes and satisfy the replication factor. Make sure the
@@ -387,8 +177,8 @@ public class TestDecommission {
     LOG.info("Starting test testDecommission");
     int numNamenodes = 1;
     int numDatanodes = 4;
-    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
-    startCluster(numNamenodes, numDatanodes, conf);
+    getConf().setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
+    startCluster(numNamenodes, numDatanodes);
 
     ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList = new ArrayList<ArrayList<DatanodeInfo>>(
         numNamenodes);
@@ -399,8 +189,8 @@ public class TestDecommission {
 
     // Start decommissioning one namenode at a time
     ArrayList<DatanodeInfo> decommissionedNodes = namenodeDecomList.get(0);
-    FileSystem fileSys = cluster.getFileSystem(0);
-    FSNamesystem ns = cluster.getNamesystem(0);
+    FileSystem fileSys = getCluster().getFileSystem(0);
+    FSNamesystem ns = getCluster().getNamesystem(0);
 
     writeFile(fileSys, file1, replicas);
 
@@ -408,14 +198,14 @@ public class TestDecommission {
     int liveDecomissioned = ns.getNumDecomLiveDataNodes();
 
     // Decommission one node. Verify that node is decommissioned.
-    DatanodeInfo decomNode = decommissionNode(0, null, decommissionedNodes,
-        AdminStates.DECOMMISSIONED);
+    DatanodeInfo decomNode = takeNodeOutofService(0, null, 0,
+        decommissionedNodes, AdminStates.DECOMMISSIONED);
     decommissionedNodes.add(decomNode);
     assertEquals(deadDecomissioned, ns.getNumDecomDeadDataNodes());
     assertEquals(liveDecomissioned + 1, ns.getNumDecomLiveDataNodes());
 
     // Ensure decommissioned datanode is not automatically shutdown
-    DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
+    DFSClient client = getDfsClient(0);
     assertEquals("All datanodes must be alive", numDatanodes,
         client.datanodeReport(DatanodeReportType.LIVE).length);
     assertNull(checkFile(fileSys, file1, replicas, decomNode.getXferAddr(),
@@ -424,9 +214,8 @@ public class TestDecommission {
 
     // Restart the cluster and ensure recommissioned datanodes
     // are allowed to register with the namenode
-    cluster.shutdown();
-    startCluster(1, 4, conf);
-    cluster.shutdown();
+    shutdownCluster();
+    startCluster(1, 4);
   }
   
   /**
@@ -449,26 +238,22 @@ public class TestDecommission {
    */
   @Test(timeout=360000)
   public void testDecommissionOnStandby() throws Exception {
-    Configuration hdfsConf = new HdfsConfiguration(conf);
-    hdfsConf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
-    hdfsConf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 30000);
-    hdfsConf.setInt(DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_KEY, 2);
+    getConf().setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+    getConf().setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
+        30000);
+    getConf().setInt(
+        DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_KEY, 2);
 
     // The time to wait so that the slow DN's heartbeat is considered old
     // by BlockPlacementPolicyDefault and thus will choose that DN for
     // excess replica.
     long slowHeartbeatDNwaitTime =
-        hdfsConf.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
-        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 1000 * (hdfsConf.getInt(
-        DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_KEY,
+        getConf().getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
+        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 1000 * (getConf().
+        getInt(DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_KEY,
         DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT) + 1);
 
-    cluster = new MiniDFSCluster.Builder(hdfsConf)
-        .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3).build();
-
-    cluster.transitionToActive(0);
-    cluster.waitActive();
-
+    startSimpleHACluster(3);
 
     // Step 1, create a cluster with 4 DNs. Blocks are stored on the first 3 DNs.
     // The last DN is empty. Also configure the last DN to have slow heartbeat
@@ -478,29 +263,29 @@ public class TestDecommission {
     // same as # of DNs, each DN will have a replica for any block.
     Path file1 = new Path("testDecommissionHA.dat");
     int replicas = 3;
-    FileSystem activeFileSys = cluster.getFileSystem(0);
+    FileSystem activeFileSys = getCluster().getFileSystem(0);
     writeFile(activeFileSys, file1, replicas);
 
-    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
-        cluster.getNameNode(1));
+    HATestUtil.waitForStandbyToCatchUp(getCluster().getNameNode(0),
+        getCluster().getNameNode(1));
 
     // Step 1.b, start a DN with slow heartbeat, so that we can know for sure it
     // will be chosen as the target of excess replica during recommission.
-    hdfsConf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 30);
-    cluster.startDataNodes(hdfsConf, 1, true, null, null, null);
-    DataNode lastDN = cluster.getDataNodes().get(3);
+    getConf().setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 30);
+    getCluster().startDataNodes(getConf(), 1, true, null, null, null);
+    DataNode lastDN = getCluster().getDataNodes().get(3);
     lastDN.getDatanodeUuid();
 
     // Step 2, decommission the first DN at both ANN and SBN.
-    DataNode firstDN = cluster.getDataNodes().get(0);
+    DataNode firstDN = getCluster().getDataNodes().get(0);
 
     // Step 2.a, ask ANN to decomm the first DN
-    DatanodeInfo decommissionedNodeFromANN = decommissionNode(
-        0, firstDN.getDatanodeUuid(), null, AdminStates.DECOMMISSIONED);
+    DatanodeInfo decommissionedNodeFromANN = takeNodeOutofService(
+        0, firstDN.getDatanodeUuid(), 0, null, AdminStates.DECOMMISSIONED);
 
     // Step 2.b, ask SBN to decomm the first DN
-    DatanodeInfo decomNodeFromSBN = decommissionNode(1, firstDN.getDatanodeUuid(), null,
-        AdminStates.DECOMMISSIONED);
+    DatanodeInfo decomNodeFromSBN = takeNodeOutofService(1,
+        firstDN.getDatanodeUuid(), 0, null, AdminStates.DECOMMISSIONED);
 
     // Step 3, recommission the first DN on SBN and ANN to create excess replica
     // It recommissions the node on SBN first to create potential
@@ -520,7 +305,7 @@ public class TestDecommission {
     // After the fix,
     //    After recommissionNode finishes, SBN has 4 live replicas ( 0, 1, 2, 3 )
     Thread.sleep(slowHeartbeatDNwaitTime);
-    recommissionNode(1, decomNodeFromSBN);
+    putNodeInService(1, decomNodeFromSBN);
 
     // Step 3.b, ask ANN to recommission the first DN.
     // To verify the fix, the test makes sure the excess replica picked by ANN
@@ -529,41 +314,41 @@ public class TestDecommission {
     // by ANN.
     // 1. restore LastDNprop's heartbeat interval.
     // 2. Make next-to-last DN's heartbeat slow.
-    MiniDFSCluster.DataNodeProperties LastDNprop = cluster.stopDataNode(3);
-    LastDNprop.conf.setLong(
+    MiniDFSCluster.DataNodeProperties lastDNprop =
+        getCluster().stopDataNode(3);
+    lastDNprop.conf.setLong(
         DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
-    cluster.restartDataNode(LastDNprop);
-
-    MiniDFSCluster.DataNodeProperties nextToLastDNprop = cluster.stopDataNode(2);
-    nextToLastDNprop.conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 30);
-    cluster.restartDataNode(nextToLastDNprop);
-    cluster.waitActive();
+    getCluster().restartDataNode(lastDNprop);
+
+    MiniDFSCluster.DataNodeProperties nextToLastDNprop =
+        getCluster().stopDataNode(2);
+    nextToLastDNprop.conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
+        30);
+    getCluster().restartDataNode(nextToLastDNprop);
+    getCluster().waitActive();
     Thread.sleep(slowHeartbeatDNwaitTime);
-    recommissionNode(0, decommissionedNodeFromANN);
+    putNodeInService(0, decommissionedNodeFromANN);
 
     // Step 3.c, make sure the DN has deleted the block and report to NNs
-    cluster.triggerHeartbeats();
-    HATestUtil.waitForDNDeletions(cluster);
-    cluster.triggerDeletionReports();
+    getCluster().triggerHeartbeats();
+    HATestUtil.waitForDNDeletions(getCluster());
+    getCluster().triggerDeletionReports();
 
     // Step 4, decommission the first DN on both ANN and SBN
     // With the fix to make sure SBN no longer marks excess replica
     // during recommission, SBN's decommission can finish properly
-    decommissionNode(0, firstDN.getDatanodeUuid(), null,
+    takeNodeOutofService(0, firstDN.getDatanodeUuid(), 0, null,
         AdminStates.DECOMMISSIONED);
 
     // Ask SBN to decomm the first DN
-    decommissionNode(1, firstDN.getDatanodeUuid(), null,
+    takeNodeOutofService(1, firstDN.getDatanodeUuid(), 0, null,
         AdminStates.DECOMMISSIONED);
-
-    cluster.shutdown();
-
   }
 
   private void testDecommission(int numNamenodes, int numDatanodes)
       throws IOException {
     LOG.info("Starting test testDecommission");
-    startCluster(numNamenodes, numDatanodes, conf);
+    startCluster(numNamenodes, numDatanodes);
     
     ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList = 
       new ArrayList<ArrayList<DatanodeInfo>>(numNamenodes);
@@ -577,8 +362,8 @@ public class TestDecommission {
       // Start decommissioning one namenode at a time
       for (int i = 0; i < numNamenodes; i++) {
         ArrayList<DatanodeInfo> decommissionedNodes = namenodeDecomList.get(i);
-        FileSystem fileSys = cluster.getFileSystem(i);
-        FSNamesystem ns = cluster.getNamesystem(i);
+        FileSystem fileSys = getCluster().getFileSystem(i);
+        FSNamesystem ns = getCluster().getNamesystem(i);
 
         writeFile(fileSys, file1, replicas);
 
@@ -586,14 +371,14 @@ public class TestDecommission {
         int liveDecomissioned = ns.getNumDecomLiveDataNodes();
 
         // Decommission one node. Verify that node is decommissioned.
-        DatanodeInfo decomNode = decommissionNode(i, null, decommissionedNodes,
-            AdminStates.DECOMMISSIONED);
+        DatanodeInfo decomNode = takeNodeOutofService(i, null, 0,
+            decommissionedNodes, AdminStates.DECOMMISSIONED);
         decommissionedNodes.add(decomNode);
         assertEquals(deadDecomissioned, ns.getNumDecomDeadDataNodes());
         assertEquals(liveDecomissioned + 1, ns.getNumDecomLiveDataNodes());
 
         // Ensure decommissioned datanode is not automatically shutdown
-        DFSClient client = getDfsClient(cluster.getNameNode(i), conf);
+        DFSClient client = getDfsClient(i);
         assertEquals("All datanodes must be alive", numDatanodes, 
             client.datanodeReport(DatanodeReportType.LIVE).length);
         // wait for the block to be replicated
@@ -616,9 +401,8 @@ public class TestDecommission {
 
     // Restart the cluster and ensure decommissioned datanodes
     // are allowed to register with the namenode
-    cluster.shutdown();
-    startCluster(numNamenodes, numDatanodes, conf);
-    cluster.shutdown();
+    shutdownCluster();
+    startCluster(numNamenodes, numDatanodes);
   }
 
   /**
@@ -630,13 +414,13 @@ public class TestDecommission {
     try {
       LOG.info("Starting test testRecommission");
 
-      startCluster(1, numDatanodes, conf);
+      startCluster(1, numDatanodes);
 
       final Path file1 = new Path("testDecommission.dat");
       final int replicas = numDatanodes - 1;
 
       ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
-      final FileSystem fileSys = cluster.getFileSystem();
+      final FileSystem fileSys = getCluster().getFileSystem();
 
       // Write a file to n-1 datanodes
       writeFile(fileSys, file1, replicas);
@@ -647,25 +431,24 @@ public class TestDecommission {
           replicas, loc.getHosts().length);
       final String toDecomHost = loc.getNames()[0];
       String toDecomUuid = null;
-      for (DataNode d : cluster.getDataNodes()) {
+      for (DataNode d : getCluster().getDataNodes()) {
         if (d.getDatanodeId().getXferAddr().equals(toDecomHost)) {
           toDecomUuid = d.getDatanodeId().getDatanodeUuid();
           break;
         }
       }
       assertNotNull("Could not find a dn with the block!", toDecomUuid);
-      final DatanodeInfo decomNode =
-          decommissionNode(0, toDecomUuid, decommissionedNodes,
-              AdminStates.DECOMMISSIONED);
+      final DatanodeInfo decomNode = takeNodeOutofService(0, toDecomUuid,
+          0, decommissionedNodes, AdminStates.DECOMMISSIONED);
       decommissionedNodes.add(decomNode);
       final BlockManager blockManager =
-          cluster.getNamesystem().getBlockManager();
+          getCluster().getNamesystem().getBlockManager();
       final DatanodeManager datanodeManager =
           blockManager.getDatanodeManager();
       BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
 
       // Ensure decommissioned datanode is not automatically shutdown
-      DFSClient client = getDfsClient(cluster.getNameNode(), conf);
+      DFSClient client = getDfsClient(0);
       assertEquals("All datanodes must be alive", numDatanodes,
           client.datanodeReport(DatanodeReportType.LIVE).length);
 
@@ -692,15 +475,13 @@ public class TestDecommission {
       }, 500, 30000);
 
       // redecommission and wait for over-replication to be fixed
-      recommissionNode(0, decomNode);
+      putNodeInService(0, decomNode);
       BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
-      DFSTestUtil.waitForReplication(cluster, b, 1, replicas, 0);
+      DFSTestUtil.waitForReplication(getCluster(), b, 1, replicas, 0);
 
       cleanupFile(fileSys, file1);
     } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
+      shutdownCluster();
     }
   }
   
@@ -726,35 +507,33 @@ public class TestDecommission {
       InterruptedException {
     LOG.info("Starting test testClusterStats");
     int numDatanodes = 1;
-    startCluster(numNameNodes, numDatanodes, conf);
+    startCluster(numNameNodes, numDatanodes);
     
     for (int i = 0; i < numNameNodes; i++) {
-      FileSystem fileSys = cluster.getFileSystem(i);
+      FileSystem fileSys = getCluster().getFileSystem(i);
       Path file = new Path("testClusterStats.dat");
       writeFile(fileSys, file, 1);
       
-      FSNamesystem fsn = cluster.getNamesystem(i);
-      NameNode namenode = cluster.getNameNode(i);
+      FSNamesystem fsn = getCluster().getNamesystem(i);
+      NameNode namenode = getCluster().getNameNode(i);
       
-      DatanodeInfo decomInfo = decommissionNode(i, null, null,
+      DatanodeInfo decomInfo = takeNodeOutofService(i, null, 0, null,
           AdminStates.DECOMMISSION_INPROGRESS);
       DataNode decomNode = getDataNode(decomInfo);
       // Check namenode stats for multiple datanode heartbeats
       verifyStats(namenode, fsn, decomInfo, decomNode, true);
       
       // Stop decommissioning and verify stats
-      writeConfigFile(excludeFile, null);
-      refreshNodes(fsn, conf);
       DatanodeInfo retInfo = NameNodeAdapter.getDatanode(fsn, decomInfo);
+      putNodeInService(i, retInfo);
       DataNode retNode = getDataNode(decomInfo);
-      waitNodeState(retInfo, AdminStates.NORMAL);
       verifyStats(namenode, fsn, retInfo, retNode, false);
     }
   }
 
   private DataNode getDataNode(DatanodeInfo decomInfo) {
     DataNode decomNode = null;
-    for (DataNode dn: cluster.getDataNodes()) {
+    for (DataNode dn: getCluster().getDataNodes()) {
       if (decomInfo.equals(dn.getDatanodeId())) {
         decomNode = dn;
         break;
@@ -789,22 +568,16 @@ public class TestDecommission {
   public void testHostsFile(int numNameNodes) throws IOException,
       InterruptedException {
     int numDatanodes = 1;
-    cluster = new MiniDFSCluster.Builder(conf)
-        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(numNameNodes))
-        .numDataNodes(numDatanodes).setupHostsFile(true).build();
-    cluster.waitActive();
-    
+    startCluster(numNameNodes, numDatanodes, true, null, false);
+
     // Now empty hosts file and ensure the datanode is disallowed
     // from talking to namenode, resulting in its shutdown.
-    ArrayList<String>list = new ArrayList<String>();
     final String bogusIp = "127.0.30.1";
-    list.add(bogusIp);
-    writeConfigFile(hostsFile, list);
-    
+    initIncludeHost(bogusIp);
+
     for (int j = 0; j < numNameNodes; j++) {
-      refreshNodes(cluster.getNamesystem(j), conf);
-      
-      DFSClient client = getDfsClient(cluster.getNameNode(j), conf);
+      refreshNodes(j);
+      DFSClient client = getDfsClient(j);
       DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
       for (int i = 0 ; i < 5 && info.length != 0; i++) {
         LOG.info("Waiting for datanode to be marked dead");
@@ -828,19 +601,20 @@ public class TestDecommission {
     LOG.info("Starting test testDecommissionWithOpenfile");
     
     //At most 4 nodes will be decommissioned
-    startCluster(1, 7, conf);
+    startCluster(1, 7);
         
-    FileSystem fileSys = cluster.getFileSystem(0);
-    FSNamesystem ns = cluster.getNamesystem(0);
-    
+    FileSystem fileSys = getCluster().getFileSystem(0);
+    FSNamesystem ns = getCluster().getNamesystem(0);
+
     String openFile = "/testDecommissionWithOpenfile.dat";
            
     writeFile(fileSys, new Path(openFile), (short)3);   
     // make sure the file was open for write
     FSDataOutputStream fdos =  fileSys.append(new Path(openFile)); 
     
-    LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(cluster.getNameNode(0), openFile, 0, fileSize);
-              
+    LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(
+        getCluster().getNameNode(0), openFile, 0, fileSize);
+
     DatanodeInfo[] dnInfos4LastBlock = lbs.getLastLocatedBlock().getLocations();
     DatanodeInfo[] dnInfos4FirstBlock = lbs.get(0).getLocations();
     
@@ -863,12 +637,12 @@ public class TestDecommission {
     //decommission one of the 3 nodes which have last block
     nodes.add(dnInfos4LastBlock[0].getXferAddr());
     dnInfos.add(dm.getDatanode(dnInfos4LastBlock[0]));
-    
-    writeConfigFile(excludeFile, nodes);
-    refreshNodes(ns, conf);  
+
+    initExcludeHosts(nodes);
+    refreshNodes(0);
     for (DatanodeInfo dn : dnInfos) {
       waitNodeState(dn, AdminStates.DECOMMISSIONED);
-    }           
+    }
 
     fdos.close();
   }
@@ -882,31 +656,32 @@ public class TestDecommission {
     int numNamenodes = 1;
     int numDatanodes = 1;
     int replicas = 1;
-    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
+    getConf().setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
         DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT);
-    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 5);
+    getConf().setLong(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 5);
 
-    startCluster(numNamenodes, numDatanodes, conf);
+    startCluster(numNamenodes, numDatanodes);
     Path file1 = new Path("testDecommissionWithNamenodeRestart.dat");
-    FileSystem fileSys = cluster.getFileSystem();
+    FileSystem fileSys = getCluster().getFileSystem();
     writeFile(fileSys, file1, replicas);
         
-    DFSClient client = getDfsClient(cluster.getNameNode(), conf);
+    DFSClient client = getDfsClient(0);
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
     DatanodeID excludedDatanodeID = info[0];
     String excludedDatanodeName = info[0].getXferAddr();
 
-    writeConfigFile(excludeFile, new ArrayList<String>(Arrays.asList(excludedDatanodeName)));
+    initExcludeHost(excludedDatanodeName);
 
     //Add a new datanode to cluster
-    cluster.startDataNodes(conf, 1, true, null, null, null, null);
+    getCluster().startDataNodes(getConf(), 1, true, null, null, null, null);
     numDatanodes+=1;
 
-    assertEquals("Number of datanodes should be 2 ", 2, cluster.getDataNodes().size());
+    assertEquals("Number of datanodes should be 2 ", 2,
+        getCluster().getDataNodes().size());
     //Restart the namenode
-    cluster.restartNameNode();
+    getCluster().restartNameNode();
     DatanodeInfo datanodeInfo = NameNodeAdapter.getDatanode(
-        cluster.getNamesystem(), excludedDatanodeID);
+        getCluster().getNamesystem(), excludedDatanodeID);
     waitNodeState(datanodeInfo, AdminStates.DECOMMISSIONED);
 
     // Ensure decommissioned datanode is not automatically shutdown
@@ -919,9 +694,8 @@ public class TestDecommission {
     cleanupFile(fileSys, file1);
     // Restart the cluster and ensure recommissioned datanodes
     // are allowed to register with the namenode
-    cluster.shutdown();
-    startCluster(numNamenodes, numDatanodes, conf);
-    cluster.shutdown();
+    shutdownCluster();
+    startCluster(numNamenodes, numDatanodes);
   }
 
   /**
@@ -933,30 +707,30 @@ public class TestDecommission {
     int numNamenodes = 1;
     int numDatanodes = 2;
 
-    startCluster(numNamenodes, numDatanodes, conf);
+    startCluster(numNamenodes, numDatanodes);
 
-    DFSClient client = getDfsClient(cluster.getNameNode(), conf);
+    DFSClient client = getDfsClient(0);
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
     DatanodeInfo excludedDatanode = info[0];
     String excludedDatanodeName = info[0].getXferAddr();
 
-    writeConfigFile(hostsFile, new ArrayList<String>(Arrays.asList(
-        excludedDatanodeName, info[1].getXferAddr())));
-    decommissionNode(0, excludedDatanode.getDatanodeUuid(), null,
+    List<String> hosts = new ArrayList<String>(Arrays.asList(
+        excludedDatanodeName, info[1].getXferAddr()));
+    initIncludeHosts(hosts.toArray(new String[hosts.size()]));
+    takeNodeOutofService(0, excludedDatanode.getDatanodeUuid(), 0, null,
         AdminStates.DECOMMISSIONED);
 
-    cluster.stopDataNode(excludedDatanodeName);
+    getCluster().stopDataNode(excludedDatanodeName);
     DFSTestUtil.waitForDatanodeState(
-        cluster, excludedDatanode.getDatanodeUuid(), false, 20000);
+        getCluster(), excludedDatanode.getDatanodeUuid(), false, 20000);
 
     //Restart the namenode
-    cluster.restartNameNode();
+    getCluster().restartNameNode();
 
     assertEquals("There should be one node alive", 1,
         client.datanodeReport(DatanodeReportType.LIVE).length);
     assertEquals("There should be one node dead", 1,
         client.datanodeReport(DatanodeReportType.DEAD).length);
-    cluster.shutdown();
   }
 
   /**
@@ -976,7 +750,6 @@ public class TestDecommission {
   @Ignore
   @Test(timeout=360000)
   public void testIncludeByRegistrationName() throws Exception {
-    Configuration hdfsConf = new Configuration(conf);
     // Any IPv4 address starting with 127 functions as a "loopback" address
     // which is connected to the current host.  So by choosing 127.0.0.100
     // as our registration name, we have chosen a name which is also a valid
@@ -985,26 +758,21 @@ public class TestDecommission {
     // to deal with DNS in this test.
     final String registrationName = "127.0.0.100";
     final String nonExistentDn = "127.0.0.10";
-    hdfsConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, registrationName);
-    cluster = new MiniDFSCluster.Builder(hdfsConf)
-        .numDataNodes(1).checkDataNodeHostConfig(true)
-        .setupHostsFile(true).build();
-    cluster.waitActive();
+    getConf().set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, registrationName);
+    startCluster(1, 1, false, null, true);
 
     // Set up an includes file that doesn't have our datanode.
-    ArrayList<String> nodes = new ArrayList<String>();
-    nodes.add(nonExistentDn);
-    writeConfigFile(hostsFile,  nodes);
-    refreshNodes(cluster.getNamesystem(0), hdfsConf);
+    initIncludeHost(nonExistentDn);
+    refreshNodes(0);
 
     // Wait for the DN to be marked dead.
     LOG.info("Waiting for DN to be marked as dead.");
-    final DFSClient client = getDfsClient(cluster.getNameNode(0), hdfsConf);
+    final DFSClient client = getDfsClient(0);
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
       @Override
       public Boolean get() {
         BlockManagerTestUtil
-            .checkHeartbeat(cluster.getNamesystem().getBlockManager());
+            .checkHeartbeat(getCluster().getNamesystem().getBlockManager());
         try {
           DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.DEAD);
           return info.length == 1;
@@ -1017,13 +785,11 @@ public class TestDecommission {
 
     // Use a non-empty include file with our registration name.
     // It should work.
-    int dnPort = cluster.getDataNodes().get(0).getXferPort();
-    nodes = new ArrayList<String>();
-    nodes.add(registrationName + ":" + dnPort);
-    writeConfigFile(hostsFile,  nodes);
-    refreshNodes(cluster.getNamesystem(0), hdfsConf);
-    cluster.restartDataNode(0);
-    cluster.triggerHeartbeats();
+    int dnPort = getCluster().getDataNodes().get(0).getXferPort();
+    initIncludeHost(registrationName + ":" + dnPort);
+    refreshNodes(0);
+    getCluster().restartDataNode(0);
+    getCluster().triggerHeartbeats();
 
     // Wait for the DN to come back.
     LOG.info("Waiting for DN to come back.");
@@ -1031,7 +797,7 @@ public class TestDecommission {
       @Override
       public Boolean get() {
         BlockManagerTestUtil
-            .checkHeartbeat(cluster.getNamesystem().getBlockManager());
+            .checkHeartbeat(getCluster().getNamesystem().getBlockManager());
         try {
           DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.LIVE);
           if (info.length == 1) {
@@ -1050,20 +816,19 @@ public class TestDecommission {
   
   @Test(timeout=120000)
   public void testBlocksPerInterval() throws Exception {
-    Configuration newConf = new Configuration(conf);
     org.apache.log4j.Logger.getLogger(DecommissionManager.class)
         .setLevel(Level.TRACE);
     // Turn the blocks per interval way down
-    newConf.setInt(
+    getConf().setInt(
         DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
         3);
     // Disable the normal monitor runs
-    newConf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
+    getConf().setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
         Integer.MAX_VALUE);
-    startCluster(1, 3, newConf);
-    final FileSystem fs = cluster.getFileSystem();
+    startCluster(1, 3);
+    final FileSystem fs = getCluster().getFileSystem();
     final DatanodeManager datanodeManager =
-        cluster.getNamesystem().getBlockManager().getDatanodeManager();
+        getCluster().getNamesystem().getBlockManager().getDatanodeManager();
     final DecommissionManager decomManager = datanodeManager.getDecomManager();
 
     // Write a 3 block file, so each node has one block. Should scan 3 nodes.
@@ -1085,10 +850,9 @@ public class TestDecommission {
       throws IOException, ExecutionException, InterruptedException {
     // Decom all nodes
     ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
-    for (DataNode d: cluster.getDataNodes()) {
-      DatanodeInfo dn = decommissionNode(0, d.getDatanodeUuid(),
-          decommissionedNodes,
-          AdminStates.DECOMMISSION_INPROGRESS);
+    for (DataNode d: getCluster().getDataNodes()) {
+      DatanodeInfo dn = takeNodeOutofService(0, d.getDatanodeUuid(), 0,
+          decommissionedNodes, AdminStates.DECOMMISSION_INPROGRESS);
       decommissionedNodes.add(dn);
     }
     // Run decom scan and check
@@ -1097,26 +861,25 @@ public class TestDecommission {
         decomManager.getNumNodesChecked());
     // Recommission all nodes
     for (DatanodeInfo dn : decommissionedNodes) {
-      recommissionNode(0, dn);
+      putNodeInService(0, dn);
     }
   }
 
   @Test(timeout=120000)
   public void testPendingNodes() throws Exception {
-    Configuration newConf = new Configuration(conf);
     org.apache.log4j.Logger.getLogger(DecommissionManager.class)
         .setLevel(Level.TRACE);
     // Only allow one node to be decom'd at a time
-    newConf.setInt(
+    getConf().setInt(
         DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES,
         1);
     // Disable the normal monitor runs
-    newConf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 
+    getConf().setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
         Integer.MAX_VALUE);
-    startCluster(1, 3, newConf);
-    final FileSystem fs = cluster.getFileSystem();
+    startCluster(1, 3);
+    final FileSystem fs = getCluster().getFileSystem();
     final DatanodeManager datanodeManager =
-        cluster.getNamesystem().getBlockManager().getDatanodeManager();
+        getCluster().getNamesystem().getBlockManager().getDatanodeManager();
     final DecommissionManager decomManager = datanodeManager.getDecomManager();
 
     // Keep a file open to prevent decom from progressing
@@ -1125,16 +888,15 @@ public class TestDecommission {
     // Flush and trigger block reports so the block definitely shows up on NN
     open1.write(123);
     open1.hflush();
-    for (DataNode d: cluster.getDataNodes()) {
+    for (DataNode d: getCluster().getDataNodes()) {
       DataNodeTestUtils.triggerBlockReport(d);
     }
     // Decom two nodes, so one is still alive
     ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
     for (int i=0; i<2; i++) {
-      final DataNode d = cluster.getDataNodes().get(i);
-      DatanodeInfo dn = decommissionNode(0, d.getDatanodeUuid(), 
-          decommissionedNodes, 
-          AdminStates.DECOMMISSION_INPROGRESS);
+      final DataNode d = getCluster().getDataNodes().get(i);
+      DatanodeInfo dn = takeNodeOutofService(0, d.getDatanodeUuid(), 0,
+          decommissionedNodes, AdminStates.DECOMMISSION_INPROGRESS);
       decommissionedNodes.add(dn);
     }
 
@@ -1145,10 +907,9 @@ public class TestDecommission {
 
     // Close file, try to decom the last node, should get stuck in tracked
     open1.close();
-    final DataNode d = cluster.getDataNodes().get(2);
-    DatanodeInfo dn = decommissionNode(0, d.getDatanodeUuid(),
-        decommissionedNodes,
-        AdminStates.DECOMMISSION_INPROGRESS);
+    final DataNode d = getCluster().getDataNodes().get(2);
+    DatanodeInfo dn = takeNodeOutofService(0, d.getDatanodeUuid(), 0,
+        decommissionedNodes, AdminStates.DECOMMISSION_INPROGRESS);
     decommissionedNodes.add(dn);
     BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
     
@@ -1171,16 +932,11 @@ public class TestDecommission {
    */
   @Test
   public void testCountOnDecommissionedNodeList() throws IOException{
-    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
+    getConf().setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+    getConf().setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
+        1);
     try {
-      cluster =
-          new MiniDFSCluster.Builder(conf)
-              .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(1))
-              .numDataNodes(1).build();
-      cluster.waitActive();
-      DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
-      validateCluster(client, 1);
+      startCluster(1, 1);
 
       ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
           new ArrayList<ArrayList<DatanodeInfo>>(1);
@@ -1188,10 +944,10 @@ public class TestDecommission {
 
       // Move datanode1 to Decommissioned state
       ArrayList<DatanodeInfo> decommissionedNode = namenodeDecomList.get(0);
-      decommissionNode(0, null,
-          decommissionedNode, AdminStates.DECOMMISSIONED);
+      takeNodeOutofService(0, null, 0, decommissionedNode,
+          AdminStates.DECOMMISSIONED);
 
-      FSNamesystem ns = cluster.getNamesystem(0);
+      FSNamesystem ns = getCluster().getNamesystem(0);
       DatanodeManager datanodeManager =
           ns.getBlockManager().getDatanodeManager();
       List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
@@ -1202,7 +958,7 @@ public class TestDecommission {
       datanodeManager.fetchDatanodes(live, null, true);
       assertTrue(0==live.size());
     }finally {
-      cluster.shutdown();
+      shutdownCluster();
     }
   }
 
@@ -1235,21 +991,15 @@ public class TestDecommission {
     Map<String, Map<String, String>> usage = null;
     DatanodeInfo decommissionedNodeInfo = null;
     String zeroNodeUsage = "0.00%";
-    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
-    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
+    getConf().setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+    getConf().setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+    getConf().setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
+        1);
     FileSystem fileSys = null;
     Path file1 = new Path("testNodeUsage.dat");
     try {
-      SimulatedFSDataset.setFactory(conf);
-      cluster =
-          new MiniDFSCluster.Builder(conf)
-              .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(1))
-              .numDataNodes(numDatanodes)
-              .simulatedCapacities(nodesCapacity).build();
-      cluster.waitActive();
-      DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
-      validateCluster(client, numDatanodes);
+      SimulatedFSDataset.setFactory(getConf());
+      startCluster(1, numDatanodes, false, nodesCapacity, false);
 
       ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
           new ArrayList<ArrayList<DatanodeInfo>>(1);
@@ -1258,12 +1008,12 @@ public class TestDecommission {
       if (decommissionState == AdminStates.DECOMMISSIONED) {
         // Move datanode1 to Decommissioned state
         ArrayList<DatanodeInfo> decommissionedNode = namenodeDecomList.get(0);
-        decommissionedNodeInfo = decommissionNode(0, null,
+        decommissionedNodeInfo = takeNodeOutofService(0, null, 0,
             decommissionedNode, decommissionState);
       }
       // Write a file(replica 1).Hence will be written to only one live node.
-      fileSys = cluster.getFileSystem(0);
-      FSNamesystem ns = cluster.getNamesystem(0);
+      fileSys = getCluster().getFileSystem(0);
+      FSNamesystem ns = getCluster().getNamesystem(0);
       writeFile(fileSys, file1, 1);
       Thread.sleep(2000);
 
@@ -1276,7 +1026,7 @@ public class TestDecommission {
         // Start decommissioning datanode
         ArrayList<DatanodeInfo> decommissioningNodes = namenodeDecomList.
             get(0);
-        decommissionedNodeInfo = decommissionNode(0, null,
+        decommissionedNodeInfo = takeNodeOutofService(0, null, 0,
             decommissioningNodes, decommissionState);
         // NodeUsage should not include DECOMMISSION_INPROGRESS node
         // (minUsage should be 0.00%)
@@ -1286,7 +1036,7 @@ public class TestDecommission {
             equalsIgnoreCase(zeroNodeUsage));
       }
       // Recommission node
-      recommissionNode(0, decommissionedNodeInfo);
+      putNodeInService(0, decommissionedNodeInfo);
 
       usage = (Map<String, Map<String, String>>) JSON.parse(ns.getNodeUsage());
       String nodeusageAfterRecommi =
@@ -1297,7 +1047,6 @@ public class TestDecommission {
           equalsIgnoreCase(nodeusageAfterRecommi));
     } finally {
       cleanupFile(fileSys, file1);
-      cluster.shutdown();
     }
   }
 
@@ -1306,9 +1055,8 @@ public class TestDecommission {
     int numNamenodes = 1;
     int numDatanodes = 2;
 
-    startCluster(numNamenodes,numDatanodes,conf);
-    cluster.waitActive();
-    FSNamesystem ns = cluster.getNamesystem(0);
+    startCluster(numNamenodes, numDatanodes);
+    FSNamesystem ns = getCluster().getNamesystem(0);
     BlockManager blockManager = ns.getBlockManager();
     DatanodeStatistics datanodeStatistics = blockManager.getDatanodeManager()
         .getDatanodeStatistics();
@@ -1318,11 +1066,11 @@ public class TestDecommission {
     long initialBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();
     ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
         new ArrayList<ArrayList<DatanodeInfo>>(numNamenodes);
-    namenodeDecomList.add(0, new ArrayList<DatanodeInfo>(numDatanodes));
+    namenodeDecomList.add(0, new ArrayList<>(numDatanodes));
     ArrayList<DatanodeInfo> decommissionedNodes = namenodeDecomList.get(0);
     //decommission one node
-    DatanodeInfo decomNode = decommissionNode(0, null, decommissionedNodes,
-        AdminStates.DECOMMISSIONED);
+    DatanodeInfo decomNode = takeNodeOutofService(0, null, 0,
+        decommissionedNodes, AdminStates.DECOMMISSIONED);
     decommissionedNodes.add(decomNode);
     long newUsedCapacity = datanodeStatistics.getCapacityUsed();
     long newTotalCapacity = datanodeStatistics.getCapacityTotal();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
new file mode 100644
index 0000000..63617ad
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
@@ -0,0 +1,310 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Collection;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.util.Time;
+import org.junit.Test;
+
+/**
+ * This class tests node maintenance.
+ */
+public class TestMaintenanceState extends AdminStatesBaseTest {
+  public static final Log LOG = LogFactory.getLog(TestMaintenanceState.class);
+  static private final long EXPIRATION_IN_MS = 500;
+
+  public TestMaintenanceState() {
+    setUseCombinedHostFileManager();
+  }
+
+  /**
+   * Verify a node can transition from AdminStates.ENTERING_MAINTENANCE to
+   * AdminStates.NORMAL.
+   */
+  @Test(timeout = 360000)
+  public void testTakeNodeOutOfEnteringMaintenance() throws Exception {
+    LOG.info("Starting testTakeNodeOutOfEnteringMaintenance");
+    final int replicas = 1;
+    final int numNamenodes = 1;
+    final int numDatanodes = 1;
+    final Path file1 = new Path("/testTakeNodeOutOfEnteringMaintenance.dat");
+
+    startCluster(numNamenodes, numDatanodes);
+
+    FileSystem fileSys = getCluster().getFileSystem(0);
+    writeFile(fileSys, file1, replicas, 1);
+
+    DatanodeInfo nodeOutofService = takeNodeOutofService(0,
+        null, Long.MAX_VALUE, null, AdminStates.ENTERING_MAINTENANCE);
+
+    putNodeInService(0, nodeOutofService.getDatanodeUuid());
+
+    cleanupFile(fileSys, file1);
+  }
+
+  /**
+   * Verify an AdminStates.ENTERING_MAINTENANCE node can expire and transition
+   * to AdminStates.NORMAL upon timeout.
+   */
+  @Test(timeout = 360000)
+  public void testEnteringMaintenanceExpiration() throws Exception {
+    LOG.info("Starting testEnteringMaintenanceExpiration");
+    final int replicas = 1;
+    final int numNamenodes = 1;
+    final int numDatanodes = 1;
+    final Path file1 = new Path("/testTakeNodeOutOfEnteringMaintenance.dat");
+
+    startCluster(numNamenodes, numDatanodes);
+
+    FileSystem fileSys = getCluster().getFileSystem(0);
+    writeFile(fileSys, file1, replicas, 1);
+
+    // expires in 500 milliseconds
+    DatanodeInfo nodeOutofService = takeNodeOutofService(0, null,
+        Time.monotonicNow() + EXPIRATION_IN_MS, null,
+        AdminStates.ENTERING_MAINTENANCE);
+
+    waitNodeState(nodeOutofService, AdminStates.NORMAL);
+
+    cleanupFile(fileSys, file1);
+  }
+
+  /**
+   * Verify a node stays in AdminStates.NORMAL with an invalid expiration.
+   */
+  @Test(timeout = 360000)
+  public void testInvalidExpiration() throws Exception {
+    LOG.info("Starting testInvalidExpiration");
+    final int replicas = 1;
+    final int numNamenodes = 1;
+    final int numDatanodes = 1;
+    final Path file1 = new Path("/testTakeNodeOutOfEnteringMaintenance.dat");
+
+    startCluster(numNamenodes, numDatanodes);
+
+    FileSystem fileSys = getCluster().getFileSystem(0);
+    writeFile(fileSys, file1, replicas, 1);
+
+    // expiration has to be greater than Time.monotonicNow().
+    takeNodeOutofService(0, null, Time.monotonicNow(), null,
+        AdminStates.NORMAL);
+
+    cleanupFile(fileSys, file1);
+  }
+
+  /**
+   * When a dead node is put to maintenance, it transitions directly to
+   * AdminStates.IN_MAINTENANCE.
+   */
+  @Test(timeout = 360000)
+  public void testPutDeadNodeToMaintenance() throws Exception {
+    LOG.info("Starting testPutDeadNodeToMaintenance");
+    final int numNamenodes = 1;
+    final int numDatanodes = 1;
+    final int replicas = 1;
+    final Path file1 = new Path("/testPutDeadNodeToMaintenance.dat");
+
+    startCluster(numNamenodes, numDatanodes);
+
+    FileSystem fileSys = getCluster().getFileSystem(0);
+    FSNamesystem ns = getCluster().getNamesystem(0);
+    writeFile(fileSys, file1, replicas, 1);
+
+    MiniDFSCluster.DataNodeProperties dnProp = getCluster().stopDataNode(0);
+    DFSTestUtil.waitForDatanodeState(
+        getCluster(), dnProp.datanode.getDatanodeUuid(), false, 20000);
+
+    int deadInMaintenance = ns.getNumInMaintenanceDeadDataNodes();
+    int liveInMaintenance = ns.getNumInMaintenanceLiveDataNodes();
+
+    takeNodeOutofService(0, dnProp.datanode.getDatanodeUuid(), Long.MAX_VALUE,
+        null, AdminStates.IN_MAINTENANCE);
+
+    assertEquals(deadInMaintenance + 1, ns.getNumInMaintenanceDeadDataNodes());
+    assertEquals(liveInMaintenance, ns.getNumInMaintenanceLiveDataNodes());
+
+    cleanupFile(fileSys, file1);
+  }
+
+  /**
+   * When a dead node is put to maintenance, it transitions directly to
+   * AdminStates.IN_MAINTENANCE. Then AdminStates.IN_MAINTENANCE expires and
+   * transitions to AdminStates.NORMAL.
+   */
+  @Test(timeout = 360000)
+  public void testPutDeadNodeToMaintenanceWithExpiration() throws Exception {
+    LOG.info("Starting testPutDeadNodeToMaintenanceWithExpiration");
+    final int numNamenodes = 1;
+    final int numDatanodes = 1;
+    final int replicas = 1;
+    final Path file1 = new Path("/testPutDeadNodeToMaintenance.dat");
+
+    startCluster(numNamenodes, numDatanodes);
+
+    FileSystem fileSys = getCluster().getFileSystem(0);
+    FSNamesystem ns = getCluster().getNamesystem(0);
+    writeFile(fileSys, file1, replicas, 1);
+
+    MiniDFSCluster.DataNodeProperties dnProp = getCluster().stopDataNode(0);
+    DFSTestUtil.waitForDatanodeState(
+        getCluster(), dnProp.datanode.getDatanodeUuid(), false, 20000);
+
+    int deadInMaintenance = ns.getNumInMaintenanceDeadDataNodes();
+    int liveInMaintenance = ns.getNumInMaintenanceLiveDataNodes();
+
+    DatanodeInfo nodeOutofService = takeNodeOutofService(0,
+        dnProp.datanode.getDatanodeUuid(),
+        Time.monotonicNow() + EXPIRATION_IN_MS, null,
+        AdminStates.IN_MAINTENANCE);
+
+    waitNodeState(nodeOutofService, AdminStates.NORMAL);
+
+    // no change
+    assertEquals(deadInMaintenance, ns.getNumInMaintenanceDeadDataNodes());
+    assertEquals(liveInMaintenance, ns.getNumInMaintenanceLiveDataNodes());
+
+    cleanupFile(fileSys, file1);
+  }
+
+  /**
+   * Transition from decommissioned state to maintenance state.
+   */
+  @Test(timeout = 360000)
+  public void testTransitionFromDecommissioned() throws IOException {
+    LOG.info("Starting testTransitionFromDecommissioned");
+    final int numNamenodes = 1;
+    final int numDatanodes = 4;
+    final int replicas = 3;
+    final Path file1 = new Path("/testTransitionFromDecommissioned.dat");
+
+    startCluster(numNamenodes, numDatanodes);
+
+    FileSystem fileSys = getCluster().getFileSystem(0);
+    writeFile(fileSys, file1, replicas, 1);
+
+    DatanodeInfo nodeOutofService = takeNodeOutofService(0, null, 0, null,
+        AdminStates.DECOMMISSIONED);
+
+    takeNodeOutofService(0, nodeOutofService.getDatanodeUuid(), Long.MAX_VALUE,
+        null, AdminStates.IN_MAINTENANCE);
+
+    cleanupFile(fileSys, file1);
+  }
+
+  /**
+   * Transition from decommissioned state to maintenance state.
+   * After the maintenance state expires, the node transitions to NORMAL.
+   */
+  @Test(timeout = 360000)
+  public void testTransitionFromDecommissionedAndExpired() throws IOException {
+    LOG.info("Starting testTransitionFromDecommissionedAndExpired");
+    final int numNamenodes = 1;
+    final int numDatanodes = 4;
+    final int replicas = 3;
+    final Path file1 = new Path("/testTransitionFromDecommissioned.dat");
+
+    startCluster(numNamenodes, numDatanodes);
+
+    FileSystem fileSys = getCluster().getFileSystem(0);
+    writeFile(fileSys, file1, replicas, 1);
+
+    DatanodeInfo nodeOutofService = takeNodeOutofService(0, null, 0, null,
+        AdminStates.DECOMMISSIONED);
+
+    takeNodeOutofService(0, nodeOutofService.getDatanodeUuid(),
+        Time.monotonicNow() + EXPIRATION_IN_MS, null,
+        AdminStates.IN_MAINTENANCE);
+
+    waitNodeState(nodeOutofService, AdminStates.NORMAL);
+
+    cleanupFile(fileSys, file1);
+  }
+
+  /**
+   * When a node is put to maintenance, it first transitions to
+   * AdminStates.ENTERING_MAINTENANCE. It makes sure all blocks have minimal
+   * replication before it can be transitioned to AdminStates.IN_MAINTENANCE.
+   * If a node becomes dead while it is in AdminStates.ENTERING_MAINTENANCE,
+   * its admin state should remain AdminStates.ENTERING_MAINTENANCE.
+   */
+  @Test(timeout = 360000)
+  public void testNodeDeadWhenInEnteringMaintenance() throws Exception {
+    LOG.info("Starting testNodeDeadWhenInEnteringMaintenance");
+    final int numNamenodes = 1;
+    final int numDatanodes = 1;
+    final int replicas = 1;
+    final Path file1 = new Path("/testNodeDeadWhenInEnteringMaintenance.dat");
+
+    startCluster(numNamenodes, numDatanodes);
+
+    FileSystem fileSys = getCluster().getFileSystem(0);
+    FSNamesystem ns = getCluster().getNamesystem(0);
+    writeFile(fileSys, file1, replicas, 1);
+
+    DatanodeInfo nodeOutofService = takeNodeOutofService(0,
+        getFirstBlockFirstReplicaUuid(fileSys, file1), Long.MAX_VALUE, null,
+        AdminStates.ENTERING_MAINTENANCE);
+    assertEquals(1, ns.getNumEnteringMaintenanceDataNodes());
+
+    MiniDFSCluster.DataNodeProperties dnProp =
+        getCluster().stopDataNode(nodeOutofService.getXferAddr());
+    DFSTestUtil.waitForDatanodeState(
+        getCluster(), nodeOutofService.getDatanodeUuid(), false, 20000);
+    DFSClient client = getDfsClient(0);
+    assertEquals("maintenance node shouldn't be alive", numDatanodes - 1,
+        client.datanodeReport(DatanodeReportType.LIVE).length);
+
+    getCluster().restartDataNode(dnProp, true);
+    getCluster().waitActive();
+    waitNodeState(nodeOutofService, AdminStates.ENTERING_MAINTENANCE);
+    assertEquals(1, ns.getNumEnteringMaintenanceDataNodes());
+
+    cleanupFile(fileSys, file1);
+  }
+
+  static protected String getFirstBlockFirstReplicaUuid(FileSystem fileSys,
+      Path name) throws IOException {
+    // need a raw stream
+    assertTrue("Not HDFS:"+fileSys.getUri(),
+        fileSys instanceof DistributedFileSystem);
+    HdfsDataInputStream dis = (HdfsDataInputStream)fileSys.open(name);
+    Collection<LocatedBlock> dinfo = dis.getAllBlocks();
+    for (LocatedBlock blk : dinfo) { // for each block
+      DatanodeInfo[] nodes = blk.getLocations();
+      if (nodes.length > 0) {
+        return nodes[0].getDatanodeUuid();
+      }
+    }
+    return null;
+  }
+}
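
Stepping back from the individual tests, the maintenance lifecycle they
exercise reduces to a short sequence. The sketch below is a recap, not a new
test: it reuses only the AdminStatesBaseTest helper calls visible in this
diff and assumes it runs inside a subclass of that base class.

    // Minimal sketch inside a subclass of AdminStatesBaseTest.
    startCluster(1, 1);                          // 1 namenode, 1 datanode

    // No expiration: the node parks in ENTERING_MAINTENANCE until its
    // blocks reach minimal replication, then moves to IN_MAINTENANCE.
    DatanodeInfo dn = takeNodeOutofService(0, null, Long.MAX_VALUE, null,
        AdminStates.ENTERING_MAINTENANCE);

    // Either return the node explicitly ...
    putNodeInService(0, dn.getDatanodeUuid());

    // ... or set a finite expiration and let it lapse back to NORMAL.
    dn = takeNodeOutofService(0, null, Time.monotonicNow() + 500, null,
        AdminStates.ENTERING_MAINTENANCE);
    waitNodeState(dn, AdminStates.NORMAL);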

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index 7c39bf8..6bb6040 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -158,7 +158,7 @@ public class TestDecommissioningStatus {
     // write nodename into the exclude file.
     ArrayList<String> nodes = new ArrayList<String>(decommissionedNodes);
     nodes.add(dnName);
-    hostsFileWriter.initExcludeHosts(nodes.toArray(new String[0]));
+    hostsFileWriter.initExcludeHosts(nodes);
   }
 
   private void checkDecommissionStatus(DatanodeDescriptor decommNode,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/HostsFileWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/HostsFileWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/HostsFileWriter.java
index 2ef0b8f..4c8fcef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/HostsFileWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/HostsFileWriter.java
@@ -20,8 +20,11 @@ package org.apache.hadoop.hdfs.util;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
 
 
 import org.apache.commons.io.FileUtils;
@@ -73,30 +76,60 @@ public class HostsFileWriter {
   }
 
   public void initExcludeHost(String hostNameAndPort) throws IOException {
-    initExcludeHosts(hostNameAndPort);
+    ArrayList<String> nodes = new ArrayList<>();
+    nodes.add(hostNameAndPort);
+    initExcludeHosts(nodes);
   }
 
-  public void initExcludeHosts(String... hostNameAndPorts) throws IOException {
+  public void initExcludeHosts(List<String> hostNameAndPorts)
+      throws IOException {
+    initOutOfServiceHosts(hostNameAndPorts, null);
+  }
+
+  public void initOutOfServiceHosts(List<String> decommissionHostNameAndPorts,
+      Map<String, Long> maintenanceHosts) throws IOException {
     StringBuilder excludeHosts = new StringBuilder();
     if (isLegacyHostsFile) {
-      for (String hostNameAndPort : hostNameAndPorts) {
+      if (maintenanceHosts != null && maintenanceHosts.size() > 0) {
+        throw new UnsupportedOperationException(
+            "maintenance support isn't supported by legacy hosts file");
+      }
+      for (String hostNameAndPort : decommissionHostNameAndPorts) {
         excludeHosts.append(hostNameAndPort).append("\n");
       }
-      DFSTestUtil.writeFile(localFileSys, excludeFile, excludeHosts.toString());
+      DFSTestUtil.writeFile(localFileSys, excludeFile,
+          excludeHosts.toString());
     } else {
       HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
-      for (String hostNameAndPort : hostNameAndPorts) {
-        DatanodeAdminProperties dn = new DatanodeAdminProperties();
-        String[] hostAndPort = hostNameAndPort.split(":");
-        dn.setHostName(hostAndPort[0]);
-        dn.setPort(Integer.parseInt(hostAndPort[1]));
-        dn.setAdminState(AdminStates.DECOMMISSIONED);
-        allDNs.add(dn);
+      if (decommissionHostNameAndPorts != null) {
+        for (String hostNameAndPort : decommissionHostNameAndPorts) {
+          DatanodeAdminProperties dn = new DatanodeAdminProperties();
+          String[] hostAndPort = hostNameAndPort.split(":");
+          dn.setHostName(hostAndPort[0]);
+          dn.setPort(Integer.parseInt(hostAndPort[1]));
+          dn.setAdminState(AdminStates.DECOMMISSIONED);
+          allDNs.add(dn);
+        }
+      }
+      if (maintenanceHosts != null) {
+        for (Map.Entry<String, Long> hostEntry : maintenanceHosts.entrySet()) {
+          DatanodeAdminProperties dn = new DatanodeAdminProperties();
+          String[] hostAndPort = hostEntry.getKey().split(":");
+          dn.setHostName(hostAndPort[0]);
+          dn.setPort(Integer.parseInt(hostAndPort[1]));
+          dn.setAdminState(AdminStates.IN_MAINTENANCE);
+          dn.setMaintenanceExpireTimeInMS(hostEntry.getValue());
+          allDNs.add(dn);
+        }
       }
       CombinedHostsFileWriter.writeFile(combinedFile.toString(), allDNs);
     }
   }
 
+  public void initIncludeHost(String hostNameAndPort) throws IOException {
+    initIncludeHosts(new String[]{hostNameAndPort});
+  }
+
   public void initIncludeHosts(String[] hostNameAndPorts) throws IOException {
     StringBuilder includeHosts = new StringBuilder();
     if (isLegacyHostsFile) {
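
For reference, a usage sketch of the new initOutOfServiceHosts entry point.
The method signature and the UnsupportedOperationException behavior for
legacy hosts files come from the diff above; the initialize() setup line and
the host strings are assumptions for illustration.

    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/admin");  // assumed setup helper

    List<String> decommission = Arrays.asList("host3:8090");
    Map<String, Long> maintenance = new HashMap<>();
    maintenance.put("host7:8090", Time.monotonicNow() + 3600 * 1000L);

    // With the combined (JSON) format this writes one DECOMMISSIONED and
    // one IN_MAINTENANCE entry, the latter carrying its expiration time.
    // With the legacy format a non-empty maintenance map would throw
    // UnsupportedOperationException.
    hostsFileWriter.initOutOfServiceHosts(decommission, maintenance);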

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java
index 923cf66..b48784f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java
@@ -62,7 +62,7 @@ public class TestCombinedHostsFileReader {
   public void testLoadExistingJsonFile() throws Exception {
     Set<DatanodeAdminProperties> all =
         CombinedHostsFileReader.readFile(EXISTING_FILE.getAbsolutePath());
-    assertEquals(5, all.size());
+    assertEquals(7, all.size());
   }
 
   /*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dcbdbdb/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dfs.hosts.json
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dfs.hosts.json b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dfs.hosts.json
index 64fca48..9c852e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dfs.hosts.json
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/dfs.hosts.json
@@ -3,3 +3,5 @@
 {"hostName": "host3", "adminState": "DECOMMISSIONED"}
 {"hostName": "host4", "upgradeDomain": "ud2", "adminState": "DECOMMISSIONED"}
 {"hostName": "host5", "port": 8090}
+{"hostName": "host6", "adminState": "IN_MAINTENANCE"}
+{"hostName": "host7", "adminState": "IN_MAINTENANCE", "maintenanceExpireTimeInMS": "112233"}




[10/19] hadoop git commit: HADOOP-13564. modify mapred to use hadoop_subcommand_opts

Posted by aw...@apache.org.
HADOOP-13564. modify mapred to use hadoop_subcommand_opts

Signed-off-by: Allen Wittenauer <aw...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/403ffae8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/403ffae8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/403ffae8

Branch: refs/heads/HADOOP-13341
Commit: 403ffae81df8ff34e2d2994b5bc7622aa3eb638e
Parents: 76c8d58
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Aug 30 15:11:24 2016 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Aug 31 08:04:44 2016 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/bin/mapred           | 11 +++++++----
 hadoop-mapreduce-project/bin/mapred-config.sh | 10 ++++++----
 hadoop-mapreduce-project/conf/mapred-env.sh   |  4 ++--
 3 files changed, 15 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/403ffae8/hadoop-mapreduce-project/bin/mapred
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/bin/mapred b/hadoop-mapreduce-project/bin/mapred
index c566d5a..fe7c56a 100755
--- a/hadoop-mapreduce-project/bin/mapred
+++ b/hadoop-mapreduce-project/bin/mapred
@@ -69,8 +69,6 @@ function mapredcmd_case
     historyserver)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME=org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer
-      hadoop_debug "Appending HADOOP_JOB_HISTORYSERVER_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_JOB_HISTORYSERVER_OPTS}"
       if [ -n "${HADOOP_JOB_HISTORYSERVER_HEAPSIZE}" ]; then
         # shellcheck disable=SC2034
         HADOOP_HEAPSIZE_MAX="${HADOOP_JOB_HISTORYSERVER_HEAPSIZE}"
@@ -144,13 +142,18 @@ fi
 
 hadoop_add_client_opts
 
-if [[ ${HADOOP_SLAVE_MODE} = true ]]; then
-  hadoop_common_slave_mode_execute "${HADOOP_MAPRED_HOME}/bin/mapred" "${HADOOP_USER_PARAMS[@]}"
+if [[ ${HADOOP_WORKER_MODE} = true ]]; then
+  hadoop_common_worker_mode_execute "${HADOOP_MAPRED_HOME}/bin/mapred" "${HADOOP_USER_PARAMS[@]}"
   exit $?
 fi
 
+hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
+
 if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
   HADOOP_SECURE_USER="${HADOOP_SUBCMD_SECUREUSER}"
+
+  hadoop_subcommand_secure_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
+
   hadoop_verify_secure_prereq
   hadoop_setup_secure_service
   priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/403ffae8/hadoop-mapreduce-project/bin/mapred-config.sh
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/bin/mapred-config.sh b/hadoop-mapreduce-project/bin/mapred-config.sh
old mode 100644
new mode 100755
index a989792..68d3463d
--- a/hadoop-mapreduce-project/bin/mapred-config.sh
+++ b/hadoop-mapreduce-project/bin/mapred-config.sh
@@ -26,7 +26,7 @@ function hadoop_subproject_init
       export HADOOP_MAPRED_ENV_PROCESSED=true
     fi
   fi
-  
+
   # at some point in time, someone thought it would be a good idea to
   # create separate vars for every subproject.  *sigh*
   # let's perform some overrides and setup some defaults for bw compat
@@ -38,15 +38,17 @@ function hadoop_subproject_init
   hadoop_deprecate_envvar HADOOP_MAPRED_LOG_DIR HADOOP_LOG_DIR
 
   hadoop_deprecate_envvar HADOOP_MAPRED_LOGFILE HADOOP_LOGFILE
-  
+
   hadoop_deprecate_envvar HADOOP_MAPRED_NICENESS HADOOP_NICENESS
-  
+
   hadoop_deprecate_envvar HADOOP_MAPRED_STOP_TIMEOUT HADOOP_STOP_TIMEOUT
-  
+
   hadoop_deprecate_envvar HADOOP_MAPRED_PID_DIR HADOOP_PID_DIR
 
   hadoop_deprecate_envvar HADOOP_MAPRED_ROOT_LOGGER HADOOP_ROOT_LOGGER
 
+  hadoop_deprecate_envvar HADOOP_JOB_HISTORY_OPTS MAPRED_HISTORYSERVER_OPTS
+
   HADOOP_MAPRED_HOME="${HADOOP_MAPRED_HOME:-$HADOOP_HOME}"
 
   hadoop_deprecate_envvar HADOOP_MAPRED_IDENT_STRING HADOOP_IDENT_STRING

http://git-wip-us.apache.org/repos/asf/hadoop/blob/403ffae8/hadoop-mapreduce-project/conf/mapred-env.sh
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/conf/mapred-env.sh b/hadoop-mapreduce-project/conf/mapred-env.sh
index bbe4a49..53bc296 100644
--- a/hadoop-mapreduce-project/conf/mapred-env.sh
+++ b/hadoop-mapreduce-project/conf/mapred-env.sh
@@ -31,14 +31,14 @@
 # Specify the max heapsize for the JobHistoryServer.  If no units are
 # given, it will be assumed to be in MB.
 # This value will be overridden by an Xmx setting specified in HADOOP_OPTS,
-# and/or HADOOP_JOB_HISTORYSERVER_OPTS.
+# and/or MAPRED_HISTORYSERVER_OPTS.
 # Default is the same as HADOOP_HEAPSIZE_MAX.
 #export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=
 
 # Specify the JVM options to be used when starting the HistoryServer.
 # These options will be appended to the options specified as HADOOP_OPTS
 # and therefore may override any similar flags set in HADOOP_OPTS
-#export HADOOP_JOB_HISTORYSERVER_OPTS=
+#export MAPRED_HISTORYSERVER_OPTS=
 
 # Specify the log4j settings for the JobHistoryServer
 # Java property: hadoop.root.logger




[19/19] hadoop git commit: HADOOP-13361. Modify hadoop_verify_user to be consistent with hadoop_subcommand_opts (ie more granularity)

Posted by aw...@apache.org.
HADOOP-13361. Modify hadoop_verify_user to be consistent with hadoop_subcommand_opts (ie more granularity)

Signed-off-by: Allen Wittenauer <aw...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76c8d58e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76c8d58e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76c8d58e

Branch: refs/heads/HADOOP-13341
Commit: 76c8d58e4dbdc572cdd3284541cbd2a1eb911b28
Parents: 4450067
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Aug 30 13:55:04 2016 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Aug 31 08:04:44 2016 -0700

----------------------------------------------------------------------
 .../hadoop-common/src/main/bin/hadoop           |  4 +-
 .../src/main/bin/hadoop-functions.sh            | 24 +++++++--
 .../hadoop-common/src/main/conf/hadoop-env.sh   |  3 +-
 .../src/test/scripts/hadoop_verify_user.bats    | 53 ++++++++++++++++++++
 .../hadoop-hdfs/src/main/bin/hdfs               |  4 +-
 hadoop-mapreduce-project/bin/mapred             |  4 +-
 hadoop-yarn-project/hadoop-yarn/bin/yarn        |  4 +-
 7 files changed, 82 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76c8d58e/hadoop-common-project/hadoop-common/src/main/bin/hadoop
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
index 9b682e6..450543d 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
@@ -190,6 +190,8 @@ fi
 HADOOP_SUBCMD=$1
 shift
 
+hadoop_verify_user "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
+
 HADOOP_SUBCMD_ARGS=("$@")
 
 if declare -f hadoop_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then
@@ -199,8 +201,6 @@ else
   hadoopcmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
 fi
 
-hadoop_verify_user "${HADOOP_SUBCMD}"
-
 hadoop_add_client_opts
 
 if [[ ${HADOOP_WORKER_MODE} = true ]]; then

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76c8d58e/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index db868bd..91546d4 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -1984,12 +1984,26 @@ function hadoop_secure_daemon_handler
 ## @return       will exit on failure conditions
 function hadoop_verify_user
 {
-  local command=$1
-  local uservar="HADOOP_${command}_USER"
+  declare program=$1
+  declare command=$2
+  declare uprogram
+  declare ucommand
+  declare uvar
 
-  if [[ -n ${!uservar} ]]; then
-    if [[ ${!uservar} !=  "${USER}" ]]; then
-      hadoop_error "ERROR: ${command} can only be executed by ${!uservar}."
+  if [[ -z "${BASH_VERSINFO[0]}" ]] \
+     || [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
+    uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]')
+    ucommand=$(echo "${command}" | tr '[:lower:]' '[:upper:]')
+  else
+    uprogram=${program^^}
+    ucommand=${command^^}
+  fi
+
+  uvar="${uprogram}_${ucommand}_USER"
+
+  if [[ -n ${!uvar} ]]; then
+    if [[ ${!uvar} !=  "${USER}" ]]; then
+      hadoop_error "ERROR: ${command} can only be executed by ${!uvar}."
       exit 1
     fi
   fi

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76c8d58e/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
index 4656f4d..a78f3f6 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
@@ -417,6 +417,7 @@ esac
 #
 # To prevent accidents, shell commands can be (superficially) locked
 # to only allow certain users to execute certain subcommands.
+# It uses the format of (command)_(subcommand)_USER.
 #
 # For example, to limit who can execute the namenode command,
-# export HADOOP_namenode_USER=hdfs
+# export HDFS_NAMENODE_USER=hdfs

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76c8d58e/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_verify_user.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_verify_user.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_verify_user.bats
new file mode 100644
index 0000000..ac9fa9f
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_verify_user.bats
@@ -0,0 +1,53 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_verify_user (hadoop: no setting)" {
+  run hadoop_verify_user hadoop test
+  [ "${status}" = "0" ]
+}
+
+@test "hadoop_verify_user (yarn: no setting)" {
+  run hadoop_verify_user yarn test
+  [ "${status}" = "0" ]
+}
+
+@test "hadoop_verify_user (hadoop: allow)" {
+  HADOOP_TEST_USER=${USER}
+  run hadoop_verify_user hadoop test
+  [ "${status}" = "0" ]
+}
+
+@test "hadoop_verify_user (yarn: allow)" {
+  YARN_TEST_USER=${USER}
+  run hadoop_verify_user yarn test
+  [ "${status}" = "0" ]
+}
+
+# colon isn't a valid username, so let's use it
+# this should fail regardless of who the user is
+# that is running the test code
+@test "hadoop_verify_user (hadoop: disallow)" {
+  HADOOP_TEST_USER=:
+  run hadoop_verify_user hadoop test
+  [ "${status}" = "1" ]
+}
+
+@test "hadoop_verify_user (yarn: disallow)" {
+  YARN_TEST_USER=:
+  run hadoop_verify_user yarn test
+  [ "${status}" = "1" ]
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76c8d58e/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index b704b00..5ecea29 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -246,6 +246,8 @@ fi
 HADOOP_SUBCMD=$1
 shift
 
+hadoop_verify_user "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
+
 HADOOP_SUBCMD_ARGS=("$@")
 
 if declare -f hdfs_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then
@@ -255,8 +257,6 @@ else
   hdfscmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
 fi
 
-hadoop_verify_user "${HADOOP_SUBCMD}"
-
 hadoop_add_client_opts
 
 if [[ ${HADOOP_WORKER_MODE} = true ]]; then

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76c8d58e/hadoop-mapreduce-project/bin/mapred
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/bin/mapred b/hadoop-mapreduce-project/bin/mapred
index 3243d20..c566d5a 100755
--- a/hadoop-mapreduce-project/bin/mapred
+++ b/hadoop-mapreduce-project/bin/mapred
@@ -131,6 +131,8 @@ fi
 HADOOP_SUBCMD=$1
 shift
 
+hadoop_verify_user "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
+
 HADOOP_SUBCMD_ARGS=("$@")
 
 if declare -f mapred_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then
@@ -140,8 +142,6 @@ else
   mapredcmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
 fi
 
-hadoop_verify_user "${HADOOP_SUBCMD}"
-
 hadoop_add_client_opts
 
 if [[ ${HADOOP_SLAVE_MODE} = true ]]; then

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76c8d58e/hadoop-yarn-project/hadoop-yarn/bin/yarn
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index d2a8a50..804fd1a 100755
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -228,6 +228,8 @@ fi
 HADOOP_SUBCMD=$1
 shift
 
+hadoop_verify_user "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
+
 HADOOP_SUBCMD_ARGS=("$@")
 
 if declare -f yarn_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then
@@ -237,8 +239,6 @@ else
   yarncmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
 fi
 
-hadoop_verify_user "${HADOOP_SUBCMD}"
-
 # It's unclear if YARN_CLIENT_OPTS is actually a useful
 # thing to have separate from HADOOP_CLIENT_OPTS.  Someone
 # might use it, so let's not deprecate it and just override




[07/19] hadoop git commit: YARN-5221. Expose UpdateResourceRequest API to allow AM to request for change in container properties. (asuresh)

Posted by aw...@apache.org.
YARN-5221. Expose UpdateResourceRequest API to allow AM to request for change in container properties. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6d9cff2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6d9cff2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6d9cff2

Branch: refs/heads/HADOOP-13341
Commit: d6d9cff21b7b6141ed88359652cf22e8973c0661
Parents: 9dcbdbd
Author: Arun Suresh <as...@apache.org>
Authored: Sat Aug 27 15:22:43 2016 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Tue Aug 30 15:52:29 2016 -0700

----------------------------------------------------------------------
 .../app/local/TestLocalContainerAllocator.java  |   4 +-
 .../v2/app/rm/TestRMContainerAllocator.java     |  10 +-
 .../sls/scheduler/ResourceSchedulerWrapper.java |   6 +-
 .../sls/scheduler/SLSCapacityScheduler.java     |   6 +-
 .../api/protocolrecords/AllocateRequest.java    |  64 +---
 .../api/protocolrecords/AllocateResponse.java   |  76 +++--
 .../hadoop/yarn/api/records/Container.java      |  24 +-
 .../records/ContainerResourceChangeRequest.java | 117 -------
 .../yarn/api/records/ContainerUpdateType.java   |  45 +++
 .../yarn/api/records/UpdateContainerError.java  | 119 +++++++
 .../api/records/UpdateContainerRequest.java     | 218 ++++++++++++
 .../yarn/api/records/UpdatedContainer.java      | 118 +++++++
 .../src/main/proto/yarn_protos.proto            |   6 +-
 .../src/main/proto/yarn_service_protos.proto    |  31 +-
 .../distributedshell/ApplicationMaster.java     |   4 +-
 .../yarn/client/api/async/AMRMClientAsync.java  |   9 +-
 .../api/async/impl/AMRMClientAsyncImpl.java     |   8 +-
 .../yarn/client/api/impl/AMRMClientImpl.java    |  84 ++---
 .../api/async/impl/TestAMRMClientAsync.java     |  55 +--
 .../yarn/client/api/impl/TestAMRMClient.java    |  42 +--
 .../api/impl/TestAMRMClientOnRMRestart.java     |  14 +-
 .../impl/pb/AllocateRequestPBImpl.java          | 151 +++------
 .../impl/pb/AllocateResponsePBImpl.java         | 192 ++++++++---
 .../api/records/impl/pb/ContainerPBImpl.java    |  13 +
 .../ContainerResourceChangeRequestPBImpl.java   | 141 --------
 .../yarn/api/records/impl/pb/ProtoUtils.java    |  69 +++-
 .../impl/pb/UpdateContainerErrorPBImpl.java     | 125 +++++++
 .../impl/pb/UpdateContainerRequestPBImpl.java   | 187 ++++++++++
 .../records/impl/pb/UpdatedContainerPBImpl.java | 117 +++++++
 .../yarn/security/ContainerTokenIdentifier.java |  29 +-
 .../src/main/proto/yarn_security_token.proto    |   1 +
 .../hadoop/yarn/api/TestPBImplRecords.java      |  17 +-
 .../yarn/security/TestYARNTokenIdentifier.java  |   4 +-
 .../api/protocolrecords/NMContainerStatus.java  |  15 +-
 .../impl/pb/NMContainerStatusPBImpl.java        |  13 +
 .../OpportunisticContainerAllocator.java        |   2 +-
 .../hadoop/yarn/server/utils/BuilderUtils.java  |  14 +-
 .../yarn_server_common_service_protos.proto     |   1 +
 .../protocolrecords/TestProtocolRecords.java    |   4 +-
 .../TestRegisterNodeManagerRequest.java         |   2 +-
 .../containermanager/ContainerManagerImpl.java  |  16 +-
 .../container/ContainerImpl.java                |   7 +-
 .../queuing/QueuingContainerManagerImpl.java    |   3 +-
 .../recovery/NMLeveldbStateStoreService.java    |  41 ++-
 .../recovery/NMNullStateStoreService.java       |   4 +-
 .../recovery/NMStateStoreService.java           |  13 +-
 .../nodemanager/TestNodeManagerResync.java      |   2 +-
 .../nodemanager/TestNodeStatusUpdater.java      |  24 +-
 .../amrmproxy/MockResourceManagerFacade.java    |   4 +-
 .../BaseContainerManagerTest.java               |   2 +-
 .../recovery/NMMemoryStateStoreService.java     |   7 +-
 .../TestNMLeveldbStateStoreService.java         |   7 +-
 .../nodemanager/webapp/MockContainer.java       |   2 +-
 .../nodemanager/webapp/TestNMWebServer.java     |   6 +-
 .../ApplicationMasterService.java               |  54 ++-
 .../server/resourcemanager/RMServerUtils.java   | 338 ++++++++++---------
 .../scheduler/AbstractYarnScheduler.java        |  13 +-
 .../scheduler/SchedContainerChangeRequest.java  |   2 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  12 +-
 .../scheduler/YarnScheduler.java                |   6 +-
 .../scheduler/capacity/CapacityScheduler.java   |   8 +-
 .../scheduler/fair/FairScheduler.java           |   6 +-
 .../scheduler/fifo/FifoScheduler.java           |   6 +-
 .../security/RMContainerTokenSecretManager.java |  64 ++--
 .../yarn/server/resourcemanager/MockAM.java     |   7 +-
 .../resourcemanager/TestApplicationCleanup.java |   9 +-
 .../TestApplicationMasterService.java           |  86 +++--
 .../server/resourcemanager/TestRMRestart.java   |   2 +-
 .../TestResourceTrackerService.java             |   8 +-
 .../capacity/TestCapacityScheduler.java         |  42 ++-
 .../capacity/TestContainerAllocation.java       |  13 +-
 .../capacity/TestContainerResizing.java         | 134 +++++---
 .../capacity/TestIncreaseAllocationExpirer.java |  76 +++--
 .../server/TestContainerManagerSecurity.java    |  18 +-
 .../TestMiniYarnClusterNodeUtilization.java     |   2 -
 .../src/test/proto/test_token.proto             |   1 +
 76 files changed, 2099 insertions(+), 1103 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java
index f9e4595b..3fa0043 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.yarn.api.records.NMToken;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.client.ClientRMProxy;
 import org.apache.hadoop.yarn.event.Event;
 import org.apache.hadoop.yarn.event.EventHandler;
@@ -296,8 +297,7 @@ public class TestLocalContainerAllocator {
           Resources.none(), null, 1, null,
           Collections.<NMToken>emptyList(),
           yarnToken,
-          Collections.<Container>emptyList(),
-          Collections.<Container>emptyList());
+          Collections.<UpdatedContainer>emptyList());
       response.setApplicationPriority(Priority.newInstance(0));
       return response;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index 44aa593..a115b13 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
@@ -99,7 +99,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NMToken;
@@ -108,6 +107,7 @@ import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
@@ -1703,8 +1703,8 @@ public class TestRMContainerAllocator {
         ApplicationAttemptId applicationAttemptId, List<ResourceRequest> ask,
         List<ContainerId> release, List<String> blacklistAdditions,
         List<String> blacklistRemovals,
-        List<ContainerResourceChangeRequest> increaseRequests,
-        List<ContainerResourceChangeRequest> decreaseRequests) {
+        List<UpdateContainerRequest> increaseRequests,
+        List<UpdateContainerRequest> decreaseRequests) {
       List<ResourceRequest> askCopy = new ArrayList<ResourceRequest>();
       for (ResourceRequest req : ask) {
         ResourceRequest reqCopy = ResourceRequest.newInstance(req
@@ -1750,8 +1750,8 @@ public class TestRMContainerAllocator {
         ApplicationAttemptId applicationAttemptId, List<ResourceRequest> ask,
         List<ContainerId> release, List<String> blacklistAdditions,
         List<String> blacklistRemovals,
-        List<ContainerResourceChangeRequest> increaseRequest,
-        List<ContainerResourceChangeRequest> decreaseRequests) {
+        List<UpdateContainerRequest> increaseRequest,
+        List<UpdateContainerRequest> decreaseRequests) {
       List<ResourceRequest> askCopy = new ArrayList<ResourceRequest>();
       for (ResourceRequest req : ask) {
         ResourceRequest reqCopy = ResourceRequest.newInstance(req

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java
index 393300c..79f934c 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
@@ -60,6 +59,7 @@ import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
@@ -206,8 +206,8 @@ final public class ResourceSchedulerWrapper
   public Allocation allocate(ApplicationAttemptId attemptId,
       List<ResourceRequest> resourceRequests, List<ContainerId> containerIds,
       List<String> strings, List<String> strings2,
-      List<ContainerResourceChangeRequest> increaseRequests,
-      List<ContainerResourceChangeRequest> decreaseRequests) {
+      List<UpdateContainerRequest> increaseRequests,
+      List<UpdateContainerRequest> decreaseRequests) {
     if (metricsON) {
       final Timer.Context context = schedulerAllocateTimer.time();
       Allocation allocation = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java
index 1c3fa79..cf08309 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java
@@ -48,10 +48,10 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
@@ -179,8 +179,8 @@ public class SLSCapacityScheduler extends CapacityScheduler implements
   public Allocation allocate(ApplicationAttemptId attemptId,
       List<ResourceRequest> resourceRequests, List<ContainerId> containerIds,
       List<String> strings, List<String> strings2,
-      List<ContainerResourceChangeRequest> increaseRequests,
-      List<ContainerResourceChangeRequest> decreaseRequests) {
+      List<UpdateContainerRequest> increaseRequests,
+      List<UpdateContainerRequest> decreaseRequests) {
     if (metricsON) {
       final Timer.Context context = schedulerAllocateTimer.time();
       Allocation allocation = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
index e24ebdf..f7ce127 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
@@ -27,8 +27,8 @@ import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
@@ -48,13 +48,8 @@ import org.apache.hadoop.yarn.util.Records;
  *     A list of unused {@link Container} which are being returned.
  *   </li>
  *   <li>
- *     A list of {@link ContainerResourceChangeRequest} to inform
- *     the <code>ResourceManager</code> about the resource increase
- *     requirements of running containers.
- *   </li>
- *   <li>
- *     A list of {@link ContainerResourceChangeRequest} to inform
- *     the <code>ResourceManager</code> about the resource decrease
+ *     A list of {@link UpdateContainerRequest} to inform
+ *     the <code>ResourceManager</code> about the change in
  *     requirements of running containers.
  *   </li>
  * </ul>
@@ -72,25 +67,23 @@ public abstract class AllocateRequest {
       List<ContainerId> containersToBeReleased,
       ResourceBlacklistRequest resourceBlacklistRequest) {
     return newInstance(responseID, appProgress, resourceAsk,
-        containersToBeReleased, resourceBlacklistRequest, null, null);
+        containersToBeReleased, resourceBlacklistRequest, null);
   }
   
   @Public
-  @Stable
+  @Unstable
   public static AllocateRequest newInstance(int responseID, float appProgress,
       List<ResourceRequest> resourceAsk,
       List<ContainerId> containersToBeReleased,
       ResourceBlacklistRequest resourceBlacklistRequest,
-      List<ContainerResourceChangeRequest> increaseRequests,
-      List<ContainerResourceChangeRequest> decreaseRequests) {
+      List<UpdateContainerRequest> updateRequests) {
     AllocateRequest allocateRequest = Records.newRecord(AllocateRequest.class);
     allocateRequest.setResponseId(responseID);
     allocateRequest.setProgress(appProgress);
     allocateRequest.setAskList(resourceAsk);
     allocateRequest.setReleaseList(containersToBeReleased);
     allocateRequest.setResourceBlacklistRequest(resourceBlacklistRequest);
-    allocateRequest.setIncreaseRequests(increaseRequests);
-    allocateRequest.setDecreaseRequests(decreaseRequests);
+    allocateRequest.setUpdateRequests(updateRequests);
     return allocateRequest;
   }
   
@@ -197,48 +190,25 @@ public abstract class AllocateRequest {
       ResourceBlacklistRequest resourceBlacklistRequest);
   
   /**
-   * Get the list of container resource increase requests being sent by the
-   * <code>ApplicationMaster</code>.
-   * @return the list of {@link ContainerResourceChangeRequest}
-   *         being sent by the
-   *         <code>ApplicationMaster</code>.
-   */
-  @Public
-  @Unstable
-  public abstract List<ContainerResourceChangeRequest> getIncreaseRequests();
-
-  /**
-   * Set the list of container resource increase requests to inform the
-   * <code>ResourceManager</code> about the containers whose resources need
-   *         to be increased.
-   * @param increaseRequests list of
-   *        {@link ContainerResourceChangeRequest}
-   */
-  @Public
-  @Unstable
-  public abstract void setIncreaseRequests(
-      List<ContainerResourceChangeRequest> increaseRequests);
-
-  /**
-   * Get the list of container resource decrease requests being sent by the
+   * Get the list of container update requests being sent by the
    * <code>ApplicationMaster</code>.
-   * @return list of {@link ContainerResourceChangeRequest}
+   * @return list of {@link UpdateContainerRequest}
    *         being sent by the
    *         <code>ApplicationMaster</code>.
    */
   @Public
   @Unstable
-  public abstract List<ContainerResourceChangeRequest> getDecreaseRequests();
+  public abstract List<UpdateContainerRequest> getUpdateRequests();
 
   /**
-   * Set the list of container resource decrease requests to inform the
-   * <code>ResourceManager</code> about the containers whose resources need
-   * to be decreased.
-   * @param decreaseRequests list of
-   *        {@link ContainerResourceChangeRequest}
+   * Set the list of container update requests to inform the
+   * <code>ResourceManager</code> about the containers that need to be
+   * updated.
+   * @param updateRequests list of <code>UpdateContainerRequest</code> for
+   *                       containers to be updated
    */
   @Public
   @Unstable
-  public abstract void setDecreaseRequests(
-      List<ContainerResourceChangeRequest> decreaseRequests);
+  public abstract void setUpdateRequests(
+      List<UpdateContainerRequest> updateRequests);
 }
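
For illustration, a minimal sketch of how an ApplicationMaster might populate
the new consolidated update list when heartbeating; the container, version and
target capability below are hypothetical, and lastResponseId, progress,
askList, releaseList and blacklistRequest are assumed to be in scope:

    List<UpdateContainerRequest> updates = new ArrayList<>();
    updates.add(UpdateContainerRequest.newInstance(
        container.getVersion(),                // current container version
        container.getId(),                     // container to update
        ContainerUpdateType.INCREASE_RESOURCE, // one aspect per request
        Resource.newInstance(2048, 2),         // target capability
        null));                                // no ExecutionType change here
    AllocateRequest request = AllocateRequest.newInstance(lastResponseId,
        progress, askList, releaseList, blacklistRequest, updates);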

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
index 4fba423..69089ee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.api.protocolrecords;
 
+import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -35,6 +36,8 @@ import org.apache.hadoop.yarn.api.records.PreemptionMessage;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.Token;
+import org.apache.hadoop.yarn.api.records.UpdateContainerError;
+import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
@@ -95,19 +98,17 @@ public abstract class AllocateResponse {
   }
 
   @Public
-  @Stable
+  @Unstable
   public static AllocateResponse newInstance(int responseId,
       List<ContainerStatus> completedContainers,
       List<Container> allocatedContainers, List<NodeReport> updatedNodes,
       Resource availResources, AMCommand command, int numClusterNodes,
       PreemptionMessage preempt, List<NMToken> nmTokens,
-      List<Container> increasedContainers,
-      List<Container> decreasedContainers) {
+      List<UpdatedContainer> updatedContainers) {
     AllocateResponse response = newInstance(responseId, completedContainers,
         allocatedContainers, updatedNodes, availResources, command,
         numClusterNodes, preempt, nmTokens);
-    response.setIncreasedContainers(increasedContainers);
-    response.setDecreasedContainers(decreasedContainers);
+    response.setUpdatedContainers(updatedContainers);
     return response;
   }
 
@@ -118,12 +119,11 @@ public abstract class AllocateResponse {
       List<Container> allocatedContainers, List<NodeReport> updatedNodes,
       Resource availResources, AMCommand command, int numClusterNodes,
       PreemptionMessage preempt, List<NMToken> nmTokens, Token amRMToken,
-      List<Container> increasedContainers,
-      List<Container> decreasedContainers) {
+      List<UpdatedContainer> updatedContainers) {
     AllocateResponse response =
         newInstance(responseId, completedContainers, allocatedContainers,
           updatedNodes, availResources, command, numClusterNodes, preempt,
-          nmTokens, increasedContainers, decreasedContainers);
+          nmTokens, updatedContainers);
     response.setAMRMToken(amRMToken);
     return response;
   }
@@ -135,13 +135,11 @@ public abstract class AllocateResponse {
       List<Container> allocatedContainers, List<NodeReport> updatedNodes,
       Resource availResources, AMCommand command, int numClusterNodes,
       PreemptionMessage preempt, List<NMToken> nmTokens, Token amRMToken,
-      List<Container> increasedContainers,
-      List<Container> decreasedContainers,
-      String collectorAddr) {
+      List<UpdatedContainer> updatedContainers, String collectorAddr) {
     AllocateResponse response =
         newInstance(responseId, completedContainers, allocatedContainers,
           updatedNodes, availResources, command, numClusterNodes, preempt,
-          nmTokens, increasedContainers, decreasedContainers);
+          nmTokens, updatedContainers);
     response.setAMRMToken(amRMToken);
     response.setCollectorAddr(collectorAddr);
     return response;
@@ -290,40 +288,24 @@ public abstract class AllocateResponse {
   public abstract void setNMTokens(List<NMToken> nmTokens);
   
   /**
-   * Get the list of newly increased containers by
+   * Get the list of newly updated containers by
    * <code>ResourceManager</code>.
    * @return list of newly updated containers
    */
   @Public
   @Unstable
-  public abstract List<Container> getIncreasedContainers();
-
-  /**
-   * Set the list of newly increased containers by
-   * <code>ResourceManager</code>.
-   */
-  @Private
-  @Unstable
-  public abstract void setIncreasedContainers(
-      List<Container> increasedContainers);
-
-  /**
-   * Get the list of newly decreased containers by
-   * <code>ResourceManager</code>.
-   * @return the list of newly decreased containers
-   */
-  @Public
-  @Unstable
-  public abstract List<Container> getDecreasedContainers();
+  public abstract List<UpdatedContainer> getUpdatedContainers();
 
   /**
-   * Set the list of newly decreased containers by
+   * Set the list of newly updated containers by
    * <code>ResourceManager</code>.
+   *
+   * @param updatedContainers List of Updated Containers.
    */
   @Private
   @Unstable
-  public abstract void setDecreasedContainers(
-      List<Container> decreasedContainers);
+  public abstract void setUpdatedContainers(
+      List<UpdatedContainer> updatedContainers);
 
   /**
    * The AMRMToken that belong to this attempt
@@ -364,4 +346,28 @@ public abstract class AllocateResponse {
   @Unstable
   public abstract void setCollectorAddr(String collectorAddr);
 
+  /**
+   * Get the list of container update errors to inform the
+   * Application Master about the container updates that could not be
+   * satisfied due to an error.
+   *
+   * @return List of Update Container Errors.
+   */
+  @Public
+  @Unstable
+  public List<UpdateContainerError> getUpdateErrors() {
+    return new ArrayList<>();
+  }
+
+  /**
+   * Set the list of container update errors to inform the
+   * Application Master about the container updates that could not be
+   * satisfied due to an error.
+   * @param updateErrors list of <code>UpdateContainerError</code> for
+   *                       containers updates requests that were in error
+   */
+  @Public
+  @Unstable
+  public void setUpdateErrors(List<UpdateContainerError> updateErrors) {
+  }
 }
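
On the response side, an AM now consumes one updated-container list plus any
per-request errors. A rough sketch (handleUpdate and LOG are assumed
application-side names):

    AllocateResponse response = amClient.allocate(progress);
    for (UpdatedContainer uc : response.getUpdatedContainers()) {
      // uc carries both the ContainerUpdateType and the updated Container
      handleUpdate(uc.getUpdateType(), uc.getContainer());
    }
    for (UpdateContainerError err : response.getUpdateErrors()) {
      // each error echoes back the failed request together with a reason
      LOG.warn("Update rejected: " + err.getReason() + " for container "
          + err.getUpdateContainerRequest().getContainerId());
    }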

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
index 707a71d..4fdc803 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
@@ -230,8 +230,30 @@ public abstract class Container implements Comparable<Container> {
    *                            allocation.
    */
   @Private
-  @Evolving
+  @Unstable
   public void setAllocationRequestId(long allocationRequestID) {
     throw new UnsupportedOperationException();
   }
+
+  /**
+   * Get the version of this container. The version will be incremented when
+   * a container is updated.
+   *
+   * @return version of this container.
+   */
+  @Private
+  @Unstable
+  public int getVersion() {
+    return 0;
+  }
+
+  /**
+   * Set the version of this container.
+   * @param version the version of this container.
+   */
+  @Private
+  @Unstable
+  public void setVersion(int version) {
+    throw new UnsupportedOperationException();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerResourceChangeRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerResourceChangeRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerResourceChangeRequest.java
deleted file mode 100644
index 117015b..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerResourceChangeRequest.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.api.records;
-
-import org.apache.hadoop.classification.InterfaceAudience.Public;
-import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
-import org.apache.hadoop.yarn.util.Records;
-
-/**
- * {@code ContainerResourceChangeRequest} represents the request made by an
- * application to the {@code ResourceManager} to change resource allocation of
- * a running {@code Container}.
- * <p>
- * It includes:
- * <ul>
- *   <li>{@link ContainerId} for the container.</li>
- *   <li>
- *     {@link Resource} capability of the container after the resource change
- *     is completed.
- *   </li>
- * </ul>
- *
- * @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)
- */
-@Public
-@Unstable
-public abstract class ContainerResourceChangeRequest {
-
-  @Public
-  @Unstable
-  public static ContainerResourceChangeRequest newInstance(
-      ContainerId existingContainerId, Resource targetCapability) {
-    ContainerResourceChangeRequest context = Records
-        .newRecord(ContainerResourceChangeRequest.class);
-    context.setContainerId(existingContainerId);
-    context.setCapability(targetCapability);
-    return context;
-  }
-
-  /**
-   * Get the <code>ContainerId</code> of the container.
-   * @return <code>ContainerId</code> of the container
-   */
-  @Public
-  @Unstable
-  public abstract ContainerId getContainerId();
-
-  /**
-   * Set the <code>ContainerId</code> of the container.
-   * @param containerId <code>ContainerId</code> of the container
-   */
-  @Public
-  @Unstable
-  public abstract void setContainerId(ContainerId containerId);
-
-  /**
-   * Get the <code>Resource</code> capability of the container.
-   * @return <code>Resource</code> capability of the container
-   */
-  @Public
-  @Unstable
-  public abstract Resource getCapability();
-
-  /**
-   * Set the <code>Resource</code> capability of the container.
-   * @param capability <code>Resource</code> capability of the container
-   */
-  @Public
-  @Unstable
-  public abstract void setCapability(Resource capability);
-
-  @Override
-  public int hashCode() {
-    return getCapability().hashCode() + getContainerId().hashCode();
-  }
-
-  @Override
-  public boolean equals(Object other) {
-    if (other instanceof ContainerResourceChangeRequest) {
-      ContainerResourceChangeRequest ctx =
-          (ContainerResourceChangeRequest) other;
-
-      if (getContainerId() == null && ctx.getContainerId() != null) {
-        return false;
-      } else if (!getContainerId().equals(ctx.getContainerId())) {
-        return false;
-      }
-
-      if (getCapability() == null && ctx.getCapability() != null) {
-        return false;
-      } else if (!getCapability().equals(ctx.getCapability())) {
-        return false;
-      }
-
-      return true;
-    } else {
-      return false;
-    }
-  }
-}
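
Callers that previously built separate increase/decrease lists with the
removed ContainerResourceChangeRequest now express both through
UpdateContainerRequest. A rough migration sketch, assuming current and target
Resource variables and the Resources utility from
org.apache.hadoop.yarn.util.resource:

    // Before (removed API):
    //   ContainerResourceChangeRequest.newInstance(container.getId(), target)
    //   sent via setIncreaseRequests(...) / setDecreaseRequests(...)
    // After:
    ContainerUpdateType type = Resources.fitsIn(target, current)
        ? ContainerUpdateType.DECREASE_RESOURCE
        : ContainerUpdateType.INCREASE_RESOURCE;
    UpdateContainerRequest req = UpdateContainerRequest.newInstance(
        container.getVersion(), container.getId(), type, target, null);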

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerUpdateType.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerUpdateType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerUpdateType.java
new file mode 100644
index 0000000..978ea09
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerUpdateType.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Encodes the type of Container Update.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public enum ContainerUpdateType {
+
+  /**
+   * Resource increase.
+   */
+  INCREASE_RESOURCE,
+
+  /**
+   * Resource decrease.
+   */
+  DECREASE_RESOURCE,
+
+  /**
+   * Execution Type change.
+   */
+  UPDATE_EXECUTION_TYPE
+}
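
Consumers would typically switch on this enum when reacting to updates, for
example (sketch; updatedContainer is an assumed UpdatedContainer instance):

    switch (updatedContainer.getUpdateType()) {
    case INCREASE_RESOURCE:
    case DECREASE_RESOURCE:
      // resource change: refresh local bookkeeping of the container size
      break;
    case UPDATE_EXECUTION_TYPE:
      // e.g. promotion between OPPORTUNISTIC and GUARANTEED
      break;
    }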

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerError.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerError.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerError.java
new file mode 100644
index 0000000..7102f7b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerError.java
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * {@code UpdateContainerError} is used by the Scheduler to notify the
+ * ApplicationMaster of an UpdateContainerRequest it cannot satisfy due to
+ * an error in the request. It includes the update request as well as
+ * the reason why the request was not satisfiable.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public abstract class UpdateContainerError {
+
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public static UpdateContainerError newInstance(String reason,
+      UpdateContainerRequest updateContainerRequest) {
+    UpdateContainerError error = Records.newRecord(UpdateContainerError.class);
+    error.setReason(reason);
+    error.setUpdateContainerRequest(updateContainerRequest);
+    return error;
+  }
+
+  /**
+   * Get reason why the update request was not satisfiable.
+   * @return Reason
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public abstract String getReason();
+
+  /**
+   * Set reason why the update request was not satisfiable.
+   * @param reason Reason
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public abstract void setReason(String reason);
+
+  /**
+   * Get the {@code UpdateContainerRequest} that was not satisfiable.
+   * @return UpdateContainerRequest
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public abstract UpdateContainerRequest getUpdateContainerRequest();
+
+  /**
+   * Set the {@code UpdateContainerRequest} that was not satisfiable.
+   * @param updateContainerRequest Update Container Request
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public abstract void setUpdateContainerRequest(
+      UpdateContainerRequest updateContainerRequest);
+
+  @Override
+  public int hashCode() {
+    final int prime = 2153;
+    int result = 2459;
+    String reason = getReason();
+    UpdateContainerRequest updateReq = getUpdateContainerRequest();
+    result = prime * result + ((reason == null) ? 0 : reason.hashCode());
+    result = prime * result + ((updateReq == null) ? 0 : updateReq.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (getClass() != obj.getClass()) {
+      return false;
+    }
+    UpdateContainerError other = (UpdateContainerError) obj;
+    String reason = getReason();
+    if (reason == null) {
+      if (other.getReason() != null) {
+        return false;
+      }
+    } else if (!reason.equals(other.getReason())) {
+      return false;
+    }
+    UpdateContainerRequest req = getUpdateContainerRequest();
+    if (req == null) {
+      if (other.getUpdateContainerRequest() != null) {
+        return false;
+      }
+    } else if (!req.equals(other.getUpdateContainerRequest())) {
+      return false;
+    }
+    return true;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerRequest.java
new file mode 100644
index 0000000..ef39f5c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerRequest.java
@@ -0,0 +1,218 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * {@code UpdateContainerRequest} represents the request made by an
+ * application to the {@code ResourceManager} to update an attribute of a
+ * {@code Container}, such as its Resource allocation or {@code ExecutionType}.
+ * <p>
+ * It includes:
+ * <ul>
+ *   <li>version for the container.</li>
+ *   <li>{@link ContainerId} for the container.</li>
+ *   <li>
+ *     {@link Resource} capability of the container after the update request
+ *     is completed.
+ *   </li>
+ *   <li>
+ *     {@link ExecutionType} of the container after the update request is
+ *     completed.
+ *   </li>
+ * </ul>
+ *
+ * Update rules:
+ * <ul>
+ *   <li>
+ *     Currently only ONE aspect of the container can be updated per request
+ *     (user can either update Capability OR ExecutionType in one request,
+ *     not both).
+ *   </li>
+ *   <li>
+ *     There must be at most one update request per container in an allocate call.
+ *   </li>
+ *   <li>
+ *     If a new update request is sent for a container (in a subsequent allocate
+ *     call) before the first one is satisfied by the Scheduler, it will
+ *     overwrite the previous request.
+ *   </li>
+ * </ul>
+ * @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public abstract class UpdateContainerRequest {
+
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public static UpdateContainerRequest newInstance(int version,
+      ContainerId containerId, ContainerUpdateType updateType,
+      Resource targetCapability, ExecutionType targetExecutionType) {
+    UpdateContainerRequest request =
+        Records.newRecord(UpdateContainerRequest.class);
+    request.setContainerVersion(version);
+    request.setContainerId(containerId);
+    request.setContainerUpdateType(updateType);
+    request.setExecutionType(targetExecutionType);
+    request.setCapability(targetCapability);
+    return request;
+  }
+
+  /**
+   * Get the current version of the container.
+   * @return version of the container
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public abstract int getContainerVersion();
+
+  /**
+   * Set the current version of the container.
+   * @param containerVersion of the container
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public abstract void setContainerVersion(int containerVersion);
+
+  /**
+   * Get the <code>ContainerUpdateType</code> of the container.
+   * @return <code>ContainerUpdateType</code> of the container.
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public abstract ContainerUpdateType getContainerUpdateType();
+
+  /**
+   * Set the <code>ContainerUpdateType</code> of the container.
+   * @param updateType of the Container
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public abstract void setContainerUpdateType(ContainerUpdateType updateType);
+
+  /**
+   * Get the <code>ContainerId</code> of the container.
+   * @return <code>ContainerId</code> of the container
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public abstract ContainerId getContainerId();
+
+  /**
+   * Set the <code>ContainerId</code> of the container.
+   * @param containerId <code>ContainerId</code> of the container
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public abstract void setContainerId(ContainerId containerId);
+
+  /**
+   * Get the <code>Resource</code> capability of the container.
+   * @return <code>Resource</code> capability of the container
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public abstract Resource getCapability();
+
+  /**
+   * Set the <code>Resource</code> capability of the container.
+   * @param capability <code>Resource</code> capability of the container
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public abstract void setCapability(Resource capability);
+
+  /**
+   * Get the target <code>ExecutionType</code> of the container.
+   * @return <code>ExecutionType</code> of the container
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public abstract ExecutionType getExecutionType();
+
+  /**
+   * Set the target <code>ExecutionType</code> of the container.
+   * @param executionType <code>ExecutionType</code> of the container
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public abstract void setExecutionType(ExecutionType executionType);
+
+  @Override
+  public int hashCode() {
+    final int prime = 2153;
+    int result = 2459;
+    ContainerId cId = getContainerId();
+    ExecutionType execType = getExecutionType();
+    Resource capability = getCapability();
+    result =
+        prime * result + ((capability == null) ? 0 : capability.hashCode());
+    result = prime * result + ((cId == null) ? 0 : cId.hashCode());
+    result = prime * result + getContainerVersion();
+    result = prime * result + ((execType == null) ? 0 : execType.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (getClass() != obj.getClass()) {
+      return false;
+    }
+    UpdateContainerRequest other = (UpdateContainerRequest) obj;
+    Resource capability = getCapability();
+    if (capability == null) {
+      if (other.getCapability() != null) {
+        return false;
+      }
+    } else if (!capability.equals(other.getCapability())) {
+      return false;
+    }
+    ContainerId cId = getContainerId();
+    if (cId == null) {
+      if (other.getContainerId() != null) {
+        return false;
+      }
+    } else if (!cId.equals(other.getContainerId())) {
+      return false;
+    }
+    if (getContainerVersion() != other.getContainerVersion()) {
+      return false;
+    }
+    ExecutionType execType = getExecutionType();
+    if (execType == null) {
+      if (other.getExecutionType() != null) {
+        return false;
+      }
+    } else if (!execType.equals(other.getExecutionType())) {
+      return false;
+    }
+    return true;
+  }
+}
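
Given the update rules above (one aspect per request), an execution-type
change is expressed with a null capability. A sketch, assuming a container
variable in scope:

    UpdateContainerRequest promote = UpdateContainerRequest.newInstance(
        container.getVersion(), container.getId(),
        ContainerUpdateType.UPDATE_EXECUTION_TYPE,
        null,                       // capability untouched in this request
        ExecutionType.GUARANTEED);  // target execution type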

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdatedContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdatedContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdatedContainer.java
new file mode 100644
index 0000000..68f6ca1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdatedContainer.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * An object that encapsulates an updated container and the
+ * type of Update.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public abstract class UpdatedContainer {
+
+  /**
+   * Static Factory method.
+   *
+   * @param updateType ContainerUpdateType
+   * @param container Container
+   * @return UpdatedContainer
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public static UpdatedContainer newInstance(ContainerUpdateType updateType,
+      Container container) {
+    UpdatedContainer updatedContainer =
+        Records.newRecord(UpdatedContainer.class);
+    updatedContainer.setUpdateType(updateType);
+    updatedContainer.setContainer(container);
+    return updatedContainer;
+  }
+
+  /**
+   * Get the <code>ContainerUpdateType</code>.
+   * @return ContainerUpdateType
+   */
+  public abstract ContainerUpdateType getUpdateType();
+
+  /**
+   * Set the <code>ContainerUpdateType</code>.
+   * @param updateType ContainerUpdateType
+   */
+  public abstract void setUpdateType(ContainerUpdateType updateType);
+
+  /**
+   * Get the <code>Container</code>.
+   * @return Container
+   */
+  public abstract Container getContainer();
+
+  /**
+   * Set the <code>Container</code>.
+   * @param container Container
+   */
+  public abstract void setContainer(Container container);
+
+  @Override
+  public int hashCode() {
+    final int prime = 2153;
+    int result = 2459;
+    ContainerUpdateType updateType = getUpdateType();
+    Container container = getContainer();
+    result = prime * result + ((updateType == null) ? 0 :
+        updateType.hashCode());
+    result = prime * result + ((container == null) ? 0 : container.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (getClass() != obj.getClass()) {
+      return false;
+    }
+    UpdatedContainer other = (UpdatedContainer) obj;
+    ContainerUpdateType updateType = getUpdateType();
+    if (updateType == null) {
+      if (other.getUpdateType() != null) {
+        return false;
+      }
+    } else if (updateType != other.getUpdateType()) {
+      return false;
+    }
+    Container container = getContainer();
+    if (container == null) {
+      if (other.getContainer() != null) {
+        return false;
+      }
+    } else if (!container.equals(other.getContainer())) {
+      return false;
+    }
+    return true;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 2cc1784..2d6007e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -94,6 +94,7 @@ message ContainerProto {
   optional hadoop.common.TokenProto container_token = 6;
   optional ExecutionTypeProto execution_type = 7 [default = GUARANTEED];
   optional int64 allocation_request_id = 8 [default = -1];
+  optional int32 version = 9 [default = 0];
 }
 
 message ContainerReportProto {
@@ -535,11 +536,6 @@ enum ContainerExitStatusProto {
   DISKS_FAILED = -101;
 }
 
-message ContainerResourceChangeRequestProto {
-  optional ContainerIdProto container_id = 1;
-  optional ResourceProto capability = 2;
-} 
-
 message ContainerRetryContextProto {
   optional ContainerRetryPolicyProto retry_policy = 1 [default = NEVER_RETRY];
   repeated int32 error_codes = 2;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index 4abb80b..97eaa5c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -60,14 +60,32 @@ message FinishApplicationMasterResponseProto {
   optional bool isUnregistered = 1 [default = false];
 }
 
+enum ContainerUpdateTypeProto {
+  INCREASE_RESOURCE = 0;
+  DECREASE_RESOURCE = 1;
+  UPDATE_EXECUTION_TYPE = 2;
+}
+
+message UpdateContainerRequestProto {
+  required int32 container_version = 1;
+  required ContainerIdProto container_id = 2;
+  required ContainerUpdateTypeProto update_type = 3;
+  optional ResourceProto capability = 4;
+  optional ExecutionTypeProto execution_type = 5;
+}
+
+message UpdateContainerErrorProto {
+  optional string reason = 1;
+  optional UpdateContainerRequestProto update_request = 2;
+}
+
 message AllocateRequestProto {
   repeated ResourceRequestProto ask = 1;
   repeated ContainerIdProto release = 2;
   optional ResourceBlacklistRequestProto blacklist_request = 3;
   optional int32 response_id = 4;
   optional float progress = 5;
-  repeated ContainerResourceChangeRequestProto increase_request = 6;
-  repeated ContainerResourceChangeRequestProto decrease_request = 7;
+  repeated UpdateContainerRequestProto update_requests = 6;
 }
 
 message NMTokenProto {
@@ -75,6 +93,11 @@ message NMTokenProto {
   optional hadoop.common.TokenProto token = 2;
 }
 
+message UpdatedContainerProto {
+  required ContainerUpdateTypeProto update_type = 1;
+  required ContainerProto container = 2;
+}
+
 message AllocateResponseProto {
   optional AMCommandProto a_m_command = 1;
   optional int32 response_id = 2;
@@ -85,11 +108,11 @@ message AllocateResponseProto {
   optional int32 num_cluster_nodes = 7;
   optional PreemptionMessageProto preempt = 8;
   repeated NMTokenProto nm_tokens = 9;
-  repeated ContainerProto increased_containers = 10;
-  repeated ContainerProto decreased_containers = 11;
+  repeated UpdatedContainerProto updated_containers = 10;
   optional hadoop.common.TokenProto am_rm_token = 12;
   optional PriorityProto application_priority = 13;
   optional string collector_addr = 14;
+  repeated UpdateContainerErrorProto update_errors = 15;
 }
 
 enum SchedulerResourceTypes {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index b9949e1..17dae6b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -90,6 +90,7 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
@@ -912,7 +913,8 @@ public class ApplicationMaster {
     }
 
     @Override
-    public void onContainersResourceChanged(List<Container> containers) {}
+    public void onContainersUpdated(
+        List<UpdatedContainer> containers) {}
 
     @Override
     public void onShutdownRequest() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
index 10d2a2f..d2195a6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
@@ -29,6 +29,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
@@ -40,6 +41,7 @@ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
 import org.apache.hadoop.yarn.client.api.async.impl.AMRMClientAsyncImpl;
@@ -63,7 +65,7 @@ import com.google.common.annotations.VisibleForTesting;
  *     [run tasks on the containers]
  *   }
  *
- *   public void onContainersResourceChanged(List<Container> containers) {
+ *   public void onContainersUpdated(List<UpdatedContainer> containers) {
  *     [determine if the resource allocation of containers has been increased in
  *      the ResourceManager, and if so, inform the NodeManagers to increase the
  *      resource monitor/enforcement on the containers]
@@ -426,8 +428,9 @@ extends AbstractService {
      * Called when the ResourceManager responds to a heartbeat with containers
      * whose resource allocation has been changed.
      */
-    public abstract void onContainersResourceChanged(
-        List<Container> containers);
+    @Public
+    @Unstable
+    public abstract void onContainersUpdated(List<UpdatedContainer> containers);
 
     /**
      * Called when the ResourceManager wants the ApplicationMaster to shutdown
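
A minimal sketch of the renamed callback in an AbstractCallbackHandler
subclass (notifyNodeManager is a hypothetical application helper; the
remaining abstract callbacks are elided):

    class MyCallbackHandler extends AMRMClientAsync.AbstractCallbackHandler {
      @Override
      public void onContainersUpdated(List<UpdatedContainer> containers) {
        for (UpdatedContainer uc : containers) {
          // relay the new allocation or execution type to the NodeManager
          notifyNodeManager(uc.getUpdateType(), uc.getContainer());
        }
      }
      // other abstract callbacks elided in this sketch
    }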

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
index 242df65..bc6cadd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
@@ -354,12 +355,11 @@ extends AMRMClientAsync<T> {
           if (handler instanceof AMRMClientAsync.AbstractCallbackHandler) {
             // RM side of the implementation guarantees that there are
             // no duplicate entries in the list of updated containers
-            List<Container> changed = new ArrayList<>();
-            changed.addAll(response.getIncreasedContainers());
-            changed.addAll(response.getDecreasedContainers());
+            List<UpdatedContainer> changed = new ArrayList<>();
+            changed.addAll(response.getUpdatedContainers());
             if (!changed.isEmpty()) {
               ((AMRMClientAsync.AbstractCallbackHandler) handler)
-                  .onContainersResourceChanged(changed);
+                  .onContainersUpdated(changed);
             }
           }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
index 60834f6..6f6bb85 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
@@ -52,8 +52,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterReque
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
@@ -63,6 +63,8 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.Token;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
+import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.client.ClientRMProxy;
 import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
@@ -261,36 +263,10 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
         new HashMap<>();
     try {
       synchronized (this) {
-        askList = new ArrayList<ResourceRequest>(ask.size());
-        for(ResourceRequest r : ask) {
-          // create a copy of ResourceRequest as we might change it while the 
-          // RPC layer is using it to send info across
-          ResourceRequest rr = ResourceRequest.newInstance(r.getPriority(),
-              r.getResourceName(), r.getCapability(), r.getNumContainers(),
-              r.getRelaxLocality(), r.getNodeLabelExpression(),
-              r.getExecutionTypeRequest());
-          rr.setAllocationRequestId(r.getAllocationRequestId());
-          askList.add(rr);
-        }
-        List<ContainerResourceChangeRequest> increaseList = new ArrayList<>();
-        List<ContainerResourceChangeRequest> decreaseList = new ArrayList<>();
+        askList = cloneAsks();
         // Save the current change for recovery
         oldChange.putAll(change);
-        for (Map.Entry<ContainerId, SimpleEntry<Container, Resource>> entry :
-            change.entrySet()) {
-          Container container = entry.getValue().getKey();
-          Resource original = container.getResource();
-          Resource target = entry.getValue().getValue();
-          if (Resources.fitsIn(target, original)) {
-            // This is a decrease request
-            decreaseList.add(ContainerResourceChangeRequest.newInstance(
-                container.getId(), target));
-          } else {
-            // This is an increase request
-            increaseList.add(ContainerResourceChangeRequest.newInstance(
-                container.getId(), target));
-          }
-        }
+        List<UpdateContainerRequest> updateList = createUpdateList();
         releaseList = new ArrayList<ContainerId>(release);
         // optimistically clear this collection assuming no RPC failure
         ask.clear();
@@ -306,8 +282,7 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
         
         allocateRequest =
             AllocateRequest.newInstance(lastResponseId, progressIndicator,
-                askList, releaseList, blacklistRequest,
-                    increaseList, decreaseList);
+                askList, releaseList, blacklistRequest, updateList);
         // clear blacklistAdditions and blacklistRemovals before
         // unsynchronized part
         blacklistAdditions.clear();
@@ -358,9 +333,8 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
         if (!pendingChange.isEmpty()) {
           List<ContainerStatus> completed =
               allocateResponse.getCompletedContainersStatuses();
-          List<Container> changed = new ArrayList<>();
-          changed.addAll(allocateResponse.getIncreasedContainers());
-          changed.addAll(allocateResponse.getDecreasedContainers());
+          List<UpdatedContainer> changed = new ArrayList<>();
+          changed.addAll(allocateResponse.getUpdatedContainers());
           // remove all pending change requests that belong to the completed
           // containers
           for (ContainerStatus status : completed) {
@@ -417,6 +391,40 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
     return allocateResponse;
   }
 
+  private List<UpdateContainerRequest> createUpdateList() {
+    List<UpdateContainerRequest> updateList = new ArrayList<>();
+    for (Map.Entry<ContainerId, SimpleEntry<Container, Resource>> entry :
+        change.entrySet()) {
+      Resource targetCapability = entry.getValue().getValue();
+      Resource currCapability = entry.getValue().getKey().getResource();
+      int version = entry.getValue().getKey().getVersion();
+      ContainerUpdateType updateType =
+          ContainerUpdateType.INCREASE_RESOURCE;
+      if (Resources.fitsIn(targetCapability, currCapability)) {
+        updateType = ContainerUpdateType.DECREASE_RESOURCE;
+      }
+      updateList.add(
+          UpdateContainerRequest.newInstance(version, entry.getKey(),
+              updateType, targetCapability, null));
+    }
+    return updateList;
+  }
+
+  private List<ResourceRequest> cloneAsks() {
+    List<ResourceRequest> askList = new ArrayList<ResourceRequest>(ask.size());
+    for(ResourceRequest r : ask) {
+      // create a copy of ResourceRequest as we might change it while the
+      // RPC layer is using it to send info across
+      ResourceRequest rr = ResourceRequest.newInstance(r.getPriority(),
+          r.getResourceName(), r.getCapability(), r.getNumContainers(),
+          r.getRelaxLocality(), r.getNodeLabelExpression(),
+          r.getExecutionTypeRequest());
+      rr.setAllocationRequestId(r.getAllocationRequestId());
+      askList.add(rr);
+    }
+    return askList;
+  }
+
   protected void removePendingReleaseRequests(
       List<ContainerStatus> completedContainersStatuses) {
     for (ContainerStatus containerStatus : completedContainersStatuses) {
@@ -425,16 +433,16 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
   }
 
   protected void removePendingChangeRequests(
-      List<Container> changedContainers) {
-    for (Container changedContainer : changedContainers) {
-      ContainerId containerId = changedContainer.getId();
+      List<UpdatedContainer> changedContainers) {
+    for (UpdatedContainer changedContainer : changedContainers) {
+      ContainerId containerId = changedContainer.getContainer().getId();
       if (pendingChange.get(containerId) == null) {
         continue;
       }
       if (LOG.isDebugEnabled()) {
         LOG.debug("RM has confirmed changed resource allocation for "
             + "container " + containerId + ". Current resource allocation:"
-            + changedContainer.getResource()
+            + changedContainer.getContainer().getResource()
             + ". Remove pending change request:"
             + pendingChange.get(containerId).getValue());
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java
index c7b3a94..dac82e4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java
@@ -45,9 +45,11 @@ import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
 import org.apache.hadoop.yarn.api.records.NMToken;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
 import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
@@ -89,20 +91,21 @@ public class TestAMRMClientAsync {
     TestCallbackHandler callbackHandler = new TestCallbackHandler();
     final AMRMClient<ContainerRequest> client = mock(AMRMClientImpl.class);
     final AtomicInteger secondHeartbeatSync = new AtomicInteger(0);
-    when(client.allocate(anyFloat())).thenReturn(response1).thenAnswer(new Answer<AllocateResponse>() {
-      @Override
-      public AllocateResponse answer(InvocationOnMock invocation)
-          throws Throwable {
-        secondHeartbeatSync.incrementAndGet();
-        while (heartbeatBlock.get()) {
-          synchronized (heartbeatBlock) {
-            heartbeatBlock.wait();
+    when(client.allocate(anyFloat())).thenReturn(response1).thenAnswer(
+        new Answer<AllocateResponse>() {
+          @Override
+          public AllocateResponse answer(InvocationOnMock invocation)
+              throws Throwable {
+            secondHeartbeatSync.incrementAndGet();
+            while (heartbeatBlock.get()) {
+              synchronized (heartbeatBlock) {
+                heartbeatBlock.wait();
+              }
+            }
+            secondHeartbeatSync.incrementAndGet();
+            return response2;
           }
-        }
-        secondHeartbeatSync.incrementAndGet();
-        return response2;
-      }
-    }).thenReturn(response3).thenReturn(emptyResponse);
+        }).thenReturn(response3).thenReturn(emptyResponse);
     when(client.registerApplicationMaster(anyString(), anyInt(), anyString()))
       .thenReturn(null);
     when(client.getAvailableResources()).thenAnswer(new Answer<Resource>() {
@@ -410,10 +413,21 @@ public class TestAMRMClientAsync {
       List<ContainerStatus> completed, List<Container> allocated,
       List<Container> increased, List<Container> decreased,
       List<NMToken> nmTokens) {
+    List<UpdatedContainer> updatedContainers = new ArrayList<>();
+    for (Container c : increased) {
+      updatedContainers.add(
+          UpdatedContainer.newInstance(
+              ContainerUpdateType.INCREASE_RESOURCE, c));
+    }
+    for (Container c : decreased) {
+      updatedContainers.add(
+          UpdatedContainer.newInstance(
+              ContainerUpdateType.DECREASE_RESOURCE, c));
+    }
     AllocateResponse response =
         AllocateResponse.newInstance(0, completed, allocated,
             new ArrayList<NodeReport>(), null, null, 1, null, nmTokens,
-            increased, decreased);
+            updatedContainers);
     return response;
   }
 
@@ -429,7 +443,7 @@ public class TestAMRMClientAsync {
       extends AMRMClientAsync.AbstractCallbackHandler {
     private volatile List<ContainerStatus> completedContainers;
     private volatile List<Container> allocatedContainers;
-    private final List<Container> changedContainers = new ArrayList<>();
+    private final List<UpdatedContainer> changedContainers = new ArrayList<>();
     Exception savedException = null;
     volatile boolean reboot = false;
     Object notifier = new Object();
@@ -448,8 +462,8 @@ public class TestAMRMClientAsync {
       return ret;
     }
 
-    public List<Container> takeChangedContainers() {
-      List<Container> ret = null;
+    public List<UpdatedContainer> takeChangedContainers() {
+      List<UpdatedContainer> ret = null;
       synchronized (changedContainers) {
         if (!changedContainers.isEmpty()) {
           ret = new ArrayList<>(changedContainers);
@@ -488,8 +502,8 @@ public class TestAMRMClientAsync {
     }
 
     @Override
-    public void onContainersResourceChanged(
-        List<Container> changed) {
+    public void onContainersUpdated(
+        List<UpdatedContainer> changed) {
       synchronized (changedContainers) {
         changedContainers.clear();
         changedContainers.addAll(changed);
@@ -564,7 +578,8 @@ public class TestAMRMClientAsync {
     public void onContainersAllocated(List<Container> containers) {}
 
     @Override
-    public void onContainersResourceChanged(List<Container> containers) {}
+    public void onContainersUpdated(
+        List<UpdatedContainer> containers) {}
 
     @Override
     public void onShutdownRequest() {}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
index e0ad2c4..38178a4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
@@ -72,6 +72,7 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.Token;
+import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.client.ClientRMProxy;
 import org.apache.hadoop.yarn.client.api.AMRMClient;
@@ -1061,33 +1062,20 @@ public class TestAMRMClient {
     Assert.assertEquals(2, amClientImpl.pendingChange.size());
     // as of now: container1 asks to decrease to (512, 1)
     //            container2 asks to increase to (2048, 1)
-    List<Container> decreasedContainers;
-    List<Container> increasedContainers;
-    int allocateAttempts = 0;
-    int decreased = 0;
-    int increased = 0;
-    while (allocateAttempts < 30) {
-      // send allocation requests
-      AllocateResponse allocResponse = amClient.allocate(0.1f);
-      decreasedContainers = allocResponse.getDecreasedContainers();
-      increasedContainers = allocResponse.getIncreasedContainers();
-      decreased += decreasedContainers.size();
-      increased += increasedContainers.size();
-      if (allocateAttempts == 0) {
-        // we should get decrease confirmation right away
-        Assert.assertEquals(1, decreased);
-        // After first allocate request check change size
-        Assert.assertEquals(0, amClientImpl.change.size());
-      } else if (increased == 1) {
-        break;
-      }
-      // increase request is served after next NM heart beat is received
-      // Sleeping and retrying allocate
-      sleep(20);
-      allocateAttempts++;
-    }
-    Assert.assertEquals(1, decreased);
-    Assert.assertEquals(1, increased);
+    // send allocation requests
+    AllocateResponse allocResponse = amClient.allocate(0.1f);
+    Assert.assertEquals(0, amClientImpl.change.size());
+    // we should get decrease confirmation right away
+    List<UpdatedContainer> updatedContainers =
+        allocResponse.getUpdatedContainers();
+    Assert.assertEquals(1, updatedContainers.size());
+    // we should get the increase allocation after the next NM heartbeat to the RM
+    sleep(150);
+    // get allocations
+    allocResponse = amClient.allocate(0.1f);
+    updatedContainers =
+        allocResponse.getUpdatedContainers();
+    Assert.assertEquals(1, updatedContainers.size());
   }
 
   private void testAllocation(final AMRMClientImpl<ContainerRequest> amClient)
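
The rewritten assertions above read resize results from AllocateResponse#getUpdatedContainers() rather than the removed getIncreasedContainers()/getDecreasedContainers() pair: the decrease is confirmed on the first allocate() round trip, while the increase only shows up after the next NM heartbeat reaches the RM. A sketch of how an AM might consume that list follows; the helper class and method names are invented, and only accessors visible in this patch are used.

import java.util.List;

import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
import org.apache.hadoop.yarn.api.records.UpdatedContainer;

// Hypothetical helper, not part of this patch.
final class AllocateResponseSketch {
  static void handleUpdates(AllocateResponse response) {
    List<UpdatedContainer> updated = response.getUpdatedContainers();
    for (UpdatedContainer uc : updated) {
      if (uc.getUpdateType() == ContainerUpdateType.INCREASE_RESOURCE) {
        // Increases arrive only after the RM has heard from the NM,
        // which is why the test above sleeps through one NM heartbeat
        // before the second allocate() call.
        System.out.println("increased: " + uc.getContainer().getId());
      } else if (uc.getUpdateType()
          == ContainerUpdateType.DECREASE_RESOURCE) {
        // Decreases are confirmed on the first allocate() round trip.
        System.out.println("decreased: " + uc.getContainer().getId());
      }
    }
  }

  private AllocateResponseSketch() {}
}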

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6d9cff2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
index 719d9a1..f1c49f2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
@@ -39,12 +39,12 @@ import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -262,7 +262,7 @@ public class TestAMRMClientOnRMRestart {
     // new NM to represent NM re-register
     nm1 = new MockNM("h1:1234", 10240, rm2.getResourceTrackerService());
     NMContainerStatus containerReport =
-        NMContainerStatus.newInstance(containerId, ContainerState.RUNNING,
+        NMContainerStatus.newInstance(containerId, 0, ContainerState.RUNNING,
             Resource.newInstance(1024, 1), "recover container", 0,
             Priority.newInstance(0), 0);
     nm1.registerNode(Collections.singletonList(containerReport),
@@ -399,7 +399,7 @@ public class TestAMRMClientOnRMRestart {
 
     ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
     NMContainerStatus containerReport =
-        NMContainerStatus.newInstance(containerId, ContainerState.RUNNING,
+        NMContainerStatus.newInstance(containerId, 0, ContainerState.RUNNING,
             Resource.newInstance(1024, 1), "recover container", 0,
             Priority.newInstance(0), 0);
     nm1.registerNode(Arrays.asList(containerReport), null);
@@ -562,8 +562,8 @@ public class TestAMRMClientOnRMRestart {
 
     List<ResourceRequest> lastAsk = null;
     List<ContainerId> lastRelease = null;
-    List<ContainerResourceChangeRequest> lastIncrease = null;
-    List<ContainerResourceChangeRequest> lastDecrease = null;
+    List<UpdateContainerRequest> lastIncrease = null;
+    List<UpdateContainerRequest> lastDecrease = null;
     List<String> lastBlacklistAdditions;
     List<String> lastBlacklistRemovals;
 
@@ -574,8 +574,8 @@ public class TestAMRMClientOnRMRestart {
         ApplicationAttemptId applicationAttemptId, List<ResourceRequest> ask,
         List<ContainerId> release, List<String> blacklistAdditions,
         List<String> blacklistRemovals,
-        List<ContainerResourceChangeRequest> increaseRequests,
-        List<ContainerResourceChangeRequest> decreaseRequests) {
+        List<UpdateContainerRequest> increaseRequests,
+        List<UpdateContainerRequest> decreaseRequests) {
       List<ResourceRequest> askCopy = new ArrayList<ResourceRequest>();
       for (ResourceRequest req : ask) {
         ResourceRequest reqCopy =
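
The NMContainerStatus hunks above thread a new container-version argument (the second parameter, 0 here) through newInstance(), which lets the RM tell a container token re-issued by an update apart from a stale one. A standalone construction mirroring the test call might look like the sketch below; all IDs are placeholders, and the trailing-argument comments are best-effort readings of the call in the test rather than documented parameter names.

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;

// Standalone sketch mirroring the updated newInstance() call in the test;
// all IDs are placeholders.
public class NMContainerStatusVersionSketch {
  public static void main(String[] args) {
    ApplicationId appId = ApplicationId.newInstance(0L, 1);
    ApplicationAttemptId attemptId =
        ApplicationAttemptId.newInstance(appId, 1);
    ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
    NMContainerStatus report = NMContainerStatus.newInstance(
        containerId,
        0,                               // container version (new argument)
        ContainerState.RUNNING,
        Resource.newInstance(1024, 1),   // 1024 MB, 1 vcore
        "recover container",
        0,                               // exit status
        Priority.newInstance(0),
        0);                              // creation time
    System.out.println(report);
  }
}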

