Posted to yarn-commits@hadoop.apache.org by ar...@apache.org on 2013/12/19 03:04:52 UTC

svn commit: r1552205 [1/3] - in /hadoop/common/branches/HDFS-2832/hadoop-yarn-project: ./ hadoop-yarn/ hadoop-yarn/dev-support/ hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ hadoop-yarn/hadoop-yarn-api/src/main/j...

Author: arp
Date: Thu Dec 19 02:03:47 2013
New Revision: 1552205

URL: http://svn.apache.org/r1552205
Log:
Merge forward from trunk to branch HDFS-2832

Added:
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UpdateNodeResourceRequest.java
      - copied unchanged from r1552204, hadoop/common/trunk/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UpdateNodeResourceRequest.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UpdateNodeResourceResponse.java
      - copied unchanged from r1552204, hadoop/common/trunk/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UpdateNodeResourceResponse.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
      - copied unchanged from r1552204, hadoop/common/trunk/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ConfiguredRMFailoverProxyProvider.java
      - copied unchanged from r1552204, hadoop/common/trunk/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ConfiguredRMFailoverProxyProvider.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMFailoverProxyProvider.java
      - copied unchanged from r1552204, hadoop/common/trunk/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMFailoverProxyProvider.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceRequestPBImpl.java
      - copied unchanged from r1552204, hadoop/common/trunk/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceRequestPBImpl.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceResponsePBImpl.java
      - copied unchanged from r1552204, hadoop/common/trunk/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceResponsePBImpl.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAttemptAddedSchedulerEvent.java
      - copied unchanged from r1552204, hadoop/common/trunk/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAttemptAddedSchedulerEvent.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAttemptRemovedSchedulerEvent.java
      - copied unchanged from r1552204, hadoop/common/trunk/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAttemptRemovedSchedulerEvent.java
Removed:
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppRemovedSchedulerEvent.java
Modified:
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/CHANGES.txt
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/KillApplicationResponse.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceManagerAdministrationProtocolPBClientImpl.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationResponsePBImpl.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestHAUtil.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ServerRMProxy.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppState.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/SchedulerEventType.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYARNClusterForHA.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServer.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/pom.xml
    hadoop/common/branches/HDFS-2832/hadoop-yarn-project/pom.xml

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/CHANGES.txt?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/CHANGES.txt Thu Dec 19 02:03:47 2013
@@ -49,6 +49,12 @@ Release 2.4.0 - UNRELEASED
     YARN-1448. AM-RM protocol changes to support container resizing (Wangda Tan
     via Sandy Ryza)
 
+    YARN-312. Introduced ResourceManagerAdministrationProtocol changes to support
+    changing resources on node. (Junping Du via vinodkv)
+
+    YARN-1028. Added FailoverProxyProvider capability to ResourceManager to help
+    with RM failover. (Karthik Kambatla via vinodkv)
+
   IMPROVEMENTS
 
     YARN-7. Support CPU resource for DistributedShell. (Junping Du via llu)
@@ -162,6 +168,22 @@ Release 2.4.0 - UNRELEASED
     YARN-408. Change CapacityScheduler to not disable delay-scheduling by default.
     (Mayank Bansal via vinodkv)
 
+    YARN-1325. Modified RM HA configuration validation to also ensure that
+    multiple RMs are configured. (Xuan Gong via vinodkv)
+
+    YARN-1311. Fixed app specific scheduler-events' names to be app-attempt
+    based. (vinodkv via jianhe)
+
+    YARN-1485. Modified RM HA configuration validation to also ensure that
+    service-address configuration are configured for every RM. (Xuan Gong via
+    vinodkv)
+
+    YARN-1435. Modified Distributed Shell to accept either the command or the
+    custom script. (Xuan Gong via zjshen)
+
+    YARN-1446. Changed client API to retry killing application till RM
+    acknowledges so as to account for RM crashes/failover. (Jian He via vinodkv)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -233,6 +255,15 @@ Release 2.4.0 - UNRELEASED
     YARN-1405. Fixed ResourceManager to not hang when init/start fails with an
     exception w.r.t state-store. (Jian He via vinodkv)
 
+    YARN-1505. Fixed Webapplication proxy server to not hardcode its bind
+    address. (Xuan Gong via vinodkv)
+
+    YARN-1145. Fixed a potential file-handle leak in the web interface for
+    displaying aggregated logs. (Rohith Sharma via vinodkv)
+
+    YARN-1451. TestResourceManager relies on the scheduler assigning multiple
+    containers in a single node update. (Sandy Ryza via kasha)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml Thu Dec 19 02:03:47 2013
@@ -310,4 +310,12 @@
     <Bug pattern="IS2_INCONSISTENT_SYNC" />
   </Match>
 
+  <!-- Ignore INSTANCE not being final as it is created in sub-classes -->
+  <Match>
+    <Class name="org.apache.hadoop.yarn.client.RMProxy" />
+    <Field name="INSTANCE" />
+    <Bug pattern="MS_SHOULD_BE_FINAL"/>
+  </Match>
+
+
 </FindBugsFilter>

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/KillApplicationResponse.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/KillApplicationResponse.java?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/KillApplicationResponse.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/KillApplicationResponse.java Thu Dec 19 02:03:47 2013
@@ -26,10 +26,21 @@ import org.apache.hadoop.yarn.api.Applic
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p>The response sent by the <code>ResourceManager</code> to the client
- * aborting a submitted application.</p>
- *
- * <p>Currently it's empty.</p>
+ * <p>
+ * The response sent by the <code>ResourceManager</code> to the client aborting
+ * a submitted application.
+ * </p>
+ * <p>
+ * The response, includes:
+ * <ul>
+ * <li>A flag which indicates that the process of killing the application is
+ * completed or not.</li>
+ * </ul>
+ * Note: user is recommended to wait until this flag becomes true, otherwise if
+ * the <code>ResourceManager</code> crashes before the process of killing the
+ * application is completed, the <code>ResourceManager</code> may retry this
+ * application on recovery.
+ * </p>
  * 
  * @see ApplicationClientProtocol#forceKillApplication(KillApplicationRequest)
  */
@@ -38,9 +49,24 @@ import org.apache.hadoop.yarn.util.Recor
 public abstract class KillApplicationResponse {
   @Private
   @Unstable
-  public static KillApplicationResponse newInstance() {
+  public static KillApplicationResponse newInstance(boolean isKillCompleted) {
     KillApplicationResponse response =
         Records.newRecord(KillApplicationResponse.class);
+    response.setIsKillCompleted(isKillCompleted);
     return response;
   }
+
+  /**
+   * Get the flag which indicates that the process of killing application is completed or not.
+   */
+  @Public
+  @Stable
+  public abstract boolean getIsKillCompleted();
+
+  /**
+   * Set the flag which indicates that the process of killing application is completed or not.
+   */
+  @Private
+  @Unstable
+  public abstract void setIsKillCompleted(boolean isKillCompleted);
 }
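
The javadoc above recommends polling until the kill flag becomes true. A minimal sketch of that client-side pattern, assuming an existing ApplicationClientProtocol proxy (for example one built via ClientRMProxy) and an ApplicationId; the killAndWait helper name and the 200 ms sleep are illustrative only and not part of this commit, the sleep simply mirroring the new application-client-protocol poll-interval default added elsewhere in this change:

    import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
    import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
    import org.apache.hadoop.yarn.api.records.ApplicationId;

    public class KillAndWaitExample {
      // Re-issue the kill request until the RM reports the kill as completed.
      public static void killAndWait(ApplicationClientProtocol rmClient,
          ApplicationId appId) throws Exception {
        KillApplicationRequest request = KillApplicationRequest.newInstance(appId);
        KillApplicationResponse response = rmClient.forceKillApplication(request);
        while (!response.getIsKillCompleted()) {
          Thread.sleep(200);   // back off briefly, then ask the RM again
          response = rmClient.forceKillApplication(request);
        }
      }
    }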

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java Thu Dec 19 02:03:47 2013
@@ -62,4 +62,10 @@ public abstract class ResourceOption {
   
   protected abstract void build();
   
+  @Override
+  public String toString() {
+    return "Resource:" + getResource().toString() 
+        + ", overCommitTimeout:" + getOverCommitTimeout();
+  }
+  
 }

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java Thu Dec 19 02:03:47 2013
@@ -58,25 +58,46 @@ public class HAUtil {
    */
   public static void verifyAndSetConfiguration(Configuration conf)
     throws YarnRuntimeException {
-    verifyAndSetRMHAIds(conf);
-    verifyAndSetRMHAId(conf);
-    verifyAndSetAllRpcAddresses(conf);
+    verifyAndSetRMHAIdsList(conf);
+    verifyAndSetCurrentRMHAId(conf);
+    verifyAndSetAllServiceAddresses(conf);
   }
 
-
-  private static void verifyAndSetRMHAIds(Configuration conf) {
+  /**
+   * Verify configuration that there are at least two RM-ids
+   * and RPC addresses are specified for each RM-id.
+   * Then set the RM-ids.
+   */
+  private static void verifyAndSetRMHAIdsList(Configuration conf) {
     Collection<String> ids =
       conf.getTrimmedStringCollection(YarnConfiguration.RM_HA_IDS);
-    if (ids.size() <= 0) {
+    if (ids.size() < 2) {
       throwBadConfigurationException(
         getInvalidValueMessage(YarnConfiguration.RM_HA_IDS,
-          conf.get(YarnConfiguration.RM_HA_IDS)));
-    } else if (ids.size() == 1) {
-      LOG.warn(getRMHAIdsWarningMessage(ids.toString()));
+          conf.get(YarnConfiguration.RM_HA_IDS) +
+          "\nHA mode requires atleast two RMs"));
     }
 
     StringBuilder setValue = new StringBuilder();
     for (String id: ids) {
+      // verify the RM service addresses configurations for every RMIds
+      for (String prefix : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
+        String confKey = null;
+        try {
+          confKey = addSuffix(prefix, id);
+          if (conf.getTrimmed(confKey) == null) {
+            throwBadConfigurationException(getNeedToSetValueMessage(confKey));
+          }
+        } catch (IllegalArgumentException iae) {
+          String errmsg = iae.getMessage();
+          if (confKey == null) {
+            // Error at addSuffix
+            errmsg = getInvalidValueMessage(YarnConfiguration.RM_HA_ID,
+              getRMHAId(conf));
+          }
+          throwBadConfigurationException(errmsg);
+        }
+      }
       setValue.append(id);
       setValue.append(",");
     }
@@ -84,7 +105,7 @@ public class HAUtil {
       setValue.substring(0, setValue.length() - 1));
   }
 
-  private static void verifyAndSetRMHAId(Configuration conf) {
+  private static void verifyAndSetCurrentRMHAId(Configuration conf) {
     String rmId = conf.getTrimmed(YarnConfiguration.RM_HA_ID);
     if (rmId == null) {
       throwBadConfigurationException(
@@ -123,8 +144,8 @@ public class HAUtil {
     }
   }
 
-  public static void verifyAndSetAllRpcAddresses(Configuration conf) {
-    for (String confKey : YarnConfiguration.RM_RPC_ADDRESS_CONF_KEYS) {
+  public static void verifyAndSetAllServiceAddresses(Configuration conf) {
+    for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
      verifyAndSetConfValue(confKey, conf);
     }
   }
@@ -176,7 +197,7 @@ public class HAUtil {
   @InterfaceAudience.Private
   @VisibleForTesting
   static String getConfKeyForRMInstance(String prefix, Configuration conf) {
-    return YarnConfiguration.RM_RPC_ADDRESS_CONF_KEYS.contains(prefix)
+    return YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS.contains(prefix)
         ? addSuffix(prefix, getRMHAId(conf))
         : prefix;
   }
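
The stricter checks above require at least two RM-ids and a value for every per-RM service-address key. A hedged sketch of a Configuration that should satisfy verifyAndSetConfiguration; the rm1/rm2 ids and host names are made up, and only the plain RM address is shown per id (the scheduler, admin, resource-tracker and webapp address keys would need the same per-id treatment):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.HAUtil;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class HAConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
        conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");   // at least two RM-ids
        conf.set(YarnConfiguration.RM_HA_ID, "rm1");        // id of the local RM
        // Every key in RM_SERVICES_ADDRESS_CONF_KEYS needs a per-id value, e.g.:
        conf.set(YarnConfiguration.RM_ADDRESS + ".rm1", "rm1.example.com:8032");
        conf.set(YarnConfiguration.RM_ADDRESS + ".rm2", "rm2.example.com:8032");
        // ... repeat for the scheduler, admin, resource-tracker and webapp keys ...
        HAUtil.verifyAndSetConfiguration(conf);  // throws YarnRuntimeException if incomplete
      }
    }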

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java Thu Dec 19 02:03:47 2013
@@ -23,11 +23,12 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
@@ -285,14 +286,40 @@ public class YarnConfiguration extends C
   public static final String RM_HA_IDS = RM_HA_PREFIX + "rm-ids";
   public static final String RM_HA_ID = RM_HA_PREFIX + "id";
 
-  public static final List<String> RM_RPC_ADDRESS_CONF_KEYS =
+  @Private
+  public static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS =
       Collections.unmodifiableList(Arrays.asList(
           RM_ADDRESS,
           RM_SCHEDULER_ADDRESS,
           RM_ADMIN_ADDRESS,
           RM_RESOURCE_TRACKER_ADDRESS,
-          RM_WEBAPP_ADDRESS,
-          RM_WEBAPP_HTTPS_ADDRESS));
+          HttpConfig.isSecure() ? RM_WEBAPP_HTTPS_ADDRESS
+              : RM_WEBAPP_ADDRESS));
+
+  public static final String CLIENT_FAILOVER_PREFIX =
+      YARN_PREFIX + "client.failover-";
+  public static final String CLIENT_FAILOVER_PROXY_PROVIDER =
+      CLIENT_FAILOVER_PREFIX + "proxy-provider";
+  public static final String DEFAULT_CLIENT_FAILOVER_PROXY_PROVIDER =
+      "org.apache.hadoop.yarn.client.ConfiguredRMFailoverProxyProvider";
+
+  public static final String CLIENT_FAILOVER_MAX_ATTEMPTS =
+      CLIENT_FAILOVER_PREFIX + "max-attempts";
+
+  public static final String CLIENT_FAILOVER_SLEEPTIME_BASE_MS =
+      CLIENT_FAILOVER_PREFIX + "sleep-base-ms";
+
+  public static final String CLIENT_FAILOVER_SLEEPTIME_MAX_MS =
+      CLIENT_FAILOVER_PREFIX + "sleep-max-ms";
+
+  public static final String CLIENT_FAILOVER_RETRIES =
+      CLIENT_FAILOVER_PREFIX + "retries";
+  public static final int DEFAULT_CLIENT_FAILOVER_RETRIES = 0;
+
+  public static final String CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS =
+      CLIENT_FAILOVER_PREFIX + "retries-on-socket-timeouts";
+  public static final int
+      DEFAULT_CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS = 0;
 
   ////////////////////////////////
   // RM state store configs
@@ -848,22 +875,31 @@ public class YarnConfiguration extends C
   public static final String IS_MINI_YARN_CLUSTER = YARN_PREFIX
       + "is.minicluster";
 
+  public static final String YARN_MC_PREFIX = YARN_PREFIX + "minicluster.";
+
   /** Whether to use fixed ports with the minicluster. */
-  public static final String YARN_MINICLUSTER_FIXED_PORTS = YARN_PREFIX
-      + "minicluster.fixed.ports";
+  public static final String YARN_MINICLUSTER_FIXED_PORTS =
+      YARN_MC_PREFIX + "fixed.ports";
 
   /**
    * Default is false to be able to run tests concurrently without port
    * conflicts.
    */
-  public static boolean DEFAULT_YARN_MINICLUSTER_FIXED_PORTS = false;
+  public static final boolean DEFAULT_YARN_MINICLUSTER_FIXED_PORTS = false;
+
+  /**
+   * Whether the NM should use RPC to connect to the RM. Default is false.
+   * Can be set to true only when using fixed ports.
+   */
+  public static final String YARN_MINICLUSTER_USE_RPC = YARN_MC_PREFIX + "use-rpc";
+  public static final boolean DEFAULT_YARN_MINICLUSTER_USE_RPC = false;
 
   /**
    * Whether users are explicitly trying to control resource monitoring
    * configuration for the MiniYARNCluster. Disabled by default.
    */
   public static final String YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING =
-      YARN_PREFIX + "minicluster.control-resource-monitoring";
+      YARN_MC_PREFIX + "control-resource-monitoring";
   public static final boolean
       DEFAULT_YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING = false;
 
@@ -879,15 +915,23 @@ public class YarnConfiguration extends C
   ////////////////////////////////
 
   /**
+   * Use YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS instead.
    * The interval of the yarn client's querying application state after
    * application submission. The unit is millisecond.
    */
+  @Deprecated
   public static final String YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS =
       YARN_PREFIX + "client.app-submission.poll-interval";
-  public static final long DEFAULT_YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS =
-      1000;
 
   /**
+   * The interval that the yarn client library uses to poll the completion
+   * status of the asynchronous API of application client protocol.
+   */
+  public static final String YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS =
+      YARN_PREFIX + "client.application-client-protocol.poll-interval-ms";
+  public static final long DEFAULT_YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS =
+      200;
+  /**
    * Max number of threads in NMClientAsync to process container management
    * events
    */
@@ -940,7 +984,7 @@ public class YarnConfiguration extends C
   public InetSocketAddress getSocketAddr(
       String name, String defaultAddress, int defaultPort) {
     String address;
-    if (HAUtil.isHAEnabled(this) && RM_RPC_ADDRESS_CONF_KEYS.contains(name)) {
+    if (HAUtil.isHAEnabled(this) && RM_SERVICES_ADDRESS_CONF_KEYS.contains(name)) {
       address = HAUtil.getConfValueForRMInstance(name, defaultAddress, this);
     } else {
       address = get(name, defaultAddress);
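
The new yarn.client.failover-* keys added above feed the retry policy that RMProxy builds for HA clients. A brief sketch of setting them programmatically; the numeric values are purely illustrative, not recommended defaults:

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class FailoverTuningExample {
      public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();
        // Proxy provider used for RM failover; defaults to ConfiguredRMFailoverProxyProvider.
        conf.set(YarnConfiguration.CLIENT_FAILOVER_PROXY_PROVIDER,
            YarnConfiguration.DEFAULT_CLIENT_FAILOVER_PROXY_PROVIDER);
        // Cap the number of failover attempts and the back-off between them.
        conf.setInt(YarnConfiguration.CLIENT_FAILOVER_MAX_ATTEMPTS, 15);
        conf.setLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS, 500L);
        conf.setLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_MAX_MS, 15000L);
        // Per-RM retries before failing over to the other RM.
        conf.setInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES,
            YarnConfiguration.DEFAULT_CLIENT_FAILOVER_RETRIES);
      }
    }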

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java Thu Dec 19 02:03:47 2013
@@ -22,8 +22,11 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.exceptions.RMNotYetActiveException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest;
@@ -38,6 +41,8 @@ import org.apache.hadoop.yarn.server.api
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
 
 @Private
 @Stable
@@ -77,4 +82,24 @@ public interface ResourceManagerAdminist
   public RefreshServiceAclsResponse refreshServiceAcls(
       RefreshServiceAclsRequest request)
   throws YarnException, IOException;
+  
+  /**
+   * <p>The interface used by admin to update nodes' resources to the
+   * <code>ResourceManager</code> </p>.
+   * 
+   * <p>The admin client is required to provide details such as a map from 
+   * {@link NodeId} to {@link ResourceOption} required to update resources on 
+   * a list of <code>RMNode</code> in <code>ResourceManager</code> etc.
+   * via the {@link UpdateNodeResourceRequest}.</p>
+   * 
+   * @param request request to update resource for a node in cluster.
+   * @return (empty) response on accepting update.
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @Evolving
+  public UpdateNodeResourceResponse updateNodeResource(
+      UpdateNodeResourceRequest request) 
+  throws YarnException, IOException;
 }
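
The new updateNodeResource method carries a NodeId-to-ResourceOption map inside UpdateNodeResourceRequest. A hedged sketch of an admin-side call: adminProtocol stands for an already-created ResourceManagerAdministrationProtocol proxy, and the request's newInstance factory signature is assumed to follow the usual record pattern:

    import java.util.Collections;
    import java.util.Map;
    import org.apache.hadoop.yarn.api.records.NodeId;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.api.records.ResourceOption;
    import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
    import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;

    public class UpdateNodeResourceExample {
      public static void updateOneNode(ResourceManagerAdministrationProtocol adminProtocol)
          throws Exception {
        NodeId nodeId = NodeId.newInstance("nm-host.example.com", 45454);
        // 8 GB / 8 vcores, with a negative over-commit timeout meaning no timeout.
        ResourceOption option =
            ResourceOption.newInstance(Resource.newInstance(8192, 8), -1);
        Map<NodeId, ResourceOption> updates = Collections.singletonMap(nodeId, option);
        adminProtocol.updateNodeResource(UpdateNodeResourceRequest.newInstance(updates));
      }
    }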

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto Thu Dec 19 02:03:47 2013
@@ -38,4 +38,5 @@ service ResourceManagerAdministrationPro
   rpc refreshAdminAcls(RefreshAdminAclsRequestProto) returns (RefreshAdminAclsResponseProto);
   rpc refreshServiceAcls(RefreshServiceAclsRequestProto) returns (RefreshServiceAclsResponseProto);
   rpc getGroupsForUser(GetGroupsForUserRequestProto) returns (GetGroupsForUserResponseProto);
+  rpc updateNodeResource (UpdateNodeResourceRequestProto) returns (UpdateNodeResourceResponseProto);
 }

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto Thu Dec 19 02:03:47 2013
@@ -68,6 +68,13 @@ message GetGroupsForUserResponseProto {
   repeated string groups = 1;
 }
 
+message UpdateNodeResourceRequestProto {
+  repeated NodeResourceMapProto node_resource_map = 1;
+}
+
+message UpdateNodeResourceResponseProto {
+}
+
 ////////////////////////////////////////////////////////////////////////
 ////// RM recovery related records /////////////////////////////////////
 ////////////////////////////////////////////////////////////////////////

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto Thu Dec 19 02:03:47 2013
@@ -63,6 +63,11 @@ message ResourceOptionProto {
   optional int32 over_commit_timeout = 2;
 }
 
+message NodeResourceMapProto {
+  optional NodeIdProto node_id = 1;
+  optional ResourceOptionProto resource_option = 2;
+}
+
 message PriorityProto {
   optional int32 priority = 1;
 }

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto Thu Dec 19 02:03:47 2013
@@ -116,6 +116,7 @@ message KillApplicationRequestProto {
 }
 
 message KillApplicationResponseProto {
+  optional bool is_kill_completed = 1 [default = false];
 }
 
 message GetClusterMetricsRequestProto {

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java Thu Dec 19 02:03:47 2013
@@ -218,13 +218,14 @@ public class ApplicationMaster {
   private long shellScriptPathLen = 0;
 
   // Hardcoded path to shell script in launch container's local env
-  private final String ExecShellStringPath = "ExecShellScript.sh";
+  private static final String ExecShellStringPath = "ExecShellScript.sh";
+  private static final String ExecBatScripStringtPath = "ExecBatScript.bat";
 
   // Hardcoded path to custom log_properties
-  private final String log4jPath = "log4j.properties";
+  private static final String log4jPath = "log4j.properties";
 
-  private final String shellCommandPath = "shellCommands";
-  private final String shellArgsPath = "shellArgs";
+  private static final String shellCommandPath = "shellCommands";
+  private static final String shellArgsPath = "shellArgs";
 
   private volatile boolean done;
   private volatile boolean success;
@@ -234,6 +235,9 @@ public class ApplicationMaster {
   // Launch threads
   private List<Thread> launchThreads = new ArrayList<Thread>();
 
+  private final String linux_bash_command = "bash";
+  private final String windows_command = "cmd /c";
+
   /**
    * @param args Command line args
    */
@@ -308,8 +312,6 @@ public class ApplicationMaster {
     Options opts = new Options();
     opts.addOption("app_attempt_id", true,
         "App Attempt ID. Not to be used unless for testing purposes");
-    opts.addOption("shell_script", true,
-        "Location of the shell script to be executed");
     opts.addOption("shell_env", true,
         "Environment for shell script. Specified as env_key=env_val pairs");
     opts.addOption("container_memory", true,
@@ -387,11 +389,15 @@ public class ApplicationMaster {
         + appAttemptID.getApplicationId().getClusterTimestamp()
         + ", attemptId=" + appAttemptID.getAttemptId());
 
-    if (!fileExist(shellCommandPath)) {
+    if (!fileExist(shellCommandPath)
+        && envs.get(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION).isEmpty()) {
       throw new IllegalArgumentException(
-          "No shell command specified to be executed by application master");
+          "No shell command or shell script specified to be executed by application master");
+    }
+
+    if (fileExist(shellCommandPath)) {
+      shellCommand = readContent(shellCommandPath);
     }
-    shellCommand = readContent(shellCommandPath);
 
     if (fileExist(shellArgsPath)) {
       shellArgs = readContent(shellArgsPath);
@@ -847,7 +853,9 @@ public class ApplicationMaster {
         }
         shellRsrc.setTimestamp(shellScriptPathTimestamp);
         shellRsrc.setSize(shellScriptPathLen);
-        localResources.put(ExecShellStringPath, shellRsrc);
+        localResources.put(Shell.WINDOWS ? ExecBatScripStringtPath :
+            ExecShellStringPath, shellRsrc);
+        shellCommand = Shell.WINDOWS ? windows_command : linux_bash_command;
       }
       ctx.setLocalResources(localResources);
 
@@ -858,7 +866,8 @@ public class ApplicationMaster {
       vargs.add(shellCommand);
       // Set shell script path
       if (!shellScriptPath.isEmpty()) {
-        vargs.add(ExecShellStringPath);
+        vargs.add(Shell.WINDOWS ? ExecBatScripStringtPath
+            : ExecShellStringPath);
       }
 
       // Set args for the shell command if any

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java Thu Dec 19 02:03:47 2013
@@ -49,6 +49,7 @@ import org.apache.hadoop.io.DataOutputBu
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
@@ -167,11 +168,14 @@ public class Client {
   // Command line options
   private Options opts;
 
-  private final String shellCommandPath = "shellCommands";
-  private final String shellArgsPath = "shellArgs";
-  private final String appMasterJarPath = "AppMaster.jar";
+  private static final String shellCommandPath = "shellCommands";
+  private static final String shellArgsPath = "shellArgs";
+  private static final String appMasterJarPath = "AppMaster.jar";
   // Hardcoded path to custom log_properties
-  private final String log4jPath = "log4j.properties";
+  private static final String log4jPath = "log4j.properties";
+
+  private static final String linuxShellPath = "ExecShellScript.sh";
+  private static final String windowBatPath = "ExecBatScript.bat";
 
   /**
    * @param args Command line arguments 
@@ -225,8 +229,11 @@ public class Client {
     opts.addOption("master_memory", true, "Amount of memory in MB to be requested to run the application master");
     opts.addOption("master_vcores", true, "Amount of virtual cores to be requested to run the application master");
     opts.addOption("jar", true, "Jar file containing the application master");
-    opts.addOption("shell_command", true, "Shell command to be executed by the Application Master");
-    opts.addOption("shell_script", true, "Location of the shell script to be executed");
+    opts.addOption("shell_command", true, "Shell command to be executed by " +
+        "the Application Master. Can only specify either --shell_command " +
+        "or --shell_script");
+    opts.addOption("shell_script", true, "Location of the shell script to be " +
+        "executed. Can only specify either --shell_command or --shell_script");
     opts.addOption("shell_args", true, "Command line args for the shell script." +
         "Multiple args can be separated by empty space.");
     opts.getOption("shell_args").setArgs(Option.UNLIMITED_VALUES);
@@ -308,12 +315,15 @@ public class Client {
 
     appMasterJar = cliParser.getOptionValue("jar");
 
-    if (!cliParser.hasOption("shell_command")) {
-      throw new IllegalArgumentException("No shell command specified to be executed by application master");
-    }
-    shellCommand = cliParser.getOptionValue("shell_command");
-
-    if (cliParser.hasOption("shell_script")) {
+    if (!cliParser.hasOption("shell_command") && !cliParser.hasOption("shell_script")) {
+      throw new IllegalArgumentException(
+          "No shell command or shell script specified to be executed by application master");
+    } else if (cliParser.hasOption("shell_command") && cliParser.hasOption("shell_script")) {
+      throw new IllegalArgumentException("Can not specify shell_command option " +
+          "and shell_script option at the same time");
+    } else if (cliParser.hasOption("shell_command")) {
+      shellCommand = cliParser.getOptionValue("shell_command");
+    } else {
       shellScriptPath = cliParser.getOptionValue("shell_script");
     }
     if (cliParser.hasOption("shell_args")) {
@@ -466,8 +476,11 @@ public class Client {
     long hdfsShellScriptTimestamp = 0;
     if (!shellScriptPath.isEmpty()) {
       Path shellSrc = new Path(shellScriptPath);
-      String shellPathSuffix = appName + "/" + appId.getId() + "/ExecShellScript.sh";
-      Path shellDst = new Path(fs.getHomeDirectory(), shellPathSuffix);
+      String shellPathSuffix =
+          appName + "/" + appId.getId() + "/"
+              + (Shell.WINDOWS ? windowBatPath : linuxShellPath);
+      Path shellDst =
+          new Path(fs.getHomeDirectory(), shellPathSuffix);
       fs.copyFromLocalFile(false, true, shellSrc, shellDst);
       hdfsShellScriptLocation = shellDst.toUri().toString(); 
       FileStatus shellFileStatus = fs.getFileStatus(shellDst);

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java Thu Dec 19 02:03:47 2013
@@ -304,6 +304,54 @@ public class TestDistributedShell {
   }
 
   @Test(timeout=90000)
+  public void testDSShellWithShellScript() throws Exception {
+    final File basedir =
+        new File("target", TestDistributedShell.class.getName());
+    final File tmpDir = new File(basedir, "tmpDir");
+    tmpDir.mkdirs();
+    final File customShellScript = new File(tmpDir, "custom_script.sh");
+    if (customShellScript.exists()) {
+      customShellScript.delete();
+    }
+    if (!customShellScript.createNewFile()) {
+      Assert.fail("Can not create custom shell script file.");
+    }
+    PrintWriter fileWriter = new PrintWriter(customShellScript);
+    // set the output to DEBUG level
+    fileWriter.write("echo testDSShellWithShellScript");
+    fileWriter.close();
+    System.out.println(customShellScript.getAbsolutePath());
+    String[] args = {
+        "--jar",
+        APPMASTER_JAR,
+        "--num_containers",
+        "1",
+        "--shell_script",
+        customShellScript.getAbsolutePath(),
+        "--master_memory",
+        "512",
+        "--master_vcores",
+        "2",
+        "--container_memory",
+        "128",
+        "--container_vcores",
+        "1"
+    };
+
+    LOG.info("Initializing DS Client");
+    final Client client =
+        new Client(new Configuration(yarnCluster.getConfig()));
+    boolean initSuccess = client.init(args);
+    Assert.assertTrue(initSuccess);
+    LOG.info("Running DS Client");
+    boolean result = client.run();
+    LOG.info("Client run completed. Result=" + result);
+    List<String> expectedContent = new ArrayList<String>();
+    expectedContent.add("testDSShellWithShellScript");
+    verifyContainerLog(1, expectedContent, false, "");
+  }
+
+  @Test(timeout=90000)
   public void testDSShellWithInvalidArgs() throws Exception {
     Client client = new Client(new Configuration(yarnCluster.getConfig()));
 
@@ -399,6 +447,58 @@ public class TestDistributedShell {
       Assert.assertTrue("The throw exception is not expected",
           e.getMessage().contains("Invalid virtual cores specified"));
     }
+
+    LOG.info("Initializing DS Client with --shell_command and --shell_script");
+    try {
+      String[] args = {
+          "--jar",
+          APPMASTER_JAR,
+          "--num_containers",
+          "2",
+          "--shell_command",
+          Shell.WINDOWS ? "dir" : "ls",
+          "--master_memory",
+          "512",
+          "--master_vcores",
+          "2",
+          "--container_memory",
+          "128",
+          "--container_vcores",
+          "1",
+          "--shell_script",
+          "test.sh"
+      };
+      client.init(args);
+      Assert.fail("Exception is expected");
+    } catch (IllegalArgumentException e) {
+      Assert.assertTrue("The throw exception is not expected",
+          e.getMessage().contains("Can not specify shell_command option " +
+          "and shell_script option at the same time"));
+    }
+
+    LOG.info("Initializing DS Client without --shell_command and --shell_script");
+    try {
+      String[] args = {
+          "--jar",
+          APPMASTER_JAR,
+          "--num_containers",
+          "2",
+          "--master_memory",
+          "512",
+          "--master_vcores",
+          "2",
+          "--container_memory",
+          "128",
+          "--container_vcores",
+          "1"
+      };
+      client.init(args);
+      Assert.fail("Exception is expected");
+    } catch (IllegalArgumentException e) {
+      Assert.assertTrue("The throw exception is not expected",
+          e.getMessage().contains("No shell command or shell script specified " +
+          "to be executed by application master"));
+    }
   }
 
   protected static void waitForNMToRegister(NodeManager nm)
@@ -490,10 +590,10 @@ public class TestDistributedShell {
       for (File output : containerFiles[i].listFiles()) {
         if (output.getName().trim().contains("stdout")) {
           BufferedReader br = null;
+          List<String> stdOutContent = new ArrayList<String>();
           try {
 
             String sCurrentLine;
-
             br = new BufferedReader(new FileReader(output));
             int numOfline = 0;
             while ((sCurrentLine = br.readLine()) != null) {
@@ -502,12 +602,25 @@ public class TestDistributedShell {
                   numOfWords++;
                 }
               } else if (output.getName().trim().equals("stdout")){
-                Assert.assertEquals("The current is" + sCurrentLine,
-                    expectedContent.get(numOfline), sCurrentLine.trim());
-                numOfline++;
+                if (! Shell.WINDOWS) {
+                  Assert.assertEquals("The current is" + sCurrentLine,
+                      expectedContent.get(numOfline), sCurrentLine.trim());
+                  numOfline++;
+                } else {
+                  stdOutContent.add(sCurrentLine.trim());
+                }
               }
             }
-
+            /* When the bat script is executed via cmd /c, the script's
+             * contents are printed before its output, so it is hard to
+             * check it line by line. Instead, simply check that the output
+             * from the bat file contains all the expected messages.
+             */
+            if (Shell.WINDOWS && !count
+                && output.getName().trim().equals("stdout")) {
+              Assert.assertTrue(stdOutContent.containsAll(expectedContent));
+            }
           } catch (IOException e) {
             e.printStackTrace();
           } finally {
@@ -523,6 +636,5 @@ public class TestDistributedShell {
     }
     return numOfWords;
   }
-
 }
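
For context on the Windows branch added above: a minimal, standalone sketch of the order-insensitive stdout check that the comment in verifyContainerLog describes. The class and method names below are illustrative only and are not part of this patch.

import java.util.List;

// Illustrative helper (not part of the patch): verify container stdout either
// strictly line by line (Unix) or by containment (Windows, where cmd /c may
// echo the bat script body ahead of its output).
final class ContainerStdoutCheck {
  static void assertStdout(boolean onWindows, List<String> expected, List<String> actual) {
    if (onWindows) {
      // Extra echoed lines are tolerated; every expected line must be present.
      if (!actual.containsAll(expected)) {
        throw new AssertionError("stdout is missing expected lines: " + expected);
      }
    } else {
      // On Unix the output order is deterministic, so compare position by position.
      for (int i = 0; i < expected.size(); i++) {
        if (!expected.get(i).equals(actual.get(i).trim())) {
          throw new AssertionError("unexpected line " + i + ": " + actual.get(i));
        }
      }
    }
  }
}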
 

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java Thu Dec 19 02:03:47 2013
@@ -23,6 +23,7 @@ import java.net.InetSocketAddress;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -34,17 +35,37 @@ import org.apache.hadoop.yarn.conf.YarnC
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
 
-public class ClientRMProxy<T> extends RMProxy<T>  {
+import com.google.common.base.Preconditions;
 
+public class ClientRMProxy<T> extends RMProxy<T>  {
   private static final Log LOG = LogFactory.getLog(ClientRMProxy.class);
 
+  private interface ClientRMProtocols extends ApplicationClientProtocol,
+      ApplicationMasterProtocol, ResourceManagerAdministrationProtocol {
+    // Add nothing
+  }
+
+  static {
+    INSTANCE = new ClientRMProxy();
+  }
+
+  private ClientRMProxy(){
+    super();
+  }
+
+  /**
+   * Create a proxy to the ResourceManager for the specified protocol.
+   * @param configuration Configuration with all the required information.
+   * @param protocol Client protocol for which proxy is being requested.
+   * @param <T> Type of proxy.
+   * @return Proxy to the ResourceManager for the specified client protocol.
+   * @throws IOException
+   */
   public static <T> T createRMProxy(final Configuration configuration,
       final Class<T> protocol) throws IOException {
-    YarnConfiguration conf = (configuration instanceof YarnConfiguration)
-        ? (YarnConfiguration) configuration
-        : new YarnConfiguration(configuration);
-    InetSocketAddress rmAddress = getRMAddress(conf, protocol);
-    return createRMProxy(conf, protocol, rmAddress);
+    // This method exists only to initialize this class' static INSTANCE. TODO:
+    // FIX if possible
+    return RMProxy.createRMProxy(configuration, protocol);
   }
 
   private static void setupTokens(InetSocketAddress resourceManagerAddress)
@@ -63,7 +84,9 @@ public class ClientRMProxy<T> extends RM
     }
   }
 
-  private static InetSocketAddress getRMAddress(YarnConfiguration conf,
+  @InterfaceAudience.Private
+  @Override
+  protected InetSocketAddress getRMAddress(YarnConfiguration conf,
       Class<?> protocol) throws IOException {
     if (protocol == ApplicationClientProtocol.class) {
       return conf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
@@ -89,4 +112,12 @@ public class ClientRMProxy<T> extends RM
       throw new IllegalStateException(message);
     }
   }
+
+  @InterfaceAudience.Private
+  @Override
+  protected void checkAllowedProtocols(Class<?> protocol) {
+    Preconditions.checkArgument(
+        protocol.isAssignableFrom(ClientRMProtocols.class),
+        "RM does not support this client protocol");
+  }
 }
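
The checkAllowedProtocols override above relies on ClientRMProtocols extending every protocol the RM serves to clients, so protocol.isAssignableFrom(ClientRMProtocols.class) is true exactly for the supported interfaces. A standalone sketch of that pattern, using toy interfaces rather than the real YARN protocols:

// Toy interfaces standing in for ApplicationClientProtocol, ApplicationMasterProtocol
// and ResourceManagerAdministrationProtocol; only the reflection check is the point.
public class ProtocolCheckSketch {
  interface ClientProtocol {}
  interface MasterProtocol {}
  interface AdminProtocol {}
  // Umbrella type that extends everything the proxy is allowed to serve.
  interface AllSupportedProtocols extends ClientProtocol, MasterProtocol, AdminProtocol {}

  static boolean isSupported(Class<?> protocol) {
    // True iff protocol is a super-interface of the umbrella type.
    return protocol.isAssignableFrom(AllSupportedProtocols.class);
  }

  public static void main(String[] args) {
    System.out.println(isSupported(ClientProtocol.class)); // true
    System.out.println(isSupported(Runnable.class));       // false
  }
}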

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java Thu Dec 19 02:03:47 2013
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.client.api.impl;
 
 import java.io.IOException;
-import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.List;
@@ -48,6 +47,7 @@ import org.apache.hadoop.yarn.api.protoc
 import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
@@ -78,8 +78,8 @@ public class YarnClientImpl extends Yarn
   private static final Log LOG = LogFactory.getLog(YarnClientImpl.class);
 
   protected ApplicationClientProtocol rmClient;
-  protected InetSocketAddress rmAddress;
-  protected long statePollIntervalMillis;
+  protected long submitPollIntervalMillis;
+  private long asyncApiPollIntervalMillis;
 
   private static final String ROOT = "root";
 
@@ -87,17 +87,19 @@ public class YarnClientImpl extends Yarn
     super(YarnClientImpl.class.getName());
   }
 
-  private static InetSocketAddress getRmAddress(Configuration conf) {
-    return conf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
-      YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT);
-  }
-
+  @SuppressWarnings("deprecation")
   @Override
   protected void serviceInit(Configuration conf) throws Exception {
-    this.rmAddress = getRmAddress(conf);
-    statePollIntervalMillis = conf.getLong(
+    asyncApiPollIntervalMillis =
+        conf.getLong(YarnConfiguration.YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS,
+          YarnConfiguration.DEFAULT_YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS);
+    submitPollIntervalMillis = asyncApiPollIntervalMillis;
+    if (conf.get(YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS)
+        != null) {
+      submitPollIntervalMillis = conf.getLong(
         YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS,
-        YarnConfiguration.DEFAULT_YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS);
+        YarnConfiguration.DEFAULT_YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS);
+    }
     super.serviceInit(conf);
   }
 
@@ -165,25 +167,41 @@ public class YarnClientImpl extends Yarn
             " is still in " + state);
       }
       try {
-        Thread.sleep(statePollIntervalMillis);
+        Thread.sleep(submitPollIntervalMillis);
       } catch (InterruptedException ie) {
       }
     }
 
-
-    LOG.info("Submitted application " + applicationId + " to ResourceManager"
-        + " at " + rmAddress);
+    LOG.info("Submitted application " + applicationId);
     return applicationId;
   }
 
   @Override
   public void killApplication(ApplicationId applicationId)
       throws YarnException, IOException {
-    LOG.info("Killing application " + applicationId);
     KillApplicationRequest request =
         Records.newRecord(KillApplicationRequest.class);
     request.setApplicationId(applicationId);
-    rmClient.forceKillApplication(request);
+
+    try {
+      int pollCount = 0;
+      while (true) {
+        KillApplicationResponse response =
+            rmClient.forceKillApplication(request);
+        if (response.getIsKillCompleted()) {
+          break;
+        }
+        if (++pollCount % 10 == 0) {
+          LOG.info("Watiting for application " + applicationId
+              + " to be killed.");
+        }
+        Thread.sleep(asyncApiPollIntervalMillis);
+      }
+    } catch (InterruptedException e) {
+      LOG.error("Interrupted while waiting for application " + applicationId
+          + " to be killed.");
+    }
+    LOG.info("Killed application " + applicationId);
   }
 
   @Override
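
With the change above, YarnClient#killApplication polls the RM until KillApplicationResponse#getIsKillCompleted() returns true (or the thread is interrupted), so a normal return can be treated as completion. A hedged usage sketch follows; the ApplicationId value is a placeholder, and in practice the id would come from submitApplication() or from listing running applications.

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

// Usage sketch only: kill an application and rely on the new polling behaviour.
public class KillApplicationSketch {
  public static void main(String[] args) throws Exception {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration());
    yarnClient.start();
    try {
      // Placeholder id for illustration; use a real cluster timestamp / id pair.
      ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
      // Blocks, polling the RM, until the response reports the kill as completed.
      yarnClient.killApplication(appId);
      System.out.println("Application " + appId + " killed");
    } finally {
      yarnClient.stop();
    }
  }
}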

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceManagerAdministrationProtocolPBClientImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceManagerAdministrationProtocolPBClientImpl.java?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceManagerAdministrationProtocolPBClientImpl.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceManagerAdministrationProtocolPBClientImpl.java Thu Dec 19 02:03:47 2013
@@ -41,6 +41,8 @@ import org.apache.hadoop.yarn.server.api
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -159,6 +161,14 @@ public class TestResourceManagerAdminist
     RefreshAdminAclsResponse response = client.refreshAdminAcls(request);
     assertNotNull(response);
   }
+  
+  @Test
+  public void testUpdateNodeResource() throws Exception {
+    UpdateNodeResourceRequest request = recordFactory
+            .newRecordInstance(UpdateNodeResourceRequest.class);
+    UpdateNodeResourceResponse response = client.updateNodeResource(request);
+    assertNotNull(response);
+  }
 
   @Test
   public void testRefreshServiceAcls() throws Exception {

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java Thu Dec 19 02:03:47 2013
@@ -42,6 +42,8 @@ import org.apache.hadoop.yarn.api.protoc
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -89,6 +91,7 @@ public class TestYarnClient {
     rm.stop();
   }
 
+  @SuppressWarnings("deprecation")
   @Test (timeout = 30000)
   public void testSubmitApplication() {
     Configuration conf = new Configuration();
@@ -128,6 +131,23 @@ public class TestYarnClient {
     client.stop();
   }
 
+  @Test
+  public void testKillApplication() throws Exception {
+    MockRM rm = new MockRM();
+    rm.start();
+    RMApp app = rm.submitApp(2000);
+
+    Configuration conf = new Configuration();
+    @SuppressWarnings("resource")
+    final YarnClient client = new MockYarnClient();
+    client.init(conf);
+    client.start();
+
+    client.killApplication(app.getApplicationId());
+    verify(((MockYarnClient) client).getRMClient(), times(2))
+      .forceKillApplication(any(KillApplicationRequest.class));
+  }
+
   @Test(timeout = 30000)
   public void testApplicationType() throws Exception {
     Logger rootLogger = LogManager.getRootLogger();
@@ -234,6 +254,11 @@ public class TestYarnClient {
             GetApplicationReportRequest.class))).thenReturn(mockResponse);
         when(rmClient.getApplications(any(GetApplicationsRequest.class)))
             .thenReturn(mockAppResponse);
+        // return false for 1st kill request, and true for the 2nd.
+        when(rmClient.forceKillApplication(any(
+          KillApplicationRequest.class)))
+          .thenReturn(KillApplicationResponse.newInstance(false)).thenReturn(
+            KillApplicationResponse.newInstance(true));
       } catch (YarnException e) {
         Assert.fail("Exception is not expected.");
       } catch (IOException e) {
@@ -242,6 +267,10 @@ public class TestYarnClient {
       when(mockResponse.getApplicationReport()).thenReturn(mockReport);
     }
 
+    public ApplicationClientProtocol getRMClient() {
+      return rmClient;
+    }
+
     @Override
     public List<ApplicationReport> getApplications(
         Set<String> applicationTypes, EnumSet<YarnApplicationState> applicationStates)
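
The stubbing above returns isKillCompleted=false for the first forceKillApplication call and true for the second, which drives the new polling loop in killApplication to issue exactly two RPCs (hence the times(2) verification). Below is a standalone sketch of the same consecutive-stubbing idea, using a toy interface rather than the YARN protocol:

import static org.mockito.Mockito.*;

// Standalone Mockito sketch (toy interface, not the YARN protocol): consecutive
// thenReturn() values let a test drive a retry/poll loop deterministically.
public class ConsecutiveStubbingSketch {
  interface KillService {
    boolean forceKill(String appId);
  }

  public static void main(String[] args) {
    KillService svc = mock(KillService.class);
    when(svc.forceKill(anyString()))
        .thenReturn(false)   // first poll: kill not yet complete
        .thenReturn(true);   // second poll: kill complete

    int polls = 0;
    while (!svc.forceKill("app_1")) {
      polls++;
    }
    // The stub flipped to true on the second call.
    System.out.println("polled " + (polls + 1) + " times");
    verify(svc, times(2)).forceKill("app_1");
  }
}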

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationResponsePBImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationResponsePBImpl.java?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationResponsePBImpl.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationResponsePBImpl.java Thu Dec 19 02:03:47 2013
@@ -23,6 +23,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationResponseProtoOrBuilder;
 
 import com.google.protobuf.TextFormat;
 
@@ -67,4 +68,24 @@ public class KillApplicationResponsePBIm
   public String toString() {
     return TextFormat.shortDebugString(getProto());
   }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = KillApplicationResponseProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  @Override
+  public boolean getIsKillCompleted() {
+    KillApplicationResponseProtoOrBuilder p =
+        viaProto ? proto : builder;
+    return p.getIsKillCompleted();
+  }
+
+  @Override
+  public void setIsKillCompleted(boolean isKillCompleted) {
+    maybeInitBuilder();
+    builder.setIsKillCompleted(isKillCompleted);
+  }
 }  
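
The new accessors above follow the usual proto-or-builder pattern (read from whichever of proto/builder is current, write through maybeInitBuilder). For completeness, a tiny sketch of how the flag is produced and read through the record interface, using the KillApplicationResponse.newInstance factory that the TestYarnClient hunk above also uses; this is illustration only, since in practice the RM builds the response and clients only read it.

import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;

// Sketch only: build and read the new isKillCompleted flag via the record API.
public class KillResponseFlagSketch {
  public static void main(String[] args) {
    KillApplicationResponse pending = KillApplicationResponse.newInstance(false);
    KillApplicationResponse done = KillApplicationResponse.newInstance(true);
    // YarnClientImpl keeps polling while the flag is false.
    System.out.println("pending: " + pending.getIsKillCompleted());
    System.out.println("done:    " + done.getIsKillCompleted());
  }
}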

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java Thu Dec 19 02:03:47 2013
@@ -36,6 +36,8 @@ import org.apache.hadoop.io.retry.RetryP
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
@@ -48,7 +50,68 @@ import com.google.common.annotations.Vis
 public class RMProxy<T> {
 
   private static final Log LOG = LogFactory.getLog(RMProxy.class);
+  protected static RMProxy INSTANCE;
 
+  protected RMProxy() {}
+
+  /**
+   * Verify the passed protocol is supported.
+   */
+  @Private
+  protected void checkAllowedProtocols(Class<?> protocol) {}
+
+  /**
+   * Get the ResourceManager address from the provided Configuration for the
+   * given protocol.
+   */
+  @Private
+  protected InetSocketAddress getRMAddress(
+      YarnConfiguration conf, Class<?> protocol) throws IOException {
+    throw new UnsupportedOperationException("This method should be invoked " +
+        "from an instance of ClientRMProxy or ServerRMProxy");
+  }
+
+  /**
+   * Create a proxy for the specified protocol. For non-HA,
+   * this is a direct connection to the ResourceManager address. When HA is
+   * enabled, the proxy handles the failover between the ResourceManagers as
+   * well.
+   */
+  @Private
+  protected static <T> T createRMProxy(final Configuration configuration,
+      final Class<T> protocol) throws IOException {
+    YarnConfiguration conf = (configuration instanceof YarnConfiguration)
+        ? (YarnConfiguration) configuration
+        : new YarnConfiguration(configuration);
+    RetryPolicy retryPolicy = createRetryPolicy(conf);
+    if (HAUtil.isHAEnabled(conf)) {
+      RMFailoverProxyProvider<T> provider =
+          INSTANCE.createRMFailoverProxyProvider(conf, protocol);
+      return (T) RetryProxy.create(protocol, provider, retryPolicy);
+    } else {
+      InetSocketAddress rmAddress = INSTANCE.getRMAddress(conf, protocol);
+      LOG.info("Connecting to ResourceManager at " + rmAddress);
+      T proxy = RMProxy.<T>getProxy(conf, protocol, rmAddress);
+      return (T) RetryProxy.create(protocol, proxy, retryPolicy);
+    }
+  }
+
+  /**
+   * @deprecated
+   * This method is deprecated and is not used by YARN internally any more.
+   * To create a proxy to the RM, use ClientRMProxy#createRMProxy or
+   * ServerRMProxy#createRMProxy.
+   *
+   * Create a proxy to the ResourceManager at the specified address.
+   *
+   * @param conf Configuration to generate retry policy
+   * @param protocol Protocol for the proxy
+   * @param rmAddress Address of the ResourceManager
+   * @param <T> Type information of the proxy
+   * @return Proxy to the RM
+   * @throws IOException
+   */
+  @Deprecated
   public static <T> T createRMProxy(final Configuration conf,
       final Class<T> protocol, InetSocketAddress rmAddress) throws IOException {
     RetryPolicy retryPolicy = createRetryPolicy(conf);
@@ -57,12 +120,16 @@ public class RMProxy<T> {
     return (T) RetryProxy.create(protocol, proxy, retryPolicy);
   }
 
-  private static <T> T getProxy(final Configuration conf,
+  /**
+   * Get a proxy to the RM at the specified address. To be used to create a
+   * RetryProxy.
+   */
+  @Private
+  static <T> T getProxy(final Configuration conf,
       final Class<T> protocol, final InetSocketAddress rmAddress)
       throws IOException {
     return UserGroupInformation.getCurrentUser().doAs(
       new PrivilegedAction<T>() {
-
         @Override
         public T run() {
           return (T) YarnRPC.create(conf).getProxy(protocol, rmAddress, conf);
@@ -70,6 +137,50 @@ public class RMProxy<T> {
       });
   }
 
+  /**
+   * Helper method to create FailoverProxyProvider.
+   */
+  private <T> RMFailoverProxyProvider<T> createRMFailoverProxyProvider(
+      Configuration conf, Class<T> protocol) {
+    Class<? extends RMFailoverProxyProvider<T>> defaultProviderClass;
+    try {
+      defaultProviderClass = (Class<? extends RMFailoverProxyProvider<T>>)
+          Class.forName(
+              YarnConfiguration.DEFAULT_CLIENT_FAILOVER_PROXY_PROVIDER);
+    } catch (Exception e) {
+      throw new YarnRuntimeException("Invalid default failover provider class " +
+          YarnConfiguration.DEFAULT_CLIENT_FAILOVER_PROXY_PROVIDER, e);
+    }
+
+    RMFailoverProxyProvider<T> provider = ReflectionUtils.newInstance(
+        conf.getClass(YarnConfiguration.CLIENT_FAILOVER_PROXY_PROVIDER,
+            defaultProviderClass, RMFailoverProxyProvider.class), conf);
+    provider.init(conf, (RMProxy<T>) this, protocol);
+    return provider;
+  }
+
+  /**
+   * A RetryPolicy to allow failing over up to the specified maximum time.
+   */
+  private static class FailoverUptoMaximumTimePolicy implements RetryPolicy {
+    private long maxTime;
+
+    FailoverUptoMaximumTimePolicy(long maxTime) {
+      this.maxTime = maxTime;
+    }
+
+    @Override
+    public RetryAction shouldRetry(Exception e, int retries, int failovers,
+        boolean isIdempotentOrAtMostOnce) throws Exception {
+      return System.currentTimeMillis() < maxTime
+          ? RetryAction.FAILOVER_AND_RETRY
+          : RetryAction.FAIL;
+    }
+  }
+
+  /**
+   * Fetch retry policy from Configuration
+   */
   @Private
   @VisibleForTesting
   public static RetryPolicy createRetryPolicy(Configuration conf) {
@@ -81,19 +192,10 @@ public class RMProxy<T> {
         conf.getLong(
             YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS,
             YarnConfiguration
-            .DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS);
-
-    if (rmConnectionRetryIntervalMS < 0) {
-      throw new YarnRuntimeException("Invalid Configuration. " +
-          YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS +
-          " should not be negative.");
-    }
+                .DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS);
 
     boolean waitForEver = (rmConnectWaitMS == -1);
-
-    if (waitForEver) {
-      return  RetryPolicies.RETRY_FOREVER;
-    } else {
+    if (!waitForEver) {
       if (rmConnectWaitMS < 0) {
         throw new YarnRuntimeException("Invalid Configuration. "
             + YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS
@@ -110,18 +212,54 @@ public class RMProxy<T> {
       }
     }
 
+    // Handle HA case first
+    if (HAUtil.isHAEnabled(conf)) {
+      final long failoverSleepBaseMs = conf.getLong(
+          YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS,
+          rmConnectionRetryIntervalMS);
+
+      final long failoverSleepMaxMs = conf.getLong(
+          YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_MAX_MS,
+          rmConnectionRetryIntervalMS);
+
+      int maxFailoverAttempts = conf.getInt(
+          YarnConfiguration.CLIENT_FAILOVER_MAX_ATTEMPTS, -1);
+
+      RetryPolicy basePolicy = RetryPolicies.TRY_ONCE_THEN_FAIL;
+      if (maxFailoverAttempts == -1) {
+        if (waitForEver) {
+          basePolicy = RetryPolicies.FAILOVER_FOREVER;
+        } else {
+          basePolicy = new FailoverUptoMaximumTimePolicy(
+              System.currentTimeMillis() + rmConnectWaitMS);
+        }
+        maxFailoverAttempts = 0;
+      }
+
+      return RetryPolicies.failoverOnNetworkException(basePolicy,
+          maxFailoverAttempts, failoverSleepBaseMs, failoverSleepMaxMs);
+    }
+
+    if (waitForEver) {
+      return RetryPolicies.RETRY_FOREVER;
+    }
+
+    if (rmConnectionRetryIntervalMS < 0) {
+      throw new YarnRuntimeException("Invalid Configuration. " +
+          YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS +
+          " should not be negative.");
+    }
+
     RetryPolicy retryPolicy =
         RetryPolicies.retryUpToMaximumTimeWithFixedSleep(rmConnectWaitMS,
-            rmConnectionRetryIntervalMS,
-            TimeUnit.MILLISECONDS);
+            rmConnectionRetryIntervalMS, TimeUnit.MILLISECONDS);
 
     Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
         new HashMap<Class<? extends Exception>, RetryPolicy>();
     exceptionToPolicyMap.put(ConnectException.class, retryPolicy);
     //TO DO: after HADOOP-9576,  IOException can be changed to EOFException
     exceptionToPolicyMap.put(IOException.class, retryPolicy);
-
-    return RetryPolicies.retryByException(RetryPolicies.TRY_ONCE_THEN_FAIL,
-      exceptionToPolicyMap);
+    return RetryPolicies.retryByException(
+        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
   }
 }
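
The rewritten createRetryPolicy covers three cases: HA (failover bounded either by attempt count or, via FailoverUptoMaximumTimePolicy, by wall-clock time), wait-forever, and time-bounded retries keyed by exception type. A sketch of the last case, built from the same org.apache.hadoop.io.retry primitives; the class name and the 15-minute/30-second values are illustrative, not YARN defaults.

import java.io.IOException;
import java.net.ConnectException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

// Sketch of the non-HA branch: retry connection-type failures with a fixed
// sleep up to a maximum wait, and fail fast on everything else.
public class RmRetryPolicySketch {
  public static RetryPolicy buildPolicy(long maxWaitMs, long retryIntervalMs) {
    RetryPolicy timeBounded = RetryPolicies.retryUpToMaximumTimeWithFixedSleep(
        maxWaitMs, retryIntervalMs, TimeUnit.MILLISECONDS);

    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicy =
        new HashMap<Class<? extends Exception>, RetryPolicy>();
    exceptionToPolicy.put(ConnectException.class, timeBounded);
    exceptionToPolicy.put(IOException.class, timeBounded);

    // Anything not listed above is tried once and then rethrown.
    return RetryPolicies.retryByException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicy);
  }

  public static void main(String[] args) {
    RetryPolicy policy = buildPolicy(
        TimeUnit.MINUTES.toMillis(15), TimeUnit.SECONDS.toMillis(30));
    System.out.println("built policy: " + policy);
  }
}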

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java Thu Dec 19 02:03:47 2013
@@ -53,6 +53,7 @@ import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SecureIOUtils;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.file.tfile.TFile;
@@ -294,7 +295,7 @@ public class AggregatedLogFormat {
       out.close();
     }
 
-    public void closeWriter() {
+    public void close() {
       try {
         this.writer.close();
       } catch (IOException e) {
@@ -569,9 +570,8 @@ public class AggregatedLogFormat {
       out.println("");
     }
 
-    public void close() throws IOException {
-      this.scanner.close();
-      this.fsDataIStream.close();
+    public void close() {
+      IOUtils.cleanup(LOG, scanner, reader, fsDataIStream);
     }
   }
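
The reader's close() now delegates to IOUtils.cleanup, which closes each argument independently, skips nulls, and logs rather than propagates IOExceptions; that is why close() no longer needs to declare throws IOException. A minimal sketch of that behaviour, assuming hadoop-common and commons-logging on the classpath (the class name is made up for illustration):

import java.io.ByteArrayInputStream;
import java.io.InputStream;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.IOUtils;

// Sketch: IOUtils.cleanup() closes every argument, logging (not throwing)
// any IOException and tolerating null entries.
public class CleanupSketch {
  private static final Log LOG = LogFactory.getLog(CleanupSketch.class);

  public static void main(String[] args) {
    InputStream a = new ByteArrayInputStream(new byte[] {1, 2, 3});
    InputStream b = null; // nulls are skipped
    IOUtils.cleanup(LOG, a, b);
    System.out.println("all streams closed");
  }
}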
 

Modified: hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java?rev=1552205&r1=1552204&r2=1552205&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java Thu Dec 19 02:03:47 2013
@@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.proto.Yarn
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto;
 import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
 import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest;
@@ -51,6 +52,8 @@ import org.apache.hadoop.yarn.server.api
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
@@ -63,6 +66,8 @@ import org.apache.hadoop.yarn.server.api
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsRequestPBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl;
 
 import com.google.protobuf.ServiceException;
 
@@ -186,5 +191,19 @@ public class ResourceManagerAdministrati
       throw ProtobufHelper.getRemoteException(e);
     }
   }
+
+  @Override
+  public UpdateNodeResourceResponse updateNodeResource(
+      UpdateNodeResourceRequest request) throws YarnException, IOException {
+    UpdateNodeResourceRequestProto requestProto =
+        ((UpdateNodeResourceRequestPBImpl) request).getProto();
+    try {
+      return new UpdateNodeResourceResponsePBImpl(proxy.updateNodeResource(null,
+          requestProto));
+    } catch (ServiceException e) {
+      RPCUtil.unwrapAndThrowException(e);
+      return null;
+    }
+  }
   
 }